Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7,769 changes: 5,459 additions & 2,310 deletions client/package-lock.json

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions client/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
"path": "^0.12.7",
"react": "^18.3.1",
"react-dom": "^18.3.1",
"react-markdown": "^10.1.0",
"react-router-dom": "^6.23.1",
"react-scripts": "^5.0.1",
"utif": "^3.1.0",
Expand Down
182 changes: 182 additions & 0 deletions client/src/components/Chatbot.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,182 @@
import React, { useEffect, useState, useRef } from 'react'
import { Button, Input, List, Typography, Space, Spin, Popconfirm } from 'antd'
import { SendOutlined, CloseOutlined, DeleteOutlined } from '@ant-design/icons'
import { queryChatBot, clearChat } from '../utils/api'
import ReactMarkdown from 'react-markdown'

const { TextArea } = Input
const { Text } = Typography
const initialMessage = [{ id: 1, text: "Hello! I'm your AI assistant. How can I help you today?", isUser: false }]

function Chatbot({ onClose }) {
const [messages, setMessages] = useState(() => {
const saved = localStorage.getItem('chatMessages')
return saved ? JSON.parse(saved) : initialMessage
})
const [inputValue, setInputValue] = useState('')
const [isSending, setIsSending] = useState(false)
const lastMessageRef = useRef(null)

const scrollToLastMessage = () => {
setTimeout(() => {
if (lastMessageRef.current) {
lastMessageRef.current.scrollIntoView({ behavior: 'smooth', block: 'start' })
}
}, 0)
}

useEffect(() => {
localStorage.setItem('chatMessages', JSON.stringify(messages))
}, [messages])

useEffect(() => {
scrollToLastMessage()
}, [messages, isSending])

const handleSendMessage = async () => {
if (!inputValue.trim() || isSending) return
const query = inputValue
setInputValue('')
const userMessage = { id: messages.length + 1, text: query, isUser: true }
setMessages(prev => [...prev, userMessage])
setIsSending(true)
try {
const responseText = await queryChatBot(query)
const botMessage = { id: userMessage.id + 1, text: responseText || 'Sorry, I could not generate a response.', isUser: false }
setMessages(prev => [...prev, botMessage])
} catch (e) {
setMessages(prev => [...prev, { id: prev.length + 1, text: 'Error contacting chatbot.', isUser: false }])
} finally {
setIsSending(false)
}
}

const handleKeyPress = (e) => {
if (e.key === 'Enter' && !e.shiftKey) {
e.preventDefault()
handleSendMessage()
}
}

const handleClearChat = async () => {
try {
await clearChat()
setMessages(initialMessage)
localStorage.setItem('chatMessages', JSON.stringify(initialMessage))
} catch (e) {
console.error('Failed to clear chat:', e)
}
}

return (
<div
style={{
height: '100vh',
display: 'flex',
flexDirection: 'column'
}}
>
<div
style={{
padding: '16px',
display: 'flex',
justifyContent: 'space-between',
alignItems: 'center',
}}
>
<Text strong>AI Assistant</Text>
<Space>
<Popconfirm
title="Clear chat history"
onConfirm={handleClearChat}
okText="Clear"
cancelText="Cancel"
>
<Button
type="text"
icon={<DeleteOutlined />}
size="small"
/>
</Popconfirm>
<Button
type="text"
icon={<CloseOutlined />}
onClick={onClose}
size="small"
/>
</Space>
</div>
<div
style={{
flex: 1,
overflow: 'auto',
padding: '16px',
}}
>
<List
dataSource={messages}
renderItem={(message, index) => {
const isLastMessage = index === messages.length - 1
return (
<List.Item
ref={isLastMessage ? lastMessageRef : null}
style={{
border: 'none',
padding: '8px 0',
justifyContent: message.isUser ? 'flex-end' : 'flex-start'
}}
>
<div
style={{
maxWidth: '80%',
padding: '8px 12px',
borderRadius: '12px',
backgroundColor: message.isUser ? '#1890ff' : '#f5f5f5',
color: message.isUser ? 'white' : 'black',

}}
>
{message.isUser ? (
<Text style={{ color: 'white' }}>
{message.text}
</Text>
) : (
<ReactMarkdown
components={{
ul: ({ children }) => <ul style={{ paddingLeft: '20px' }}>{children}</ul>,
ol: ({ children }) => <ol style={{ paddingLeft: '20px' }}>{children}</ol>
}}
>
{message.text}
</ReactMarkdown>
)}
</div>
</List.Item>
)
}}
/>
{isSending && (
<Spin size="small" />
)}
</div>
<div style={{ padding: '16px' }}>
<Space.Compact style={{ width: '100%' }}>
<TextArea
value={inputValue}
onChange={(e) => setInputValue(e.target.value)}
onKeyPress={handleKeyPress}
placeholder="Type your message..."
autoSize={{ minRows: 1, maxRows: 3 }}
/>
<Button
icon={<SendOutlined />}
onClick={handleSendMessage}
disabled={!inputValue.trim() || isSending}
/>
</Space.Compact>
</div>
</div>
)
}

export default Chatbot
22 changes: 22 additions & 0 deletions client/src/utils/api.js
Original file line number Diff line number Diff line change
Expand Up @@ -170,3 +170,25 @@ export async function stopModelInference () {
handleError(error)
}
}

/**
 * Send a user query to the chatbot backend.
 *
 * @param {string} query - The user's message text.
 * @returns {Promise<string|undefined>} The bot's reply, or undefined when
 *   the request fails (the error is routed through handleError).
 */
export async function queryChatBot (query) {
  const endpoint = `${process.env.REACT_APP_API_PROTOCOL}://${process.env.REACT_APP_API_URL}/chat/query`
  try {
    const response = await axios.post(endpoint, { query })
    return response.data?.response
  } catch (error) {
    handleError(error)
  }
}

/**
 * Ask the backend to discard the server-side chat memory.
 *
 * @returns {Promise<void>} Resolves when the history has been cleared;
 *   failures are routed through handleError.
 */
export async function clearChat () {
  const endpoint = `${process.env.REACT_APP_API_PROTOCOL}://${process.env.REACT_APP_API_URL}/chat/clear`
  try {
    await axios.post(endpoint)
  } catch (error) {
    handleError(error)
  }
}
23 changes: 22 additions & 1 deletion client/src/views/Views.js
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,9 @@ import Visualization from '../views/Visualization'
import ModelTraining from '../views/ModelTraining'
import ModelInference from '../views/ModelInference'
import Monitoring from '../views/Monitoring'
import { Layout, Menu } from 'antd'
import Chatbot from '../components/Chatbot'
import { Layout, Menu, Button } from 'antd'
import { MessageOutlined } from '@ant-design/icons'
import { getNeuroglancerViewer } from '../utils/api'

const { Content, Sider } = Layout
Expand All @@ -14,6 +16,7 @@ function Views () {
const [viewers, setViewers] = useState([])
const [isLoading, setIsLoading] = useState(false)
const [isInferring, setIsInferring] = useState(false)
const [isChatOpen, setIsChatOpen] = useState(false)
console.log(viewers)

const onClick = (e) => {
Expand Down Expand Up @@ -122,6 +125,24 @@ function Views () {
{renderMenu()}
</Content>
</Layout>
{isChatOpen ? (
<Sider
width={400}
theme='light'
>
<Chatbot onClose={() => setIsChatOpen(false)} />
</Sider>
) : (
<Button
type="primary"
shape="circle"
icon={<MessageOutlined />}
onClick={() => setIsChatOpen(true)}
style={{
margin: '8px 8px'
}}
/>
)}
</>
)}
</Layout>
Expand Down
42 changes: 42 additions & 0 deletions server_api/chatbot/chatbot.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
from langchain_ollama import OllamaEmbeddings, ChatOllama
from langchain_community.vectorstores import FAISS
from langchain_classic.chains import ConversationalRetrievalChain
from langchain_classic.memory import ConversationBufferMemory
from langchain_classic.prompts import ChatPromptTemplate
from utils.utils import process_path

# Single Ollama deployment serves both embedding and chat generation;
# hoisted to constants so the host/model is changed in one place.
# NOTE(review): hard-coded internal hostname — consider moving to config/env.
OLLAMA_BASE_URL = 'http://cscigpu08.bc.edu:11434'
OLLAMA_MODEL = 'mistral:latest'

# Embed with the same model the chat uses, and load the prebuilt FAISS
# index shipped alongside this module.  allow_dangerous_deserialization
# is required because the index metadata is pickled; only load indexes
# built by this project.
embeddings = OllamaEmbeddings(model=OLLAMA_MODEL, base_url=OLLAMA_BASE_URL)
faiss_path = process_path('server_api/chatbot/faiss_index')
vectorstore = FAISS.load_local(faiss_path, embeddings, allow_dangerous_deserialization=True)
retriever = vectorstore.as_retriever()

# System prompt steering the assistant toward non-technical, UI-level
# answers; {context} is filled with retrieved documents at query time.
system_prompt = '''
You are a helpful AI assistant for the PyTorch Connectomics client, designed to help non-technical users navigate and use the application effectively.
IMPORTANT GUIDELINES:
- You are helping end-users who have no programming knowledge
- Focus on what users can see and do in the interface, not technical implementation details
- Provide concise, step-by-step instructions for using the platform
- Explain features in terms of user actions (clicking buttons, navigating menus, etc.)
- Avoid technical jargon, API endpoints, or code-related explanations
EXAMPLES OF GOOD vs BAD RESPONSES:
BAD: "You need to set the isTraining boolean to true and call the start_model_training endpoint"
GOOD: "To start training a model, go to the 'Model Training' tab, configure your training parameters using the step-by-step wizard, then click the 'Start Training' button"
BAD: "Access the /neuroglancer endpoint with image and label paths"
GOOD: "To visualize your data, first upload your image and label files using the drag-and-drop area, then select them from the dropdown menus, enter the voxel scales, and click 'Visualize'"
BAD: "The trainingStatus state variable tracks the current training progress"
GOOD: "You can monitor your training progress by checking the status message below the training buttons, or by going to the 'Tensorboard' tab to see detailed metrics"
Remember: Help users navigate the no-code interface, not understand the underlying technical architecture.
Here is the related content that will help you answer the user's question:
{context}
'''
prompt = ChatPromptTemplate.from_messages([
    ('system', system_prompt),
    ('human', '{question}')
])

# temperature=0 for deterministic, instruction-following answers.
llm = ChatOllama(model=OLLAMA_MODEL, base_url=OLLAMA_BASE_URL, temperature=0)

# Conversation memory lives for the process lifetime and is shared by
# all callers of this module-level chain.
memory = ConversationBufferMemory(return_messages=True, memory_key='chat_history')
chain = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=retriever,
    memory=memory,
    combine_docs_chain_kwargs={'prompt': prompt}
)
Binary file added server_api/chatbot/faiss_index/index.faiss
Binary file not shown.
Binary file added server_api/chatbot/faiss_index/index.pkl
Binary file not shown.
Loading
Loading