Skip to content

Commit f4019ee

Browse files
committed
Added MCP functionality with tool calls and tool call display
1 parent ca3ab25 commit f4019ee

File tree

18 files changed

+1448
-91
lines changed

18 files changed

+1448
-91
lines changed

src/client/content/chatbot.py

Lines changed: 188 additions & 62 deletions
Original file line numberDiff line numberDiff line change
@@ -2,11 +2,11 @@
22
Copyright (c) 2024, 2025, Oracle and/or its affiliates.
33
Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl.
44
5-
Session States Set:
6-
- user_client: Stores the Client
5+
This file merges the Streamlit Chatbot GUI with the MCPClient for a complete,
6+
runnable example demonstrating their integration.
77
"""
88

9-
# spell-checker:ignore streamlit, oraclevs, selectai
9+
# spell-checker:ignore streamlit, oraclevs, selectai, langgraph, prebuilt
1010
import asyncio
1111
import inspect
1212
import json
@@ -21,8 +21,9 @@
2121
import client.utils.api_call as api_call
2222

2323
from client.utils.st_footer import render_chat_footer
24-
import client.utils.client as client
2524
import common.logging_config as logging_config
25+
from client.mcp.client import MCPClient
26+
from pathlib import Path
2627

2728
logger = logging_config.logging.getLogger("client.content.chatbot")
2829

@@ -67,95 +68,220 @@ async def main() -> None:
6768
#########################################################################
6869
# Sidebar Settings
6970
#########################################################################
70-
# Get a list of available language models, if none, then stop
7171
ll_models_enabled = st_common.enabled_models_lookup("ll")
7272
if not ll_models_enabled:
7373
st.error("No language models are configured and/or enabled. Disabling Client.", icon="🛑")
7474
st.stop()
75-
# the sidebars will set this to False if not everything is configured.
7675
state.enable_client = True
7776
st_common.tools_sidebar()
7877
st_common.history_sidebar()
7978
st_common.ll_sidebar()
8079
st_common.selectai_sidebar()
8180
st_common.vector_search_sidebar()
82-
# Stop when sidebar configurations not set
8381
if not state.enable_client:
8482
st.stop()
8583

8684
#########################################################################
8785
# Chatty-Bot Centre
8886
#########################################################################
89-
# Establish the Client
90-
if "user_client" not in state:
91-
state.user_client = client.Client(
92-
server=state.server,
93-
settings=state.client_settings,
94-
timeout=1200,
95-
)
96-
user_client: client.Client = state.user_client
97-
98-
history = await user_client.get_history()
87+
88+
if "messages" not in state:
89+
state.messages = []
90+
9991
st.chat_message("ai").write("Hello, how can I help you?")
100-
vector_search_refs = []
101-
for message in history or []:
102-
if not message["content"]:
92+
93+
for message in state.messages:
94+
role = message.get("role")
95+
display_role = ""
96+
if role in ("human", "user"):
97+
display_role = "human"
98+
elif role in ("ai", "assistant"):
99+
if not message.get("content") and not message.get("tool_trace"):
100+
continue
101+
display_role = "assistant"
102+
else:
103103
continue
104-
if message["role"] == "tool" and message["name"] == "oraclevs_tool":
105-
vector_search_refs = json.loads(message["content"])
106-
if message["role"] in ("ai", "assistant"):
107-
with st.chat_message("ai"):
108-
st.markdown(message["content"])
109-
if vector_search_refs:
110-
show_vector_search_refs(vector_search_refs)
111-
vector_search_refs = []
112-
elif message["role"] in ("human", "user"):
113-
with st.chat_message("human"):
114-
content = message["content"]
104+
105+
with st.chat_message(display_role):
106+
if "tool_trace" in message and message["tool_trace"]:
107+
for tool_call in message["tool_trace"]:
108+
with st.expander(f"🛠️ **Tool Call:** `{tool_call['name']}`", expanded=False):
109+
st.text("Arguments:")
110+
st.code(json.dumps(tool_call.get('args', {}), indent=2), language="json")
111+
if "error" in tool_call:
112+
st.text("Error:")
113+
st.error(tool_call['error'])
114+
else:
115+
st.text("Result:")
116+
st.code(tool_call.get('result', ''), language="json")
117+
if message.get("content"):
118+
# Display file attachments if present
119+
if "attachments" in message and message["attachments"]:
120+
for file in message["attachments"]:
121+
# Show appropriate icon based on file type
122+
if file["type"].startswith("image/"):
123+
st.image(file["preview"], use_container_width=True)
124+
st.markdown(f"🖼️ **{file['name']}** ({file['size']//1024} KB)")
125+
elif file["type"] == "application/pdf":
126+
st.markdown(f"📄 **{file['name']}** ({file['size']//1024} KB)")
127+
elif file["type"] in ("text/plain", "text/markdown"):
128+
st.markdown(f"📝 **{file['name']}** ({file['size']//1024} KB)")
129+
else:
130+
st.markdown(f"📎 **{file['name']}** ({file['size']//1024} KB)")
131+
132+
# Display message content - handle both string and list formats
133+
content = message.get("content")
115134
if isinstance(content, list):
116-
for part in content:
117-
if part["type"] == "text":
118-
st.write(part["text"])
119-
elif part["type"] == "image_url" and part["image_url"]["url"].startswith("data:image"):
120-
st.image(part["image_url"]["url"])
135+
# Extract and display only text parts
136+
text_parts = [part["text"] for part in content if part["type"] == "text"]
137+
st.markdown("\n".join(text_parts))
121138
else:
122-
st.write(content)
139+
st.markdown(content)
123140

124141
sys_prompt = state.client_settings["prompts"]["sys"]
125142
render_chat_footer()
143+
126144
if human_request := st.chat_input(
127145
f"Ask your question here... (current prompt: {sys_prompt})",
128146
accept_file=True,
129-
file_type=["jpg", "jpeg", "png"],
147+
file_type=["jpg", "jpeg", "png", "pdf", "txt", "docx"],
148+
key=f"chat_input_{len(state.messages)}",
130149
):
131-
st.chat_message("human").write(human_request.text)
132-
file_b64 = None
133-
if human_request["files"]:
134-
file = human_request["files"][0]
135-
file_bytes = file.read()
136-
file_b64 = base64.b64encode(file_bytes).decode("utf-8")
150+
# Process message with potential file attachments
151+
message = {"role": "user", "content": human_request.text}
152+
153+
# Handle file attachments
154+
if hasattr(human_request, "files") and human_request.files:
155+
# Store file information separately from content
156+
message["attachments"] = []
157+
for file in human_request.files:
158+
file_bytes = file.read()
159+
file_b64 = base64.b64encode(file_bytes).decode("utf-8")
160+
message["attachments"].append({
161+
"name": file.name,
162+
"type": file.type,
163+
"size": len(file_bytes),
164+
"data": file_b64,
165+
"preview": f"data:{file.type};base64,{file_b64}" if file.type.startswith("image/") else None
166+
})
167+
168+
state.messages.append(message)
169+
st.rerun()
170+
if state.messages and state.messages[-1]["role"] == "user":
137171
try:
138-
message_placeholder = st.chat_message("ai").empty()
139-
full_answer = ""
140-
async for chunk in user_client.stream(message=human_request.text, image_b64=file_b64):
141-
full_answer += chunk
142-
message_placeholder.markdown(full_answer)
143-
# Stream until we hit the end then refresh to replace with history
144-
st.rerun()
145-
except Exception:
146-
logger.error("Exception:", exc_info=1)
147-
st.chat_message("ai").write(
148-
"""
149-
I'm sorry, something's gone wrong. Please try again.
150-
If the problem persists, please raise an issue.
151-
"""
152-
)
153-
if st.button("Retry", key="reload_chatbot"):
154-
st_common.clear_state_key("user_client")
172+
with st.chat_message("ai"):
173+
with st.spinner("Thinking..."):
174+
client_settings_for_request = state.client_settings.copy()
175+
model_id = client_settings_for_request.get('ll_model', {}).get('model')
176+
if model_id:
177+
all_model_configs = st_common.enabled_models_lookup("ll")
178+
model_config = all_model_configs.get(model_id, {})
179+
if 'api_key' in model_config:
180+
if 'll_model' not in client_settings_for_request:
181+
client_settings_for_request['ll_model'] = {}
182+
client_settings_for_request['ll_model']['api_key'] = model_config['api_key']
183+
184+
# Prepare message history for backend
185+
message_history = []
186+
for msg in state.messages:
187+
# Create a copy of the message
188+
processed_msg = msg.copy()
189+
190+
# If there are attachments, include them in the content
191+
if "attachments" in msg and msg["attachments"]:
192+
# Start with the text content
193+
text_content = msg["content"]
194+
195+
# Handle list content format (from OpenAI API)
196+
if isinstance(text_content, list):
197+
text_parts = [part["text"] for part in text_content if part["type"] == "text"]
198+
text_content = "\n".join(text_parts)
199+
200+
# Create a list to hold structured content parts
201+
content_list = [{"type": "text", "text": text_content}]
202+
203+
non_image_references = []
204+
for attachment in msg["attachments"]:
205+
if attachment["type"].startswith("image/"):
206+
# Only add image URLs for user messages
207+
if msg["role"] in ("human", "user"):
208+
# Normalize image MIME types for compatibility
209+
mime_type = attachment["type"]
210+
if mime_type == "image/jpg":
211+
mime_type = "image/jpeg"
212+
213+
content_list.append({
214+
"type": "image_url",
215+
"image_url": {
216+
"url": f"data:{mime_type};base64,{attachment['data']}",
217+
"detail": "low"
218+
}
219+
})
220+
else:
221+
# Handle non-image files as text references
222+
non_image_references.append(f"\n[File: {attachment['name']} ({attachment['size']//1024} KB)]")
223+
224+
# If there were non-image files, append their references to the main text part
225+
if non_image_references:
226+
content_list[0]['text'] += "".join(non_image_references)
227+
228+
processed_msg["content"] = content_list
229+
# Convert list content to string format
230+
elif isinstance(msg.get("content"), list):
231+
text_parts = [part["text"] for part in msg["content"] if part["type"] == "text"]
232+
processed_msg["content"] = str("\n".join(text_parts))
233+
# Otherwise, ensure content is a string
234+
else:
235+
processed_msg["content"] = str(msg.get("content", ""))
236+
237+
message_history.append(processed_msg)
238+
239+
async with MCPClient(client_settings=client_settings_for_request) as mcp_client:
240+
final_text, tool_trace, new_history = await mcp_client.invoke(
241+
message_history=message_history
242+
)
243+
244+
# Update the history for display.
245+
# Keep the original message structure with attachments
246+
for i in range(len(new_history) - 1, -1, -1):
247+
if new_history[i].get("role") == "assistant":
248+
# Preserve any attachments from the user message
249+
user_message = state.messages[-1]
250+
if "attachments" in user_message:
251+
new_history[-1]["attachments"] = user_message["attachments"]
252+
253+
new_history[i]["content"] = final_text
254+
new_history[i]["tool_trace"] = tool_trace
255+
break
256+
257+
state.messages = new_history
258+
st.rerun()
259+
260+
except Exception as e:
261+
logger.error("Exception during invoke call:", exc_info=True)
262+
# Extract just the error message
263+
error_msg = str(e)
264+
265+
# Check if it's a file-related error
266+
if "file" in error_msg.lower() or "image" in error_msg.lower() or "content" in error_msg.lower():
267+
st.error(f"Error: {error_msg}")
268+
269+
# Add a button to remove files and retry
270+
if st.button("Remove files and retry", key="remove_files_retry"):
271+
# Remove attachments from the latest message
272+
if state.messages and "attachments" in state.messages[-1]:
273+
del state.messages[-1]["attachments"]
274+
st.rerun()
275+
else:
276+
st.error(f"Error: {error_msg}")
277+
278+
if st.button("Retry", key="reload_chatbot_error"):
279+
if state.messages and state.messages[-1]["role"] == "user":
280+
state.messages.pop()
155281
st.rerun()
156282

157283

158-
if __name__ == "__main__" or "page.py" in inspect.stack()[1].filename:
284+
if __name__ == "__main__" or ("page" in inspect.stack()[1].filename if inspect.stack() else False):
159285
try:
160286
asyncio.run(main())
161287
except ValueError as ex:
Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
import inspect
2+
3+
from client.mcp.frontend import display_commands_tab, display_ide_tab, get_fastapi_base_url, get_server_capabilities
4+
5+
import streamlit as st
6+
7+
def main():
    """Render the MCP frontend page: an IDE tab and a server-capabilities tab.

    Fetches the FastAPI base URL and the MCP server's advertised tools,
    resources, and prompts, then lays them out in two Streamlit tabs.
    """
    base_url = get_fastapi_base_url()
    tools, resources, prompts = get_server_capabilities(base_url)

    # Initialise the per-session chat history exactly once.
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = []

    ide_tab, commands_tab = st.tabs(["🛠️ IDE", "📚 Available Commands"])

    with ide_tab:
        # IDE view, driven by the original AI Optimizer logic.
        display_ide_tab()
    with commands_tab:
        # Capability listing (tools/resources/prompts) from the MCP server.
        display_commands_tab(tools, resources, prompts)
20+
21+
22+
23+
# Run when executed directly, or when loaded by a Streamlit multipage
# "page.py" loader (detected from the immediate caller's filename).
# The [1:2] slice keeps the check safe when no calling frame exists,
# where a bare inspect.stack()[1] would raise IndexError.
if __name__ == "__main__" or any(
    "page.py" in frame.filename for frame in inspect.stack()[1:2]
):
    main()

0 commit comments

Comments (0)