add mcp
@@ -9,9 +9,10 @@ from langchain.tools import tool
 from langchain_core.messages import BaseMessage, SystemMessage
 from langchain_core.runnables import RunnableConfig
 from langchain_anthropic import ChatAnthropic
-from langgraph.graph import END, MessagesState, StateGraph
-from langgraph.prebuilt import ToolNode
+from langgraph.graph import END, MessagesState, StateGraph, START
+from langgraph.prebuilt import ToolNode, tools_condition
 from langgraph.types import Command
+from langchain_mcp_adapters.client import MultiServerMCPClient


 class AgentState(MessagesState):
@@ -36,11 +37,34 @@ backend_tools = [
     # your_tool_here
 ]

-# Extract tool names from backend_tools for comparison
-backend_tool_names = [tool.name for tool in backend_tools]
+# Initialize MCP client
+mcp_client = MultiServerMCPClient(
+    {
+        "cavepedia": {
+            "transport": "streamable_http",
+            "url": "https://mcp.caving.dev/mcp",
+            "timeout": 10.0,
+        }
+    }
+)
+
+# Global variable to hold loaded MCP tools
+_mcp_tools = None
+
+async def get_mcp_tools():
+    """Lazy load MCP tools on first access."""
+    global _mcp_tools
+    if _mcp_tools is None:
+        try:
+            _mcp_tools = await mcp_client.get_tools()
+            print(f"Loaded {len(_mcp_tools)} tools from MCP server")
+        except Exception as e:
+            print(f"Warning: Failed to load MCP tools: {e}")
+            _mcp_tools = []
+    return _mcp_tools


-async def chat_node(state: AgentState, config: RunnableConfig) -> Command[str]:
+async def chat_node(state: AgentState, config: RunnableConfig) -> dict:
     """
     Standard chat node based on the ReAct design pattern. It handles:
     - The model to use (and binds in CopilotKit actions and the tools defined above)
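Note: MultiServerMCPClient comes from the langchain-mcp-adapters package, and get_tools() returns the configured servers' tools as ordinary LangChain tools. A minimal standalone sketch of the same lazy-loading idea, reusing the cavepedia config from the hunk above (the asyncio.run driver is illustrative and not part of this commit):

    import asyncio

    from langchain_mcp_adapters.client import MultiServerMCPClient

    async def main():
        client = MultiServerMCPClient(
            {
                "cavepedia": {
                    "transport": "streamable_http",
                    "url": "https://mcp.caving.dev/mcp",
                    "timeout": 10.0,
                }
            }
        )
        # get_tools() fetches tool schemas over MCP and wraps them
        # as LangChain tools, so they bind like any local @tool
        tools = await client.get_tools()
        print([t.name for t in tools])

    asyncio.run(main())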
@@ -55,11 +79,15 @@ async def chat_node(state: AgentState, config: RunnableConfig) -> Command[str]:
     # 1. Define the model
     model = ChatAnthropic(model="claude-sonnet-4-5-20250929")

+    # 1.5 Load MCP tools from the cavepedia server
+    mcp_tools = await get_mcp_tools()
+
     # 2. Bind the tools to the model
     model_with_tools = model.bind_tools(
         [
             *state.get("tools", []), # bind tools defined by ag-ui
             *backend_tools,
+            *mcp_tools, # Add MCP tools from cavepedia server
             # your_tool_here
         ],
         # 2.1 Disable parallel tool calls to avoid race conditions,
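Note: the hunk ends mid-comment at "2.1 Disable parallel tool calls"; serializing tool calls matters a little more now that some tools round-trip to an MCP server. A hedged sketch of what that bind looks like, assuming the parallel_tool_calls keyword that recent langchain-anthropic versions accept on bind_tools (the keyword itself is not visible in this diff):

    # Mirrors the bind_tools call in chat_node above; parallel_tool_calls=False
    # is an assumption about the langchain-anthropic API, not part of this diff.
    model_with_tools = model.bind_tools(
        [
            *state.get("tools", []),
            *backend_tools,
            *mcp_tools,
        ],
        parallel_tool_calls=False,  # one tool call per model turn
    )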
@@ -70,7 +98,7 @@ async def chat_node(state: AgentState, config: RunnableConfig) -> Command[str]:

     # 3. Define the system message by which the chat model will be run
     system_message = SystemMessage(
-        content="You are a helpful assistant."
+        content="You are a helpful assistant with access to cave-related information through the Cavepedia MCP server. You can help users find information about caves, caving techniques, and related topics."
     )

     # 4. Run the model to generate a response
@@ -82,44 +110,40 @@ async def chat_node(state: AgentState, config: RunnableConfig) -> Command[str]:
         config,
     )

-    # only route to tool node if tool is not in the tools list
-    if route_to_tool_node(response):
-        print("routing to tool node")
-        return Command(
-            goto="tool_node",
-            update={
-                "messages": [response],
-            },
-        )
-
-    # 5. We've handled all tool calls, so we can end the graph.
-    return Command(
-        goto=END,
-        update={
-            "messages": [response],
-        },
-    )
+    # 5. Return the response in the messages
+    return {"messages": [response]}


-def route_to_tool_node(response: BaseMessage):
+async def tool_node_wrapper(state: AgentState) -> dict:
     """
-    Route to tool node if any tool call in the response matches a backend tool name.
+    Custom tool node that handles both backend tools and MCP tools.
     """
-    tool_calls = getattr(response, "tool_calls", None)
-    if not tool_calls:
-        return False
+    # Load MCP tools and combine with backend tools
+    mcp_tools = await get_mcp_tools()
+    all_tools = [*backend_tools, *mcp_tools]

-    for tool_call in tool_calls:
-        if tool_call.get("name") in backend_tool_names:
-            return True
-    return False
+    # Use the standard ToolNode with all tools
+    node = ToolNode(tools=all_tools)
+    result = await node.ainvoke(state)
+
+    return result


 # Define the workflow graph
 workflow = StateGraph(AgentState)
 workflow.add_node("chat_node", chat_node)
-workflow.add_node("tool_node", ToolNode(tools=backend_tools))
-workflow.add_edge("tool_node", "chat_node")
-workflow.set_entry_point("chat_node")
+workflow.add_node("tools", tool_node_wrapper) # Must be named "tools" for tools_condition
+
+# Set entry point
+workflow.add_edge(START, "chat_node")
+
+# Use tools_condition for proper routing
+workflow.add_conditional_edges(
+    "chat_node",
+    tools_condition,
+)
+
+# After tools execute, go back to chat
+workflow.add_edge("tools", "chat_node")

 graph = workflow.compile()
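Note: tools_condition is LangGraph's prebuilt router: it sends the graph to the node named "tools" whenever the last AI message carries tool calls, and to END otherwise, which is why tool_node_wrapper must be registered under that exact name. A quick smoke-test sketch for the compiled graph (the question text is hypothetical; ainvoke is required because the tool node is async):

    # Assumes this runs in the same module that defines `graph` above.
    import asyncio

    from langchain_core.messages import HumanMessage

    async def smoke_test():
        result = await graph.ainvoke(
            {"messages": [HumanMessage(content="What gear does vertical caving need?")]}
        )
        # The last message is the model's final answer after any tool round-trips
        print(result["messages"][-1].content)

    asyncio.run(smoke_test())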