web

web/agent/.gitignore (new file, 9 lines, vendored)
@@ -0,0 +1,9 @@
venv/
__pycache__/
*.pyc
.env
.vercel

# python
.venv/
.langgraph_api/

web/agent/langgraph.json (new file, 10 lines)
@@ -0,0 +1,10 @@
{
  "python_version": "3.12",
  "dockerfile_lines": [],
  "dependencies": ["."],
  "package_manager": "uv",
  "graphs": {
    "sample_agent": "./main.py:graph"
  },
  "env": ".env"
}
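
This config is what the LangGraph CLI reads: it pins the runtime to Python 3.12, installs the local package (".") as the sole dependency using uv, registers one graph named sample_agent that resolves to the module-level graph object in ./main.py, and loads environment variables from .env.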

web/agent/main.py (new file, 136 lines)
@@ -0,0 +1,136 @@
"""
This is the main entry point for the agent.
It defines the workflow graph, state, tools, nodes and edges.
"""

from typing import Any, List

from langchain.tools import tool
from langchain_core.messages import BaseMessage, SystemMessage
from langchain_core.runnables import RunnableConfig
from langchain_openai import ChatOpenAI
from langgraph.graph import END, MessagesState, StateGraph
from langgraph.prebuilt import ToolNode
from langgraph.types import Command


class AgentState(MessagesState):
    """
    Here we define the state of the agent.

    In this instance, we're inheriting from MessagesState, which brings in the
    built-in `messages` field. We're also adding two custom fields: `proverbs`,
    a list of proverbs the assistant can reference, and `tools`, the frontend
    (ag-ui) tools that get bound to the model in chat_node.
    """

    proverbs: List[str]
    tools: List[Any]
    # your_custom_agent_state: str = ""
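
# For reference (a sketch, values are illustrative), an input state for this
# graph might look like:
# {"messages": [...], "proverbs": ["A stitch in time saves nine"], "tools": []}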


@tool
def get_weather(location: str):
    """
    Get the weather for a given location.
    """
    return f"The weather for {location} is 70 degrees."


# @tool
# def your_tool_here(your_arg: str):
#     """Your tool description here."""
#     print("Your tool logic here")
#     return "Your tool response here."

backend_tools = [
    get_weather,
    # your_tool_here
]

# Extract tool names from backend_tools for comparison
backend_tool_names = [t.name for t in backend_tools]


async def chat_node(state: AgentState, config: RunnableConfig) -> Command[str]:
    """
    Standard chat node based on the ReAct design pattern. It handles:
    - The model to use (and binds in the frontend (ag-ui) tools from state
      plus the backend tools defined above)
    - The system prompt
    - Getting a response from the model
    - Handling tool calls

    For more about the ReAct design pattern, see:
    https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg
    """

    # 1. Define the model
    model = ChatOpenAI(model="gpt-5-mini")

    # 2. Bind the tools to the model
    model_with_tools = model.bind_tools(
        [
            *state.get("tools", []),  # bind tools defined by ag-ui
            *backend_tools,
            # your_tool_here
        ],
        # 2.1 Disable parallel tool calls to avoid race conditions.
        # Enable them for faster performance if you are willing to manage
        # the complexity of running tool calls in parallel.
        parallel_tool_calls=False,
    )

    # 3. Define the system message by which the chat model will be run
    system_message = SystemMessage(
        content=f"You are a helpful assistant. The current proverbs are {state.get('proverbs', [])}."
    )

    # 4. Run the model to generate a response
    response = await model_with_tools.ainvoke(
        [
            system_message,
            *state["messages"],
        ],
        config,
    )

    # Only route to the tool node if the response calls one of the backend
    # tools defined above; calls to frontend (ag-ui) tools fall through below.
    if route_to_tool_node(response):
        print("routing to tool node")
        return Command(
            goto="tool_node",
            update={
                "messages": [response],
            },
        )

    # 5. We've handled all tool calls, so we can end the graph.
    return Command(
        goto=END,
        update={
            "messages": [response],
        },
    )


def route_to_tool_node(response: BaseMessage):
    """
    Route to tool node if any tool call in the response matches a backend tool name.
    """
    tool_calls = getattr(response, "tool_calls", None)
    if not tool_calls:
        return False

    for tool_call in tool_calls:
        if tool_call.get("name") in backend_tool_names:
            return True
    return False
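
# Note: entries in response.tool_calls are LangChain ToolCall dicts, e.g.
# {"name": "get_weather", "args": {"location": "Tokyo"}, "id": "call_abc"}
# (the id here is illustrative). Frontend (ag-ui) tool names never appear in
# backend_tool_names, so their calls skip ToolNode and end the graph, leaving
# execution to the frontend.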


# Define the workflow graph
workflow = StateGraph(AgentState)
workflow.add_node("chat_node", chat_node)
workflow.add_node("tool_node", ToolNode(tools=backend_tools))
workflow.add_edge("tool_node", "chat_node")
workflow.set_entry_point("chat_node")

graph = workflow.compile()
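
A minimal sketch of exercising the compiled graph directly (assumes the working directory is web/agent so that "from main import graph" resolves, and that OPENAI_API_KEY is present in .env; the empty "tools" list reflects a run with no ag-ui frontend attached):

import asyncio

from dotenv import load_dotenv

from main import graph

load_dotenv()  # make OPENAI_API_KEY from .env visible to ChatOpenAI


async def demo():
    result = await graph.ainvoke(
        {
            "messages": [{"role": "user", "content": "What's the weather in Tokyo?"}],
            "proverbs": [],
            "tools": [],  # no frontend tools in this standalone run
        }
    )
    print(result["messages"][-1].content)


asyncio.run(demo())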

web/agent/pyproject.toml (new file, 16 lines)
@@ -0,0 +1,16 @@
[project]
name = "sample-agent"
version = "0.1.0"
description = "A LangGraph agent"
requires-python = ">=3.12"
dependencies = [
    "langchain==1.1.0",
    "langgraph==1.0.4",
    "langsmith>=0.4.49",
    "openai>=1.68.2,<2.0.0",
    "fastapi>=0.115.5,<1.0.0",
    "uvicorn>=0.29.0,<1.0.0",
    "python-dotenv>=1.0.0,<2.0.0",
    "langgraph-cli[inmem]>=0.4.7",
    "langchain-openai>=1.1.0",
]
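
With uv installed, running uv sync inside web/agent resolves these dependencies into a local .venv (pinned by the uv.lock below), and uv run langgraph dev should start the in-memory dev server provided by langgraph-cli[inmem], wired up through the langgraph.json above.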

web/agent/uv.lock (generated, new file, 1625 lines)
File diff suppressed because it is too large