Commit 6bcdfb52 by tinywell

agent: simple wrapper

parent edaf65fc
from typing import Any, List, Optional

from langchain_core.prompts import PromptTemplate
from langchain.agents import AgentExecutor, create_tool_calling_agent, create_structured_chat_agent
from langchain.tools import BaseTool
from langgraph.prebuilt import create_react_agent


class Agent:
    """Thin wrapper that picks an agent implementation based on whether a prompt is supplied."""

    def __init__(self, llm, tools: List[BaseTool], prompt: Optional[PromptTemplate] = None, verbose: bool = False):
        self.llm = llm
        self.tools = tools
        self.prompt = prompt
        if not prompt:
            # No prompt supplied: fall back to langgraph's prebuilt ReAct agent.
            agent = create_react_agent(llm, tools, debug=verbose)
            self.agent_executor = agent
        else:
            # Prompt supplied: build a structured-chat agent and run it through AgentExecutor.
            agent = create_structured_chat_agent(llm, tools, prompt)
            self.agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=verbose)

    def exec(self, prompt_args: Optional[dict] = None, stream: bool = False):
        prompt_args = prompt_args or {}
        # if stream:
        #     for step in self.agent_executor.stream(prompt_args):
        #         yield step
        return self.agent_executor.invoke(input=prompt_args)
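

# --- Sketch, not part of this commit: a streaming variant of exec(). ---
# Assumption: both AgentExecutor and the compiled langgraph graph returned by
# create_react_agent expose .stream(), which yields intermediate steps as they
# are produced (the behaviour the commented-out branch in exec() hints at).
class StreamingAgent(Agent):
    """Hypothetical subclass showing how exec() could stream intermediate steps."""

    def exec_stream(self, prompt_args: Optional[dict] = None):
        # Yield each intermediate chunk (tool call / observation / model output)
        # instead of blocking until the final answer is ready.
        for step in self.agent_executor.stream(prompt_args or {}):
            yield step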
import sys, os
sys.path.append("../")

from typing import List, Union, Type, Optional

from langchain import hub
import langchain_core
from langchain_core.tools import tool, BaseTool
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, MessagesPlaceholder
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_openai import ChatOpenAI
from langchain_openai.embeddings import OpenAIEmbeddings
from langchain.agents import AgentExecutor, create_tool_calling_agent, create_structured_chat_agent
from pydantic import BaseModel, Field

from src.server.agent import Agent
from src.config.prompts import PROMPT_AGENT_SYS, PROMPT_AGENT_HUMAN
class CalcInput(BaseModel):
    a: int = Field(..., description="the first number")
    b: int = Field(..., description="the second number")


class Calc(BaseTool):
    name: str = "calc"
    description: str = "A simple calculator tool that returns the sum of two numbers."
    args_schema: Type[BaseModel] = CalcInput

    def _run(
        self, a: int, b: int, run_manager: Optional[CallbackManagerForToolRun] = None
    ) -> str:
        """Use the tool."""
        print(f"Calculating {a} + {b}")
        return str(a + b)


tools = [Calc()]
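
# Sketch, not part of the original commit: BaseTool implements the Runnable
# interface, so the tool can be smoke-tested directly before it is handed to the
# agent. With the _run() above this should print the string "3".
print(Calc().invoke({"a": 1, "b": 2}))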
# Local OpenAI-compatible endpoint serving Qwen2-7B; the API key is a placeholder.
llm = ChatOpenAI(
    openai_api_key='xxxxxxxxxxxxx',
    openai_api_base='http://192.168.10.14:8000/v1',
    # openai_api_base='https://127.0.0.1:8000/v1',
    model_name='Qwen2-7B',
    verbose=True,
    temperature=0,
)
# prompt = hub.pull("hwchase17/openai-functions-agent")
# Structured-chat style prompt assembled from the local PROMPT_AGENT_SYS /
# PROMPT_AGENT_HUMAN templates instead of pulling one from the hub.
input_variables = ['agent_scratchpad', 'input', 'tool_names', 'tools']
input_types = {
    'chat_history': List[Union[
        langchain_core.messages.ai.AIMessage,
        langchain_core.messages.human.HumanMessage,
        langchain_core.messages.chat.ChatMessage,
        langchain_core.messages.system.SystemMessage,
        langchain_core.messages.function.FunctionMessage,
        langchain_core.messages.tool.ToolMessage,
    ]]
}
messages = [
    SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=['tool_names', 'tools'], template=PROMPT_AGENT_SYS)),
    MessagesPlaceholder(variable_name='chat_history', optional=True),
    HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['agent_scratchpad', 'input'], template=PROMPT_AGENT_HUMAN)),
]
prompt = ChatPromptTemplate(
    input_variables=input_variables,
    input_types=input_types,
    messages=messages,
)
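
# Sketch, not part of the original commit: create_structured_chat_agent expects the
# prompt to provide the 'tools', 'tool_names' and 'agent_scratchpad' variables, so a
# quick look at input_variables confirms the template above is wired correctly.
print(sorted(prompt.input_variables))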
# for msg in prompt.messages:
#     print(msg)
agent = Agent(llm=llm, tools=tools, prompt=prompt, verbose=True)
# res = agent.agent_executor.invoke(input={"input": "what is 1 + 1?"})
# res = agent.invoke(prompt_args={"input": "what is 1 + 1?"})
res = agent.exec(prompt_args={"input": "what is 1 + 1?"})
# agent = create_structured_chat_agent(llm, tools, prompt)
# agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True)
# res = agent_executor.invoke(input={"input": "what is 1 + 1?"})
print(res)
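
# Sketch / assumption, not part of the original commit: when Agent is constructed
# without a prompt it wraps langgraph's create_react_agent, whose compiled graph
# expects a {"messages": [...]} state instead of {"input": ...}. That path would be
# exercised roughly like this:
# react_agent = Agent(llm=llm, tools=tools, verbose=True)
# react_res = react_agent.exec(prompt_args={"messages": [("human", "what is 1 + 1?")]})
# print(react_res["messages"][-1].content)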
@@ -29,7 +29,7 @@ def test_qaext():
 def test_chatextend():
     ext = ChatExtend(base_llm)
     message = [
-        ("我们明天去爬山吧", "好呀"),
+        ("明天去爬山怎么样", "好主意"),
         ("天气怎么样", "天气晴朗"),
     ]