Commit 6bcdfb52
authored Jul 10, 2024 by tinywell

agent 简单封装 (simple agent wrapper)

parent edaf65fc
Showing 3 changed files with 104 additions and 1 deletion (+104 −1)

src/server/agent.py   +26 −0
test/agent_test.py    +77 −0
test/rag_test.py      +1 −1
src/server/agent.py  (new file, 0 → 100644)
from typing import Any, List

from langchain_core.prompts import PromptTemplate
from langchain.agents import AgentExecutor, create_tool_calling_agent, create_structured_chat_agent
from langchain.tools import BaseTool
from langgraph.prebuilt import create_react_agent


class Agent:
    def __init__(self, llm, tools: List[BaseTool], prompt: PromptTemplate = None, verbose: bool = False):
        self.llm = llm
        self.tools = tools
        self.prompt = prompt
        if not prompt:
            # No prompt supplied: use langgraph's prebuilt ReAct agent directly.
            agent = create_react_agent(llm, tools, debug=verbose)
            self.agent_executor = agent
        else:
            # Prompt supplied: build a structured chat agent and wrap it in an AgentExecutor.
            agent = create_structured_chat_agent(llm, tools, prompt)
            self.agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=verbose)

    def exec(self, prompt_args: dict = {}, stream: bool = False):
        # if stream:
        #     for step in self.agent_executor.stream(prompt_args):
        #         yield step
        return self.agent_executor.invoke(input=prompt_args)
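A minimal usage sketch of the wrapper (not part of this commit): the base URL, API key, and model name below are placeholders, and `tools` / `chat_prompt` stand in for the objects built in test/agent_test.py further down.

# Usage sketch, assuming an OpenAI-compatible endpoint and the repo layout used by the tests.
from langchain_openai import ChatOpenAI
from src.server.agent import Agent

llm = ChatOpenAI(
    openai_api_key="placeholder",
    openai_api_base="http://localhost:8000/v1",  # any OpenAI-compatible endpoint
    model_name="Qwen2-7B",
    temperature=0,
)

# Without a prompt, Agent hands back langgraph's prebuilt ReAct agent; that compiled
# graph expects {"messages": [...]} as input rather than {"input": ...}.
react_agent = Agent(llm=llm, tools=[])

# With a prompt, Agent builds a structured chat agent inside an AgentExecutor,
# and exec() forwards prompt_args to its invoke() call:
# chat_agent = Agent(llm=llm, tools=tools, prompt=chat_prompt, verbose=True)
# res = chat_agent.exec(prompt_args={"input": "what is 1 + 1?"})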
test/agent_test.py  (new file, 0 → 100644)
import sys, os
sys.path.append("../")

from typing import List, Union, Type, Optional

from langchain import hub
import langchain_core
from langchain_core.tools import tool, BaseTool
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, MessagesPlaceholder
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_openai import ChatOpenAI
from langchain_openai.embeddings import OpenAIEmbeddings
from langchain.agents import AgentExecutor, create_tool_calling_agent, create_structured_chat_agent
from pydantic import BaseModel, Field

from src.server.agent import Agent
from src.config.prompts import PROMPT_AGENT_SYS, PROMPT_AGENT_HUMAN


class CalcInput(BaseModel):
    a: int = Field(..., description="第一个数")  # the first number
    b: int = Field(..., description="第二个数")  # the second number


class Calc(BaseTool):
    name = "calc"
    description = "一个简单的计算工具,可以计算两个数的和"  # a simple tool that adds two numbers
    args_schema: Type[BaseModel] = CalcInput

    def _run(self, a: int, b: int, run_manager: Optional[CallbackManagerForToolRun] = None) -> str:
        """Use the tool."""
        print(f"Calculating {a} + {b}")
        return a + b


tools = [Calc()]

llm = ChatOpenAI(
    openai_api_key='xxxxxxxxxxxxx',
    openai_api_base='http://192.168.10.14:8000/v1',
    # openai_api_base='https://127.0.0.1:8000/v1',
    model_name='Qwen2-7B',
    verbose=True,
    temperature=0,
)

# prompt = hub.pull("hwchase17/openai-functions-agent")
input_variables = ['agent_scratchpad', 'input', 'tool_names', 'tools']
input_types = {'chat_history': List[Union[langchain_core.messages.ai.AIMessage,
                                          langchain_core.messages.human.HumanMessage,
                                          langchain_core.messages.chat.ChatMessage,
                                          langchain_core.messages.system.SystemMessage,
                                          langchain_core.messages.function.FunctionMessage,
                                          langchain_core.messages.tool.ToolMessage]]}
messages = [
    SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=['tool_names', 'tools'], template=PROMPT_AGENT_SYS)),
    MessagesPlaceholder(variable_name='chat_history', optional=True),
    HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['agent_scratchpad', 'input'], template=PROMPT_AGENT_HUMAN)),
]
prompt = ChatPromptTemplate(input_variables=input_variables, input_types=input_types, messages=messages)

# for msg in prompt.messages:
#     print(msg)

agent = Agent(llm=llm, tools=tools, prompt=prompt, verbose=True)

# res = agent.agent_executor.invoke(input={"input": "what is 1 + 1?"})
# res = agent.invoke(prompt_args={"input": "what is 1 + 1?"})
res = agent.exec(prompt_args={"input": "what is 1 + 1?"})

# agent = create_structured_chat_agent(llm, tools, prompt)
# agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True)
# res = agent_executor.invoke(input={"input": "what is 1 + 1?"})
print(res)
test/rag_test.py
...
@@ -29,7 +29,7 @@ def test_qaext():
def test_chatextend():
    ext = ChatExtend(base_llm)
    message = [
        ("我们明天去爬山吧", "好呀"),      # ("Let's go hiking tomorrow", "Sure")
        ("明天去爬山怎么样", "好主意"),    # ("How about hiking tomorrow?", "Good idea")
        ("天气怎么样", "天气晴朗"),        # ("How's the weather?", "It's sunny")
    ]
...