Commit 65d090a2 by 周峻哲

Merge branch 'dev_lae' of http://gitlab.iv.brilliance.com.cn/wenjinghao/LAE into dev_lae

parents 17e7b130 4d1057e8
# Use the official Python 3.10 base image
FROM python:3.10
# Set the working directory
WORKDIR /app
# Copy everything in the current directory into the working directory
COPY . /app
# Configure pip to use the Tsinghua mirror
RUN pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple
# Install the Python dependencies
RUN pip install --no-cache-dir -r requirements.txt
# Run a simple Python command to verify the environment is configured correctly
CMD ["python", "--version"]
version: '3'
services:
  postgres:
    image: postgres
    container_name: lae_pgsql
    environment:
      POSTGRES_PASSWORD: 111111
    ports:
      - "5433:5432"
    volumes:
      - ./lae_pg_data:/var/lib/postgresql/data
      - ./init_db:/docker-entrypoint-initdb.d
  python:
    build:
      context: .
      dockerfile: Dockerfile
    image: lae_python_image:3.10
    container_name: lae_python
    volumes:
      - .:/app
    ports:
      - "8003:8003"
    stdin_open: true
    tty: true
    command: /bin/bash
volumes:
  lae_pg_data:
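
With this compose file, Postgres is reachable from the host on port 5433 and only POSTGRES_PASSWORD is overridden, so the image defaults apply for the user and database. A minimal connectivity check, assuming psycopg2 is installed (host, user, and dbname below are assumptions based on those defaults, not part of the commit):

# Minimal sketch: verify the lae_pgsql container is reachable from the host.
import psycopg2

conn = psycopg2.connect(
    host="localhost",   # from inside the lae_python container this would be the service name "postgres" on port 5432
    port=5433,          # host port mapped to the container's 5432 in docker-compose
    user="postgres",    # image default, since only POSTGRES_PASSWORD is set
    password="111111",
    dbname="postgres",  # image default
)
with conn.cursor() as cur:
    cur.execute("SELECT version();")
    print(cur.fetchone()[0])
conn.close()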
bitsandbytes==0.41.1
#bitsandbytes==0.41.1
cpm-kernels==1.0.11
fastapi==0.100.0
Flask==2.3.2
Flask==2.1.1
jieba==0.42.1
langchain==0.1.13
peft==0.4.0
......
@@ -152,6 +152,31 @@ class CRUD:
        return self.db.fetchall()

    def get_chats(self, account):
        query = f'SELECT chat.chat_id,chat.info FROM chat JOIN c_user ON chat.user_id = c_user.user_id WHERE c_user.account = (%s);'
        query = f'SELECT chat.chat_id,chat.info FROM chat JOIN c_user ON chat.user_id = c_user.user_id WHERE c_user.account = (%s) ORDER BY chat.create_time DESC;'
        self.db.execute_args(query, (account,))
        return self.db.fetchall()

    def create_chat(self, user_id, info, deleted):
        query = f'INSERT INTO chat(user_id, info, deleted) VALUES (%s,%s,%s) RETURNING chat_id'
        self.db.execute_args(query, (user_id, info, deleted))
        ans = self.db.fetchall()[0][0]
        return ans

    def get_uersid_from_account(self, account):
        query = f'SELECT user_id FROM c_user WHERE account = (%s)'
        self.db.execute_args(query, (account, ))
        ans = self.db.fetchall()[0][0]
        print(ans)
        return ans

    def get_chat_info(self, chat_id):
        query = f'SELECT info FROM chat WHERE chat_id = (%s)'
        self.db.execute_args(query, (chat_id,))
        ans = self.db.fetchall()[0][0]
        print(ans)
        return ans

    def set_info(self, chat_id, info):
        query = f'UPDATE chat SET info = (%s) WHERE chat_id = (%s)'
        self.db.execute_args(query, (info, chat_id))
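
The CRUD methods above call self.db.execute_args and self.db.fetchall on an injected database handle that is not part of this hunk. A minimal psycopg2-backed wrapper with that interface might look like this (class name and constructor are assumptions):

# Hypothetical wrapper exposing the execute_args / fetchall interface used by CRUD.
import psycopg2

class DB:
    def __init__(self, **conn_kwargs):
        # conn_kwargs: host, port, user, password, dbname (see the compose file above)
        self.conn = psycopg2.connect(**conn_kwargs)
        self.cursor = self.conn.cursor()

    def execute_args(self, query, args):
        # Parameterized execution; psycopg2 fills the %s placeholders safely.
        self.cursor.execute(query, args)
        self.conn.commit()

    def fetchall(self):
        return self.cursor.fetchall()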
@@ -41,6 +41,7 @@ PROMPT1 = PromptTemplate(input_variables=["context", "question"], template=promp
SAFE_RESPONSE = "您好,我不具备人类属性,因此没有名字。我可以协助您完成范围广泛的任务并提供有关各种主题的信息,比如回答问题,提供定义和解释及建议。如果您有任何问题,请随时向我提问。"
BLOCKED_KEYWORDS = ["文心一言", "百度", "模型"]

class QA:
    def __init__(self, _prompt, _base_llm, _llm_kwargs, _prompt_kwargs, _db, _faiss_db):
        self.prompt = _prompt
@@ -124,6 +125,47 @@ class QA:
        yield history
        await task
    async def async_chat2(self, history):
        _question = history[-1][0]
        history = history[:-1]
        if self.contains_blocked_keywords(_question):
            self.cur_answer = SAFE_RESPONSE
            yield [(_question, self.cur_answer)]
            return
        self.cur_similarity = self.get_similarity(_aquestion=_question)
        self.cur_question = self.prompt.format(context=self.cur_similarity, question=_question)
        callback = AsyncIteratorCallbackHandler()

        async def wrap_done(fn: Awaitable, event: asyncio.Event):
            try:
                await fn
            except Exception as e:
                import traceback
                traceback.print_exc()
                print(f"Caught exception: {e}")
            finally:
                event.set()

        task = asyncio.create_task(
            wrap_done(self.llm.arun(context=self.cur_similarity, question=_question, callbacks=[callback]),
                      callback.done))
        self.cur_answer = ""
        print(_question, self.cur_answer)
        history.append((_question, self.cur_answer))
        async for token in callback.aiter():
            self.cur_answer += token
            if self.contains_blocked_keywords(self.cur_answer):
                self.cur_answer = SAFE_RESPONSE
                history[-1] = (_question, self.cur_answer)
                yield history
                return
            history[-1] = (_question, self.cur_answer)
            yield history
        await task

    def get_history(self):
        self.history = self.crud.get_history(self.chat_id)
        return self.history
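
The contains_blocked_keywords helper used above is not shown in this hunk; a plausible minimal version over BLOCKED_KEYWORDS would be (an assumption, not the committed code):

    def contains_blocked_keywords(self, text):
        # Hypothetical sketch: treat the text as blocked if any keyword appears in it.
        return any(keyword in text for keyword in BLOCKED_KEYWORDS)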
@@ -149,6 +191,27 @@ class QA:
        self.chat_id = chat_id
        self.history = self.crud.get_history(self.chat_id)

    def create_chat(self, user_account):
        user_id = self.crud.get_uersid_from_account(user_account)
        self.chat_id = self.crud.create_chat(user_id, '\t\t', '0')

    def set_info(self, question):
        info = self.crud.get_chat_info(self.chat_id)
        if info == '\t\t':
            if len(question) <= 10:
                n_info = question
                self.crud.set_info(self.chat_id, n_info)
            else:
                info_prompt = """'''
{question}
'''
请你用十个字之内总结上述问题,你的输出不得大于10个字。
"""
                info_prompt_t = PromptTemplate(input_variables=["question"], template=info_prompt)
                info_llm = LLMChain(llm=self.base_llm, prompt=info_prompt_t, llm_kwargs=self.llm_kwargs)
                n_info = info_llm.run(question=question)
                self.crud.set_info(self.chat_id, n_info)
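
A hedged sketch of how the new chat bookkeeping above fits together (the QA instance, account name, and question are assumptions; their setup is not shown in this hunk):

# Hypothetical usage, assuming `qa` is a QA instance wired to the CRUD layer above.
qa.create_chat("demo_account")               # account name is an assumption; stores the '\t\t' placeholder title
qa.set_info("我国什么时候全面开放低空领域")  # longer than 10 chars, so the LLM summary (<= 10 chars) becomes the chat title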
if __name__ == "__main__":
    # Database connection
......
# -*- coding: utf-8 -*-
import sys
sys.path.append('../')
import gradio as gr
from langchain.prompts import PromptTemplate
@@ -70,64 +73,162 @@ def main():
        return gr.Button(interactive=True)

    def get_users():
        return my_chat.get_users()
        o_users = my_chat.get_users()
        users_l = [item[0] for item in o_users]
        return gr.components.Radio(choices=users_l, label="选择一个用户", value=users_l[0], interactive=True), users_l[
            0]

    def create_chat(user_account):
        my_chat.create_chat(user_account)

    def get_chats(user_account):
        o_chats = my_chat.get_chats(user_account)
        chats_l = [item[0]+':'+item[1] for item in o_chats]
        return gr.components.Radio(choices=chats_l, label="选择一个对话", value=chats_l[0], interactive=True)
        if len(o_chats) >= 13:
            o_chats = o_chats[:13]
        chats_l = [item[0] + ':' + item[1] for item in o_chats]
        if my_chat.chat_id:
            result = [item for item in chats_l if item.split(":")[0].strip() == my_chat.chat_id][0]
            return gr.components.Radio(choices=chats_l, label="历史对话", value=result, interactive=True,
                                       show_label=True)
        else:
            return gr.components.Radio(choices=chats_l, label="历史对话", value=chats_l[0], interactive=True,
                                       show_label=True)

    def set_info(question):
        my_chat.set_info(question)

    def set_chat_id(chat_id_info):
        chat_id = chat_id_info.split(':')[0]
        my_chat.set_chat_id(chat_id)

    o_users = get_users()
    users_l = [item[0] for item in o_users]
    o_chats = my_chat.get_chats(users_l[0])
    chats_l = [item[0]+':'+item[1] for item in o_chats]
    set_chat_id(chats_l[0])
    print(my_chat.chat_id)
    print(type(my_chat.chat_id))
    a = my_chat.get_history()
    print(a)
    with gr.Blocks() as demo:
        gr.HTML("""<h1 align="center">低空经济知识问答</h1>""")

    def load():
        l_users, t_user = get_users()
        l_chats = get_chats(t_user)
        return l_users, l_chats

    def clear_text(t):
        if t == "请输入您的问题":
            return ""
        else:
            return t

    def reset_text():
        return ""

    def blur(t):
        if t == "":
            return "请输入您的问题"
        else:
            return t

    def text_his(text, history):
        history = history + [[text, None]]
        return history

    def clear_tip(his):
        if not his[0][0] and his[0][1] == "你好,我是新晨科技股份的人工智能助手小晨,如果您有低空经济相关问题,欢迎随时向我咨询。":
            return his[1:]
        else:
            return his

    with gr.Blocks(css='index.css', title="低空经济知识问答") as demo:
        gr.HTML("""<h1 align="center">低空经济知识问答</h1>""", visible=False)
        with gr.Row():
            with gr.Column(scale=2):
                users = gr.components.Radio(choices=users_l, label="选择一个用户", value=users_l[0], interactive=True, visible=False)
                chats = gr.components.Radio(choices=chats_l, label="选择一个对话", value=chats_l[0], interactive=True)
            with gr.Column(scale=2, visible=False):
                users = gr.components.Radio(choices=[], label="选择一个用户", interactive=True,
                                            visible=False, show_label=False)
                chats = gr.components.Radio(choices=[], label="历史对话", interactive=True,
                                            show_label=True, visible=False)
                new_chat_btn = gr.Button("新建对话", visible=False)
            with gr.Column(scale=8):
                chatbot = gr.Chatbot(bubble_full_width=False,
                                     avatar_images=(ICON_PATH + '\\user.png', ICON_PATH + "\\bot.png"),
                                     value=show_history(), height=400)
                input_text = gr.Textbox(show_label=True, lines=3, label="文本输入")
                sub_btn = gr.Button("提交")
        users.change(get_chats, [users], [chats]).then(
            set_chat_id, [chats], None
        ).then(
            show_history, None, chatbot
        )
        chats.change(set_chat_id, [chats], None).then(
            show_history, None, chatbot
        )
        sub_btn.click(my_chat.async_chat, [input_text], [chatbot]
        ).then(
            stop_btn, None, sub_btn
        ).then(
            my_chat.update_history, None, None
        ).then(
            show_history, None, chatbot
        ).then(
            clear, None, [input_text]
        ).then(
            restart_btn, None, sub_btn
                                     avatar_images=(ICON_PATH + '\\user2.png', ICON_PATH + "\\bot2.png"),
                                     value=[[None,
                                             "我是新晨科技股份有限公司开发的人工智能助手,名叫小晨,如果您有任何问题,欢迎随时向我咨询"]],
                                     height=400, show_copy_button=True,
                                     show_label=False, line_breaks=True)
                with gr.Row():
                    input_text = gr.Textbox(show_label=False, lines=1, label="文本输入", scale=9, container=False,
                                            placeholder="请输入您的问题", max_lines=1)
                    sub_btn = gr.Button("提交", scale=1)
        sub_btn.click(
            stop_btn, [], sub_btn
        ).success(
            clear_tip, [chatbot], [chatbot]
        ).success(
            text_his, [input_text, chatbot], [chatbot]
        ).success(
            reset_text, [], input_text
        ).success(
            my_chat.async_chat2, [chatbot], [chatbot]
        ).success(
            restart_btn, [], sub_btn
        )
    demo.queue().launch(share=False, inbrowser=True, server_name='192.168.22.127', server_port=GR_PORT)
        # input_text.submit(
        # stop_btn, [], sub_btn
        # ).then(
        # my_chat.async_chat2, [input_text, chatbot], [chatbot]
        # ).then(
        # restart_btn, [], sub_btn
        # )
        demo.load(load, [], [users, chats])
        # input_text.submit(my_chat.async_chat, [input_text], [chatbot]
        # ).then(
        # stop_btn, None, sub_btn
        # ).then(
        # set_info, [input_text], []
        # ).then(
        # get_chats, [users], [chats]
        # ).then(
        # my_chat.update_history, None, None
        # ).then(
        # show_history, None, chatbot
        # ).then(
        # clear, None, [input_text]
        # ).then(
        # restart_btn, None, sub_btn
        # ).then(
        # reset_text, [], input_text
        # )
        # new_chat_btn.click(create_chat, [users], []).then(
        # get_chats, [users], [chats]
        # )
        # users.change(get_chats, [users], [chats]).then(
        # set_chat_id, [chats], None
        # ).then(
        # show_history, None, chatbot
        # )
        # chats.change(set_chat_id, [chats], None).then(
        # show_history, None, chatbot
        # )
        # sub_btn.click(my_chat.async_chat, [input_text], [chatbot]
        # ).then(
        # stop_btn, None, sub_btn
        # ).then(
        # set_info, [input_text], []
        # ).then(
        # get_chats, [users], [chats]
        # ).then(
        # my_chat.update_history, None, None
        # ).then(
        # show_history, None, chatbot
        # ).then(
        # clear, None, [input_text]
        # ).then(
        # restart_btn, None, sub_btn
        # ).then(
        # reset_text, [], input_text
        # )
    demo.queue().launch(share=False, inbrowser=True, server_name='192.168.22.32', server_port=8888)
if __name__ == "__main__":
......
#component-5 {
height: 76vh !important;
overflow: auto !important;
}
.wrap.svelte-vm32wk.svelte-vm32wk.svelte-vm32wk {
display: inline !important;
}
.wrap .svelte-vm32wk label {
margin-bottom: 10px !important;
}
#component-9 {
height: 88vh !important;
border: #f6faff;
box-shadow: none;
background: #f6faff;
}
footer {
visibility: hidden;
}
.app.svelte-1kyws56.svelte-1kyws56 {
height: 100vh;
}
@media screen and (max-width: 768px) {
input[type="text"], textarea {
-webkit-user-modify: read-write-plaintext-only;
-webkit-text-size-adjust: none;
}
#component-3 {
display: none;
}
#component-5 {
display: none !important;
}
#component-9 {
height: 89vh !important;
}
.app.svelte-1kyws56.svelte-1kyws56 {
height: 100vh;
}
#component-10 {
position: fixed;
bottom: 15px;
right: 0px;
padding: 0 20px;
}
#component-12 {
min-width: min(74px, 100%);
}
}
#component-3 button {
background: #4999ff !important;
color: white !important;
}
#component-3 button:hover {
background: #4999ff !important;
color: white !important;
}
span.svelte-1gfkn6j {
margin: 55px 0;
}
gradio-app {
background-color: #f6faff !important;
}
.bot.svelte-1pjfiar.svelte-1pjfiar.svelte-1pjfiar {
background: white;
border: none;
box-shadow: 0px 0px 9px 0px rgba(0, 0, 0, 0.1);
border-radius: 4px;
}
.user.svelte-1pjfiar.svelte-1pjfiar.svelte-1pjfiar {
background: white;
border: none;
box-shadow: 0px 0px 9px 0px rgba(0, 0, 0, 0.1);
border-radius: 4px;
}
.message-buttons-bubble.svelte-1pjfiar.svelte-1pjfiar.svelte-1pjfiar {
/* background: white; */
border: none;
box-shadow: 0px 0px 9px 0px rgba(0, 0, 0, 0.1);
}
label.svelte-vm32wk > .svelte-vm32wk + .svelte-vm32wk {
color: #989898;
}
p {
color: #26415f;
}
.wrapper.svelte-nab2ao {
background: #f6faff;
}
#component-12 {
background: #4999ff;
color: white;
}
.svelte-11hlfrc svg {
color: gray;
}
#component-6 {
position: fixed;
width: 219px;
height: 36px;
border-radius: 18px;
top: 4%;
z-index: 4;
left: 8%;
}
div.svelte-sfqy0y {
background-color: white;
}
@@ -78,9 +78,9 @@ def test_faiss_load():
"password": VEC_DB_PASSWORD},
show_number=SIMILARITY_SHOW_NUMBER,
reset=False)
print(vecstore_faiss.join_document(vecstore_faiss.get_text_similarity("什么是低空飞行")))
print(vecstore_faiss.join_document(vecstore_faiss.get_text_similarity("我国什么时候全面开放低空领域")))
if __name__ == "__main__":
test_faiss_from_dir()
# test_faiss_from_dir()
test_faiss_load()
"""给定三个字符串 s1、s2、s3,请你帮忙验证 s3 是否是由 s1 和 s2 交错 组成的。
两个字符串 s 和 t 交错 的定义与过程如下,其中每个字符串都会被分割成若干 非空
子字符串
s = s1 + s2 + ... + sn
t = t1 + t2 + ... + tm
|n - m| <= 1
交错 是 s1 + t1 + s2 + t2 + s3 + t3 + ... 或者 t1 + s1 + t2 + s2 + t3 + s3 + ...
注意:a + b 意味着字符串 a 和 b 连接。
示例 1:
输入:s1 = "aabcc", s2 = "dbbca", s3 = "aadbbcbcac"
输出:true
示例 2:
输入:s1 = "aabcc", s2 = "dbbca", s3 = "aadbbbaccc"
输出:false
示例 3:
输入:s1 = "", s2 = "", s3 = ""
输出:true
提示:
0 <= s1.length, s2.length <= 100
0 <= s3.length <= 200
s1、s2、和 s3 都由小写英文字母组成"""
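
No solution for the interleaving problem is included in this file; a standard dynamic-programming sketch (an illustration, not part of the commit; the class name is arbitrary) would be:

class InterleaveSolution(object):
    # dp[i][j] is True when s3[:i + j] can be formed by interleaving s1[:i] and s2[:j].
    def isInterleave(self, s1, s2, s3):
        n, m = len(s1), len(s2)
        if n + m != len(s3):
            return False
        dp = [[False] * (m + 1) for _ in range(n + 1)]
        dp[0][0] = True
        for i in range(n + 1):
            for j in range(m + 1):
                if i > 0 and dp[i - 1][j] and s1[i - 1] == s3[i + j - 1]:
                    dp[i][j] = True
                if j > 0 and dp[i][j - 1] and s2[j - 1] == s3[i + j - 1]:
                    dp[i][j] = True
        return dp[n][m]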
# Definition for a binary tree node.
class TreeNode(object):
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution(object):
    def isValidBST(self, root):
        """
        :type root: TreeNode
        :rtype: bool
        """
        # Comparing a node only with its immediate children is not sufficient:
        # every node must also respect the (low, high) bounds inherited from its ancestors.
        def valid(node, low, high):
            if node is None:
                return True
            if (low is not None and node.val <= low) or (high is not None and node.val >= high):
                return False
            return valid(node.left, low, node.val) and valid(node.right, node.val, high)

        return valid(root, None, None)
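
A quick check of the bounds-based validator above (the example tree is an illustration, not part of the commit):

# 4 sits in the right subtree of 5 but is smaller than 5, so this is not a valid BST.
#       5
#      / \
#     1   7
#        / \
#       4   8
root = TreeNode(5, TreeNode(1), TreeNode(7, TreeNode(4), TreeNode(8)))
print(Solution().isValidBST(root))  # expected: False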