Commit 558e898c by 陈正乐

init

parent 4662b93b
data/
docker/
docs/
images/
model/
src/tuning/
src/tools/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
dist/
eggs/
*.egg-info/
bin/
include/
lib/
local/
man/
share/
pip-wheel-metadata/
htmlcov/
.coverage
.tox/
.pytest_cache/
pytest.ini
# PyCharm
.idea/
# VSCode
.vscode/
# Jupyter Notebook
.ipynb_checkpoints
# Django
*.log
*.pot
*.pyc
local_settings.py
db.sqlite3
db.sqlite3-journal
media
# Flask
instance/
.webassets-cache
# Sphinx documentation
docs/_build/
model/
data/
exam/
.env
src/vector/faiss_store
src/scenarios/psbc/tag_memory_store/vectorstore
Python-3.11.0.tgz
Python-3.10.8.tgz
OpenSSL_1_1_1d.tar.gz
deps/averaged_perceptron_tagger.zip
deps/punkt.zip
deps/nltk.py
deps/averaged_perceptron_tagger.tar.gz
deps/punkt.tar.gz
FROM ubuntu:22.04 as base
# COPY sources.list /etc/apt/sources.list
RUN apt update && apt -y upgrade
RUN apt install -y gcc make wget perl zlib1g-dev libffi-dev libbz2-dev libreadline-dev liblzma-dev libsqlite3-dev
#RUN wget https://www.python.org/ftp/python/3.10.8/Python-3.10.8.tgz
#RUN wget https://github.com/openssl/openssl/archive/OpenSSL_1_1_1d.tar.gz
#download local
COPY deps/Python-3.10.8.tgz /
COPY deps/OpenSSL_1_1_1d.tar.gz /
RUN cd / && tar -zxf OpenSSL_1_1_1d.tar.gz
RUN cd openssl-OpenSSL_1_1_1d && ./config --prefix=/usr/local/openssl && make && make install
RUN rm -f /usr/bin/openssl /usr/lib64/openssl /usr/lib64/libssl.so \
&& ln -s /usr/local/openssl/bin/openssl /usr/bin/openssl \
&& ln -s /usr/local/openssl/include/openssl /usr/include/openssl \
&& ln -s /usr/local/openssl/lib/libssl.so /usr/lib64/libssl.so \
&& echo "/usr/local/openssl/lib" >> /etc/ld.so.conf \
&& ldconfig -v
RUN cd / && tar -zxf Python-3.10.8.tgz
RUN cd Python-3.10.8/ \
&& ./configure --enable-optimizations --prefix=/usr/local/python3.10 --with-openssl=/usr/local/openssl \
&& make && make install
# && rm -rf /Python-3.10.8.tgz /Python-3.10.8 /OpenSSL_1_1_1d.tar.gz /openssl-OpenSSL_1_1_1d
FROM nvidia/cuda:12.1.0-runtime-ubuntu22.04
COPY deps/sources.list /etc/apt/sources.list
RUN apt update && apt -y upgrade
RUN apt install -y gcc wget perl vim net-tools libpq-dev
RUN useradd -m aigc && usermod -s /bin/bash aigc && usermod -G sudo aigc
COPY --from=base /usr/local/openssl /usr/local/openssl
COPY --from=base /usr/local/python3.10 /usr/local/python3.10
RUN rm -f /usr/bin/openssl /usr/lib64/openssl /usr/lib64/libssl.so \
&& ln -s /usr/local/openssl/bin/openssl /usr/bin/openssl \
&& ln -s /usr/local/openssl/include/openssl /usr/include/openssl \
&& ln -s /usr/local/openssl/lib/libssl.so /usr/lib64/libssl.so \
&& echo "/usr/local/openssl/lib" >> /etc/ld.so.conf \
&& ldconfig -v
RUN ln -s /usr/local/python3.10/bin/python3.10 /usr/local/bin/python3 \
&& ln -s /usr/local/python3.10/bin/pip3.10 /usr/local/bin/pip3 \
&& ln -s /usr/local/bin/python3 /usr/bin/python \
&& echo "export PATH=\$PATH:/usr/local/python3.10/bin" >> /etc/profile \
&& rm -f /usr/bin/pip && ln -s /usr/local/bin/pip3 /usr/bin/pip
ADD deps/punkt.tar.gz /usr/local/python3.10/nltk_data/tokenizers/
ADD deps/averaged_perceptron_tagger.tar.gz /usr/local/python3.10/nltk_data/taggers/
WORKDIR /home/aigc/
RUN mkdir .beai
RUN apt update && apt install -y libreoffice
RUN pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple
COPY deps/requirements.txt requirements.txt
RUN pip install -r requirements.txt
# RUN python -m pip install --upgrade pip && pip install faiss-gpu
COPY . .
# WORKDIR /home/aigc/src/scenarios/spdsvb
# USER aigc
EXPOSE 5000
EXPOSE 8001
EXPOSE 8002
CMD ["bash"]
IMAGE_NAME = "brilliance/aigc_llm:0.2.0"
.PHONY: image
image:
docker build -t $(IMAGE_NAME) .
bitsandbytes==0.41.1
cpm-kernels==1.0.11
fastapi==0.100.0
Flask==2.3.2
jieba==0.42.1
langchain==0.0.278
peft==0.4.0
psycopg2==2.9.7
pydantic==1.10.12
requests==2.31.0
sentence-transformers==2.2.2
torch==2.0.1
transformers==4.31.0
uvicorn==0.23.1
unstructured==0.8.1
qianfan==0.0.5
faiss-gpu==1.7.2 # https://github.com/facebookresearch/faiss/blob/main/INSTALL.md
\ No newline at end of file
# Source (deb-src) mirrors are commented out by default to speed up apt update; uncomment them if needed
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ jammy main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ jammy main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ jammy-updates main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ jammy-updates main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ jammy-backports main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ jammy-backports main restricted universe multiverse
# deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ jammy-security main restricted universe multiverse
# # deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ jammy-security main restricted universe multiverse
deb http://security.ubuntu.com/ubuntu/ jammy-security main restricted universe multiverse
# deb-src http://security.ubuntu.com/ubuntu/ jammy-security main restricted universe multiverse
# Pre-release repository, not recommended
# deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ jammy-proposed main restricted universe multiverse
# # deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ jammy-proposed main restricted universe multiverse
\ No newline at end of file
#!/bin/sh
# This will start a code-server container and expose it at http://127.0.0.1:8080.
# It will also mount your current directory into the container as `/home/coder/project`
# and forward your UID/GID so that all file system operations occur as your user outside
# the container.
#
# Your $HOME/.config is mounted at $HOME/.config within the container to ensure you can
# easily access/modify your code-server config in $HOME/.config/code-server/config.json
# outside the container.
mkdir -p ~/.config
docker run -d --name code-server -p 8443:8080 \
-v "$HOME/.config:/home/coder/.config" \
-v "$HOME/:/home/coder/project" \
-u "$(id -u):$(id -g)" \
codercom/code-server:latest
\ No newline at end of file
version: '3.8'
services:
db:
image: postgres:15-alpine3.17
restart: always
environment:
POSTGRES_USER: vecdoc
POSTGRES_PASSWORD: vecdoc
POSTGRES_DB: vecdoc
ports:
- "5432:5432"
volumes:
- db-data:/var/lib/postgresql/data
volumes:
db-data:
\ No newline at end of file
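The compose file above only provisions the Postgres document store. A minimal connectivity check in Python, assuming docker compose up -d has been run with the default port mapping and that psycopg2 (pinned in requirements.txt) is installed:

# Sketch: verify the vecdoc Postgres service defined above is reachable.
import psycopg2

conn = psycopg2.connect(
    host="127.0.0.1",
    port=5432,
    dbname="vecdoc",
    user="vecdoc",
    password="vecdoc",
)
with conn, conn.cursor() as cur:
    cur.execute("SELECT version();")
    print(cur.fetchone()[0])
conn.close()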
#!/bin/bash
DB_CONTAINER_NAME=${1:-'vector_db'}
DB_NAME=${2:-'vecdoc'}
DB_USER=${3:-'vecdoc'}
STORE_PATH=${4:-'export.sql'}
# if [[ -z $1 || -z $2 || -z $3 ]];then
# echo "need input container name, db name, user name "
# echo "***.sh containername dbname username [storepath]"
# echo "default storepath ./export.sql"
# echo "- ./export.sql:/docker-entrypoint-initdb.d/export.sql"
# exit 1
# fi
# storepath=$4
# if [ -z $4 ];then
# storepath = "./export.sql"
set -x
docker exec -i $DB_CONTAINER_NAME pg_dump -d $DB_NAME -U $DB_USER > $STORE_PATH
set +x
\ No newline at end of file
{
"swagger": "2.0",
"info": {
"version": "1.0.0",
"title": "AI Chatbot API"
},
"basePath": "/aigc",
"schemes": [
"http"
],
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"paths": {
"/ask": {
"post": {
"summary": "Chat with the AI chatbot",
"description": "Send a question to the AI chatbot and receive a response",
"parameters": [
{
"name": "body",
"in": "body",
"description": "The request body",
"required": true,
"schema": {
"type": "object",
"properties": {
"question": {
"type": "string"
},
"modelOptions": {
"type": "object",
"properties": {
"isEnhancement": {
"type": "boolean",
"description": "Whether to use the enhancement model"
},
"isExpert": {
"type": "boolean",
"description": "Whether to use the expert model"
},
"isCommon": {
"type": "boolean",
"description": "Whether to use the common model"
},
"sliderTemp": {
"type": "number",
"description": "The temperature of the response"
}
}
},
"dialog": {
"type": "array",
"items": {
"type": "object",
"properties": {
"q": {
"type": "string",
"description": "The question in the dialog"
},
"a": {
"type": "string",
"description": "The answer in the dialog"
}
}
},
"description": "The dialog history"
}
}
}
}
],
"responses": {
"200": {
"description": "Successful response",
"schema": {
"type": "object",
"properties": {
"code": {
"type": "integer",
"format": "int32"
},
"msg": {
"type": "string"
},
"data": {
"type": "object",
"properties": {
"q": {
"type": "string",
"description": "The input question"
},
"a": {
"type": "string",
"description": "The response answer"
}
}
}
}
}
},
"400": {
"description": "Invalid request"
},
"500": {
"description": "Internal server error"
}
}
}
},
"/docqa": {
"post": {
"summary": "Answer a question based on uploaded documents",
"description": "This endpoint accepts a POST request with a JSON payload containing a query and optional parameters. It returns a JSON response containing the answer to the query.",
"consumes": [
"multipart/form-data"
],
"parameters": [
{
"name": "params",
"in": "formData",
"description": "JSON payload containing query and optional parameters",
"required": true,
"type": "object",
"properties":{
"chatid":{
"type":"string",
"description":"会话 id"
},
"query":{
"type":"string",
"description":"用户输入的问题"
},
"chain_type":{
"type":"string",
"description":"链类型"
},
"detail":{
"type":"boolean",
"description":"是否返回关联知识(default: true)"
},
"summary":{
"type":"boolean",
"description":"上传文档时是否进行总结(default: false)"
}
}
},
{
"name": "file",
"in": "formData",
"description": "File(s) to be uploaded for document retrieval",
"required": false,
"type": "file"
}
],
"responses": {
"200": {
"description": "Successful response",
"schema": {
"type": "object",
"properties": {
"code": {
"type": "integer",
"description": "HTTP status code"
},
"msg": {
"type": "string",
"description": "Response message"
},
"data": {
"type": "object",
"properties": {
"q": {
"type": "string",
"description": "Query string"
},
"a": {
"type": "string",
"description": "Answer to the query"
},
"similarity":{
"type":"array",
"description":"关联文档"
}
}
}
}
}
},
"400": {
"description": "Bad request",
"schema": {
"type": "object",
"properties": {
"code": {
"type": "integer",
"description": "HTTP status code"
},
"msg": {
"type": "string",
"description": "Error message"
}
}
}
},
"500": {
"description": "Internal server error",
"schema": {
"type": "object",
"properties": {
"code": {
"type": "integer",
"description": "HTTP status code"
},
"msg": {
"type": "string",
"description": "Error message"
}
}
}
}
}
}
}
}
}
\ No newline at end of file
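For reference, a minimal Python client for the POST /aigc/ask operation described above; the base URL is an assumption (the Dockerfile exposes port 5000), and the field names follow the schema in this spec:

# Sketch: call the chat endpoint with the request schema defined above.
# The base URL is a placeholder; adjust it to the actual deployment.
import requests

payload = {
    "question": "如何重置网银登录密码?",
    "modelOptions": {"isEnhancement": False, "isExpert": True, "isCommon": False, "sliderTemp": 0.95},
    "dialog": [],
}
resp = requests.post("http://127.0.0.1:5000/aigc/ask", json=payload, timeout=300)
resp.raise_for_status()
body = resp.json()
print(body["code"], body["msg"], body["data"]["a"])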
#!/bin/bash
# Define source and destination directories
src_dir="/home/zfh/aird"
dest_dir="/home/zfh/aird_backup_his/$(date +%F)"
# clear the old backup
if [ -d "$dest_dir" ]; then
rm -rf "$dest_dir"
rm -f $dest_dir.tar.gz
fi
mkdir -p "$dest_dir"
if [ ! -d "$dest_dir/model/ckpt" ]; then
mkdir -p "$dest_dir/model/ckpt"
fi
# Copy directories and files to destination directory
exclusions=(tools tuning .env)
exclusions+=("__pycache__/")
rsync -av "${exclusions[@]/#/--exclude=}" "${src_dir}/src/" "${dest_dir}/src/"
# Define the list of directories to copy
# dirs_to_copy=(chatglm2-6b-qlora-spdsvb-INSv9 chatglm-6b-pt-spdsvb-INSv9-128-5e-3-3000 chatglm2-6b-pt-spdsvb-INSv11-128-5e-3-3010 chatglm2-6b-qlora-INSv11-rank16-1e-3-30)
# rsync -av "${dirs_to_copy[@]/#/${src_dir}\/model\/ckpt\/}" "${dest_dir}/model/ckpt/"
# rsync -av $src_dir/model/moka-ai/ $dest_dir/model/moka-ai/
rsync -av $src_dir/deps $dest_dir/
cp $src_dir/Dockerfile $dest_dir/Dockerfile
cp $src_dir/Makefile $dest_dir/Makefile
cp $src_dir/.dockerignore $dest_dir/.dockerignore
sed -i 's/\/home\/zfh/\/home\/ssvb/g' $dest_dir/src/common/consts.py
# Create a tar archive of the destination directory
# tar -czvf $dest_dir.tar.gz $dest_dir
tar -czvf /home/zfh/deploy/aird.$(date +%F).tar.gz -C $dest_dir .
rm -f /home/zfh/deploy/aird_backup
ln -sf $dest_dir /home/zfh/deploy/aird_backup
\ No newline at end of file
MODEL_PATH_ChatGLM = "/home/zfh/models/chatglm-6b"
MODEL_PATH_ChatGLM2 = "/home/zfh/models/chatglm2-6b"
MODEL_PATH_ChatGLM2_32K = "/home/zfh/models/chatglm2-6b-32k"
MODEL_NAME_ChatGLM = "THUDM/chatglm-6b"
MODEL_NAME_ChatGLM2 = "THUDM/chatglm2-6b"
INSTRUCTION_V1="你是浦发硅谷银行网银系统的专家,请帮助解答用户在使用过程中遇到的问题。\n"
\ No newline at end of file
from typing import List
from langchain.schema import Document
from langchain.callbacks.base import (
BaseCallbackHandler,
BaseCallbackManager
)
class PrintRetrievalHandler(BaseCallbackHandler):
'''
Callback that records the similar documents used for a query.
'''
def __init__(self) -> None:
super().__init__()
self.similarity: List[dict] = []
def on_retriever_start(self, query: str, **kwargs):
print(f"**Question:** {query}")
def on_retriever_end(self, documents, **kwargs):
self.similarity = [{"page_content":doc.page_content,"from_file":doc.metadata["filename"] or "","page_number":doc.metadata["page_number"] or 0} for doc in documents]
def getsimilarity(self)->List[Document]:
return self.similarity
\ No newline at end of file
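A minimal usage sketch for the handler above: it is passed as a callback to a retrieval chain, and getsimilarity() is read after the run. The llm and retriever objects are placeholders for whatever the caller already has:

# Sketch: collect the retrieved passages for one query via PrintRetrievalHandler.
# llm and retriever are placeholders supplied by the caller.
from langchain.chains import RetrievalQA

handler = PrintRetrievalHandler()
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever)
answer = qa.run(query="贷款利率如何计算?", callbacks=[handler])
for item in handler.getsimilarity():
    print(item["from_file"], item["page_number"], item["page_content"][:50])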
import sys
from os import path
import os
import re
import shutil
import json
from langchain.output_parsers import json as json_parser
sys.path.append("../")
from langchain.chains.summarize import load_summarize_chain
from langchain.chains.question_answering import load_qa_chain
from langchain.schema.language_model import BaseLanguageModel
from langchain.prompts import PromptTemplate
from langchain.chains import RetrievalQA
from typing import List, Dict, Optional, Any
from langchain.schema import Document
from langchain.vectorstores.base import VectorStoreRetriever
from langchain.vectorstores.utils import maximal_marginal_relevance
from langchain.callbacks.manager import (
Callbacks,
)
from langchain.embeddings.base import Embeddings
from langchain.embeddings.huggingface import (
HuggingFaceEmbeddings,
HuggingFaceInstructEmbeddings, #pip install InstructorEmbedding
)
from llm.chatglm import ChatGLMSerLLM
from llm.ernie_with_sdk import ChatERNIESerLLM
from vector.pgsqldocstore import PgSqlDocstore
from langchain.chains.router.llm_router import LLMRouterChain
from langchain.vectorstores import FAISS
from langchain.docstore.in_memory import InMemoryDocstore
import faiss
from loader import load
from scenarios.spdsvb.similarity import VectorStore_FAISS
from .prompts import (
QA_PROMPT,
REFINE_QA_PROMPT,
SUMMARISE_PROMPT,
REFINE_SUMMARISE_PROMPT,
EXTRACTION_PROMPT,
REFINE_EXTRACTION_PROMPT,
ROUTER_PROMPT,
GLM_CHAT_QUESTION_PROMPT,
GLM_CHAT_COMBINE_PROMPT,
ERNIE_GLM_CHAT_QUESTION_PROMPT,
ERNIE_GLM_CHAT_COMBINE_PROMPT,
ERNIE_EXTRACTION_PROMPT,
ERNIE_REFINE_EXTRACTION_PROMPT,
MAP_REDUCE_SUMMARISE_PROMPT,
GLM_MAP_EXTRACTION_PROMPT,
GLM_MAP_EXTRACTION_COMBINE_PROMPT,
FOREACH_REFINE_EXTRACTION_PROMPT,
FOREACH_EXTRACTION_PROMPT,
FOREACH_MAP_EXTRACTION_PROMPT,
FOREACH_MAP_Q_EXTRACTION_PROMPT
)
def del_vectorstore_path(index_name:str,vectorstorepath:str="./vectorstore"):
if index_name:
filepath = path.join(vectorstorepath,index_name)
if path.exists(filepath):
shutil.rmtree(filepath)
class DocumentQA():
r"""
基于文档问答的封装类
"""
def __init__(self,llm: BaseLanguageModel):
self.llm = llm
# Document summarization
def summarize_document(
self,
filepaths: List[str] = [],
load_kwargs: Optional[dict] = None,
documents:List[Document] = [],
chain_type:str = "map_reduce",
chain_type_kwargs: Optional[dict] = {},
**kwargs) -> str:
r"""
对文档进行总结比较好:参数说明
filepath:需要总结的文件路径可为空
load_kwargs:文档载入参数,查阅load.load方法
documents:需要总结的文档
chain_type:链类型,总结方法不同,"stuff","map_reduce","refine"
chain_type_kwargs:{
question_prompt:提问prompt
refine_prompt:refine提问prompt
document_variable_name:文档关键字
...
}
kwargs: {
callbacks
}
"""
default_chain_type_kwargs = {}
if chain_type == "map_reduce":
default_chain_type_kwargs = {"map_prompt":MAP_REDUCE_SUMMARISE_PROMPT,"combine_prompt":MAP_REDUCE_SUMMARISE_PROMPT}
else:
chain_type = "refine"
default_chain_type_kwargs = {"question_prompt":SUMMARISE_PROMPT,"refine_prompt":REFINE_SUMMARISE_PROMPT,"document_variable_name":"text"}
default_chain_type_kwargs.update(**chain_type_kwargs)
effect_documents = []
if len(filepaths) <= 0 and len(documents) <= 0:
raise ValueError(f"file and documents cannot be empty at the same time")
if len(filepaths) > 0:
load_documents= load.loads(filepaths,**load_kwargs)
effect_documents.extend(load_documents)
if len(documents) > 0:
effect_documents.extend(documents)
# Concatenate the text segments without changing the original layout
effect_documents = load.append(effect_documents,sentence_size=load_kwargs["sentence_size"])
chain = load_summarize_chain(self.llm, chain_type=chain_type,**default_chain_type_kwargs)
try:
result= chain.run(input_documents=effect_documents,**kwargs)
# print(result)
return result
except Exception as e:
print(e)
raise ValueError(f'文档总结报错:{e}')
# Question answering over documents
def qa_from_document(
self,
query:str,
retriever:VectorStoreRetriever,
chain_type:str = "map_reduce",
chain_type_kwargs: Optional[dict] = {},
chain_kwargs: Optional[dict] = {},
**kwargs) -> str:
r"""
对文档进行提问比较好:参数说明
query:问题
retriever:向量库
chain_type:链类型,总结方法不同,"stuff","map_reduce","refine"
chain_type_kwargs:{ 根据chain类型提供指定参数
question_prompt:提问prompt
refine_prompt:refine提问prompt
document_variable_name:文档关键字
verbose:debug日志
...
}
chain_kwargs:{}
kwargs:{
callbacks
}
"""
# Set default chain arguments
default_chain_type_kwargs = {}
if chain_type == "map_reduce":
question_prompt=GLM_CHAT_QUESTION_PROMPT
combine_prompt=GLM_CHAT_COMBINE_PROMPT
if isinstance(self.llm,ChatERNIESerLLM):
question_prompt=ERNIE_GLM_CHAT_QUESTION_PROMPT
combine_prompt=ERNIE_GLM_CHAT_COMBINE_PROMPT
default_chain_type_kwargs = {"question_prompt":question_prompt,"combine_prompt":combine_prompt}
else:
chain_type = "refine"
default_chain_type_kwargs = {"question_prompt":QA_PROMPT,"refine_prompt":REFINE_QA_PROMPT,"document_variable_name":"context"}
default_chain_type_kwargs.update(**chain_type_kwargs)
qa = RetrievalQA.from_chain_type(llm=self.llm, chain_type=chain_type,
chain_type_kwargs=default_chain_type_kwargs,retriever=retriever,**chain_kwargs)
try:
result = qa.run(query=query,**kwargs)
# print(result)
return result
except Exception as e:
print(e)
raise ValueError(f'文档问答报错:{e}')
# Key-information extraction from documents
# Note:
# with GLM, prefer extract_from_document_foreach with chain_type=refine
# with ERNIE, prefer extract_from_document with chain_type=refine
def extract_from_document(
self,
query:str,
filepaths: List[str] = [],
load_kwargs: Optional[dict] = None,
documents:List[Document] = [],
chain_type:str = "refine",
chain_type_kwargs: Optional[dict] = {},
**kwargs) -> Any:
r"""
从文档中提取关键信息:参数说明
query:需要提取的知识点
filepaths:需要总结的文件路径可为空
load_kwargs:文档载入参数,查阅load.load方法
documents:需要总结的文档
chain_type:链类型,总结方法不同,"stuff","map_reduce","refine"
chain_type_kwargs:{
verbose:debug日志
question_prompt:提问prompt
refine_prompt:refine提问prompt
document_variable_name:文档关键字
}
kwargs:{
callbacks:run回调
}
"""
# Set default chain arguments
default_chain_type_kwargs = {}
if chain_type == "map_reduce":
question_prompt=GLM_MAP_EXTRACTION_PROMPT
combine_prompt=GLM_MAP_EXTRACTION_COMBINE_PROMPT
if isinstance(self.llm,ChatERNIESerLLM):
# todo
raise ValueError(f"Ernie's performance is not good when 'chain_type=map-reduce'")
default_chain_type_kwargs = {"question_prompt":question_prompt,"combine_prompt":combine_prompt}
else:
chain_type = "refine"
question_prompt=EXTRACTION_PROMPT
refine_prompt=REFINE_EXTRACTION_PROMPT
if isinstance(self.llm,ChatERNIESerLLM):
question_prompt=ERNIE_EXTRACTION_PROMPT
refine_prompt=ERNIE_REFINE_EXTRACTION_PROMPT
default_chain_type_kwargs = {"question_prompt":question_prompt,"refine_prompt":refine_prompt,"document_variable_name":"context"}
default_chain_type_kwargs.update(**chain_type_kwargs)
effect_documents = []
if len(filepaths) <= 0 and len(documents) <= 0:
raise ValueError(f"file and documents cannot be empty at the same time")
if len(filepaths) > 0:
load_documents= load.loads(filepaths,**load_kwargs)
effect_documents.extend(load_documents)
if len(documents) > 0:
effect_documents.extend(documents)
effect_documents = load.append(documents=effect_documents,sentence_size=load_kwargs["sentence_size"])
chain = load_qa_chain(llm=self.llm,chain_type=chain_type,**default_chain_type_kwargs)
try:
result = chain.run(input_documents=effect_documents, question=query,**kwargs)
# print(result)
if isinstance(self.llm,ChatERNIESerLLM):
try:
return json_parser.parse_json_markdown(json_string=result)
except Exception as e:
print(result)
print("序列化报错",e)
return {}
else:
return result
except Exception as e:
print(e)
raise ValueError(f'文档提取报错:{e}')
def extract_from_document_foreach(
self,
query:str,
pattern:str = r'[、]',
filepaths: List[str] = [],
load_kwargs: Optional[dict] = None,
documents:List[Document] = [],
chain_type:str = "refine",
chain_type_kwargs: Optional[dict] = {},
**kwargs) -> str:
r"""
从文档中提取关键信息:参数说明
query:需要提取的知识点
pattern:知识点分割符号
filepaths:需要总结的文件路径可为空
load_kwargs:文档载入参数,查阅load.load方法
documents:需要总结的文档
chain_type:链类型,总结方法不同,"stuff","map_reduce","refine"
chain_type_kwargs:{
verbose:debug日志
question_prompt:提问prompt
refine_prompt:refine提问prompt
document_variable_name:文档关键字
}
kwargs:{
callbacks:run回调
}
"""
# Set default chain arguments
default_chain_type_kwargs = {}
if isinstance(self.llm,ChatERNIESerLLM):
raise ValueError(f"Ernie did not perform well when calling the 'extract_from_document_foreach' function")
if chain_type == "map_reduce":
question_prompt=FOREACH_MAP_Q_EXTRACTION_PROMPT # use the question prompt to extract the raw text for the key item
combine_prompt=FOREACH_MAP_EXTRACTION_PROMPT # answer from the information extracted in the map step
default_chain_type_kwargs = {"question_prompt":question_prompt,"combine_prompt":combine_prompt}
else:
chain_type = "refine"
default_chain_type_kwargs = {"question_prompt":FOREACH_EXTRACTION_PROMPT,"refine_prompt":FOREACH_REFINE_EXTRACTION_PROMPT,"document_variable_name":"context"}
default_chain_type_kwargs.update(**chain_type_kwargs)
effect_documents = []
if len(filepaths) <= 0 and len(documents) <= 0:
raise ValueError(f"file and documents cannot be empty at the same time")
if len(filepaths) > 0:
load_documents= load.loads(filepaths,**load_kwargs)
effect_documents.extend(load_documents)
if len(documents) > 0:
effect_documents.extend(documents)
effect_documents = load.append(documents=effect_documents,sentence_size=load_kwargs["sentence_size"])
chain = load_qa_chain(llm=self.llm,chain_type=chain_type,**default_chain_type_kwargs)
words_list = re.split(pattern, query)
results = {}
for word in words_list:
try:
result = chain.run(input_documents=effect_documents, question=word,**kwargs)
result = result.replace("\n", "")
result_s = re.split(r'[::]',result)
if len(result_s)>=2:
results[result_s[0]] = ":".join(result_s[1:])
except Exception as e:
print(f'提取信息:{word}报错:\n',e)
# print(results)
return json.dumps(results,ensure_ascii=False)
def route_chain(self,
query:str,
filepaths: List[str] = [],
load_kwargs: Optional[dict] = {},
documents:List[Document] = [],
embeddings_model:str = None,
chain_type:str = "refine",
chain_type_kwargs: Optional[dict] ={},
chain_kwargs: Optional[dict] ={},
**kwargs) -> Any:
r"""
判断问题属于那种类型,自动使用相应问答模型进行回答,默认使用qa模型
query:问题
filepaths:需要总结的文件路径可为空
load_kwargs:文档载入参数,查阅load.load方法
documents:需要总结的文档
embeddings_model:文本向量模型
chain_type:提问链类型
chain_type_kwargs:类型参数
chain_kwargs: qa类型参数
kwargs: run args
"""
if query is None or len(query) == 0:
# Default: summarize the document(s)
return self.summarize_document(filepaths=filepaths,documents=documents,load_kwargs=load_kwargs,chain_type=chain_type,chain_type_kwargs=chain_type_kwargs,**kwargs)
router_chain = LLMRouterChain.from_llm(self.llm, ROUTER_PROMPT)
result = router_chain.route(query)
if result.destination == "expertor":
_faiss = VectorStore_FAISS(embedding_model_name=embeddings_model,show_number=5,index_name="chatid")
if len(filepaths) > 0:
_faiss._add_documents_from_dir(filepaths=filepaths,load_kwargs={"sentence_size":1024})
return self.qa_from_document(query=query,retriever=_faiss.as_retriever(),chain_type=chain_type,chain_type_kwargs=chain_type_kwargs,chain_kwargs=chain_kwargs,**kwargs)
elif result.destination == "extractor":
return self.extract_from_document(query=query,documents=documents,filepaths=filepaths,load_kwargs=load_kwargs,chain_type=chain_type,chain_type_kwargs=chain_type_kwargs,**kwargs)
else:
return self.summarize_document(filepaths=filepaths,documents=documents,load_kwargs=load_kwargs,chain_type=chain_type,chain_type_kwargs=chain_type_kwargs,**kwargs)
\ No newline at end of file
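A minimal usage sketch for DocumentQA, assuming a ChatGLM HTTP service is reachable at the placeholder URL below and that the file path exists:

# Sketch: summarize one document with DocumentQA over a remote ChatGLM service.
# The service URL and file path are placeholders.
from llm.chatglm import ChatGLMSerLLM

llm = ChatGLMSerLLM(url="http://127.0.0.1:8000")
doc_qa = DocumentQA(llm)
summary = doc_qa.summarize_document(
    filepaths=["./docs/contract.docx"],
    load_kwargs={"sentence_size": 1024},
    chain_type="refine",
)
print(summary)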
from typing import List
from .prompts import ElementsPromptTemplate,ElementPromptTemplate
from langchain.base_language import BaseLanguageModel
from langchain.chains import LLMChain
class ElementsExtractor:
def __init__(self, llm: BaseLanguageModel):
self.prompt = ElementsPromptTemplate(input_variables=["knowledge","elements"])
self.chain = LLMChain(llm=llm, prompt=self.prompt,verbose=True)
self.prompt_foreach = ElementPromptTemplate(input_variables=["knowledge","element"])
self.chain_foreach = LLMChain(llm=llm, prompt=self.prompt_foreach,verbose=True)
def extract(self, knowledge: str,elements: List[str]) -> List[str]:
output = self.chain.run({"knowledge":knowledge,"elements":elements})
lines = [line for line in output.split("\n") if line.strip()]
return lines
def extract_foreach(self, knowledge: str,elements: List[str]) -> List[str]:
lines = []
for e in elements:
output = self.chain_foreach.run({"knowledge":knowledge,"element":e})
lines.append(output)
print(output)
return lines
\ No newline at end of file
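A minimal usage sketch for the extractor above; llm is any BaseLanguageModel instance and the knowledge text is a placeholder:

# Sketch: extract a few contract elements from a piece of text.
extractor = ElementsExtractor(llm=llm)
knowledge = "本合同由甲方××银行与乙方××公司于2020年1月1日签订……"
lines = extractor.extract(knowledge=knowledge, elements=["签订日期", "甲方", "乙方"])
print("\n".join(lines))
# extract_foreach asks the model about one element per call instead of all at once:
lines = extractor.extract_foreach(knowledge=knowledge, elements=["签订日期", "甲方"])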
from typing import Any
from langchain.prompts import StringPromptTemplate,PromptTemplate
from pydantic import BaseModel, validator
from langchain.chains.router.llm_router import RouterOutputParser
# template="""
# {knowledge}
# 请从上述内容中提取 {elements} 这些元素的信息,每个元素以 "名称:值" 的形式输出,空行分隔
# """
template="""
{knowledge}
请从上述提供的合同文本中提取出指定元素相关的信息,并以“名称:值”的格式输出识别结果,用空行分隔,不要使用 markdown 格式。例如:
测试元素1:test
测试元素2:test2
你需要提取的元素有:
{elements}
"""
prompt=PromptTemplate.from_template(template)
class ElementsPromptTemplate(StringPromptTemplate, BaseModel):
def format(self, **kwargs) -> str:
if "elements" not in kwargs or kwargs["elements"] is None:
elements = []
else:
elements=kwargs["elements"]
# elements = "\n".join([f"{i+1}. {e}" for i, e in enumerate(elements)])
elements = "\n".join(elements)
print(elements)
if "knowledge" not in kwargs:
raise ValueError("knowledge is required")
knowledge = kwargs["knowledge"]
return prompt.format(knowledge=knowledge, elements=elements)
def _prompt_type(self):
return "contract-elements"
template_foreach="""
{knowledge}
请从上述提供的合同文本中提取出 {element} 的信息,并以“{element}:”的形式进行输出。
"""
prompt_foreach=PromptTemplate.from_template(template_foreach)
class ElementPromptTemplate(StringPromptTemplate, BaseModel):
def format(self, **kwargs) -> str:
if "element" not in kwargs or kwargs["element"] is None:
element = ""
else:
element=kwargs["element"]
if "knowledge" not in kwargs:
raise ValueError("knowledge is required")
knowledge = kwargs["knowledge"]
return prompt_foreach.format(knowledge=knowledge, element=element)
def _prompt_type(self):
return "contract-element"
#========================
# documentqa chain prompt
# documentqa chain prompt
# documentqa chain prompt
#========================
#========================
# GLM refine
#========================
#------------------------
# Summarization
#------------------------
summarise_prompt = "对以下内容进行简要总结:\n----------\n{text}\n----------\n,总结:"
refine_summarise_prompt = "你的工作是生成总结。\n我们已经提供了一段摘要:{existing_answer}\n我们需要从下面的上下文中补充和完善摘要,如果上下文没有用处,请返回原始摘要:\n----------\n{text}\n----------\n"
SUMMARISE_PROMPT = PromptTemplate(
input_variables=["text"],
template=summarise_prompt,
#template="Define {concept} with a real-world example?",
)
REFINE_SUMMARISE_PROMPT = PromptTemplate(
input_variables=["existing_answer", "text"],
template=refine_summarise_prompt,
#template="Define {concept} with a real-world example?",
)
#------------------------
# Question answering
#------------------------
qa_prompt = """请根据下面的材料回答问题:"{question}",只根据材料内容进行回答,如果问题与提供的材料无关,请回答"对不起,我不知道",另外也不要回答无关答案:
-------------------
{context}
-------------------"""
QA_PROMPT = PromptTemplate(template=qa_prompt, input_variables=["context", "question"])
refine_qa_prompt = """你的工作是根据新的Context补充Exist_answer。
Exist_answer:
-------------------
{existing_answer}
-------------------
Context:
-------------------
{context}
-------------------
Question:
-------------------
{question}
-------------------
如果上下文没有用处,请返回原始答案,如果query与提供的材料无关,请回答"对不起,我不知道",另外也不要回答无关答案"""
REFINE_QA_PROMPT = PromptTemplate(
input_variables=["existing_answer", "context","question"],
template=refine_qa_prompt,
#template="Define {concept} with a real-world example?",
)
#------------------------
# Extraction
#------------------------
extraction_prompt="""你的工作是合同要素提取,根据提供的资料提取合同要素,不要造假答案。如果资料没用,回复“无”。需要提取的要素列表(用“、”分割):{question}。
示例:----------
要素列表:签订日期、甲方
回答格式如下:
签订日期:2020年1月1日
甲方:无
----------
下面一直到结束是提供的资料:
{context}"""
EXTRACTION_PROMPT = PromptTemplate(template=extraction_prompt, input_variables=["context", "question"])
#你的工作是在现有的Exist_answer基础上,根据新的Context提取"keys"中key的值,并以"key:value"形式给出答案,用空行分隔。
refine_extraction_prompt="""你的工作是合同要素提取,根据提供的资料完善现有的要素信息。如果资料没用,请返回现有的要素信息,不要造假答案。需要提取的要素列表(用“、”分割):{question}。
示例:----------
要素列表:签订日期、甲方
回答格式如下:
签订日期:2020年1月1日
甲方:无
----------
现有的要素信息如下:
{existing_answer}
---------------------
下面一直到结束是提供的资料:
{context}"""
REFINE_EXTRACTION_PROMPT = PromptTemplate(
input_variables=["existing_answer", "context","question"],
template=refine_extraction_prompt,
#template="Define {concept} with a real-world example?",
)
foreach_extraction_prompt="""你的工作是从资料中提取要素:{question}。如果资料无法提取要素信息,回复“无”。
下面是提供的资料:
{context}
---------------------
请注意输出格式以“{question}:”开头,紧接着给出答案。"""
FOREACH_EXTRACTION_PROMPT = PromptTemplate(template=foreach_extraction_prompt, input_variables=["context", "question"])
#你的工作是在现有的Exist_answer基础上,根据新的Context提取"keys"中key的值,并以"key:value"形式给出答案,用空行分隔。
foreach_refine_extraction_prompt="""你的工作是从资料中提取要素:{question}。如果资料中要素信息明确,完善现有的答案并返回。如果资料无法提取要素信息,请返回现有的答案。另外也不要回答无关答案。
现有的答案:
{existing_answer}
---------------------
下面是提供的资料:
{context}
---------------------
请注意输出格式以“{question}:”开头,直接给出答案。"""
FOREACH_REFINE_EXTRACTION_PROMPT = PromptTemplate(
input_variables=["existing_answer", "context","question"],
template=foreach_refine_extraction_prompt,
#template="Define {concept} with a real-world example?",
)
#========================
# map reduce prompt
# map reduce prompt
# map reduce prompt
#========================
glm_chat_question_prompt = """你的工作是找出Context中与Question相关的文本,返回原始文本。如果Question与提供的Context无关,请回答"相关文本:无",另外也不要回答无关答案:
注意:回复以“相关文本:”开头,不要包含Question本身
Question:
-----------
{question}
-----------
Context:
-----------
{context}
-----------"""
GLM_CHAT_QUESTION_PROMPT = PromptTemplate(
template=glm_chat_question_prompt,
input_variables=["question","context"],
)
glm_chat_combine_prompt = """你的工作是根据资料回答问题。问题:{question},下面是提供的资料。如果资料与问题无关,回复“不知道”,不要添加任何不相关的内容。
注意:每个文本以“相关文本:”开头
<< 资料 >>
{summaries}
"""
GLM_CHAT_COMBINE_PROMPT = PromptTemplate(
template=glm_chat_combine_prompt,
input_variables=["question","summaries"],
)
glm_map_extraction_prompt="""你的工作是合同要素提取,根据提供的资料提取合同要素,不要造假答案。如果资料没用,回复“无”。需要提取的要素列表(用“、”分割):{question}。
示例:----------
要素列表:签订日期、甲方
回答格式如下:
签订日期:2020年1月1日
甲方:无
----------
下面一直到结束是提供的资料:
{context}"""
GLM_MAP_EXTRACTION_PROMPT = PromptTemplate(
template=glm_map_extraction_prompt,
input_variables=["question","context"],
)
glm_map_extraction_combine_prompt="""你的工作是将资料里面的要素信息根据需要提取的要素信息汇总。需要提取的要素列表(用“、”分割):{question}。
示例:----------
要素列表:签订日期、甲方
回答格式如下:
签订日期:2020年1月1日
甲方:无
----------
下面一直到结束是提供的资料:
{summaries}"""
GLM_MAP_EXTRACTION_COMBINE_PROMPT = PromptTemplate(
template=glm_map_extraction_combine_prompt,
input_variables=["question","summaries"],
)
#===================================
# foreach prompt
#===================================
foreach_map_q_extraction_prompt = """你的工作是找出资料中与“{question}”相关的文本,返回原始文本。如果找不到相关文档,请回答"相关文本:无"。不要生成与资料不相关的文字:
下面是提供的资料:
{context}
---------------------
请注意输出格式以“相关文本:”开头。"""
FOREACH_MAP_Q_EXTRACTION_PROMPT = PromptTemplate(
template=foreach_map_q_extraction_prompt,
input_variables=["question","context"],
)
foreach_map_extraction_prompt="""你的工作是从资料中提取要素:{question}。如果资料无法提取要素信息,回复“无”。
下面是提供的资料:
{summaries}
---------------------
请注意输出格式以“{question}:”开头,紧接着给出答案。"""
FOREACH_MAP_EXTRACTION_PROMPT = PromptTemplate(
input_variables=["summaries","question"],
template=foreach_map_extraction_prompt,
#template="Define {concept} with a real-world example?",
)
#========================
# Baidu ERNIE
#========================
ernie_chat_question_prompt = """现在需要你在'''中的资料中找出与问题相关的段落,回答用“相关资料:”开头。
问题是:{question}。
'''
{context}
'''
注意:如果没有找到与问题相关的段落就回答“无”。"""
ernie_chat_combine_prompt = """现在需要你根据'''中的资料回答问题,如果资料与问题无关就回复“不知道”,不要回答任何不相关的内容。
问题是:{question}。
'''
{summaries}
'''"""
ERNIE_GLM_CHAT_QUESTION_PROMPT = PromptTemplate(
template=ernie_chat_question_prompt,
input_variables=["question","context"],
)
ERNIE_GLM_CHAT_COMBINE_PROMPT = PromptTemplate(
template=ernie_chat_combine_prompt,
input_variables=["question","summaries"],
)
map_reducce_prompt = """请对下面内容做一个简要总结:
"{text}"
"""
MAP_REDUCE_SUMMARISE_PROMPT = PromptTemplate(template=map_reducce_prompt, input_variables=["text"])
ernie_extraction_prompt="""请从'''包裹的资料中按要求抽取出重要信息,需要你提取的信息列表:{question}。若无法提取相关信息,用"未知"表示。
资料如下:
'''
{context}
'''
输出要求:
以json格式输出,输出json中key必须含有\"{question}\"几项。除json以外不要添加任何内容。
```json
{{
"XXX":"XXXXXX",
"XXX":"未知"
}}
```
"""
ERNIE_EXTRACTION_PROMPT = PromptTemplate(template=ernie_extraction_prompt, input_variables=["context", "question"])
#你的工作是在现有的Exist_answer基础上,根据新的Context提取"keys"中key的值,并以"key:value"形式给出答案,用空行分隔。
ernie_refine_extraction_prompt="""由于原始文本太长,关键信息需要分段提取。\"\"\"包裹的是历史提取的信息,请根据'''包裹的新资料补充历史提取的信息中\"未知\"的部分,历史提取的信息中已知部分请保留输出。
历史提取的信息:
\"\"\"{existing_answer}\"\"\"
资料如下:
'''
{context}
'''
输出要求:
如果新资料没有帮助,请返回\"\"\"包裹的历史提取的信息。
以json格式输出,输出json中key必须含有\"{question}\"几项。除json以外不要添加任何内容。
```json
{{
"XXX":"XXXXXX",
"XXX":"未知"
}}
```
"""
ERNIE_REFINE_EXTRACTION_PROMPT = PromptTemplate(
input_variables=["existing_answer", "context","question"],
template=ernie_refine_extraction_prompt,
)
#========================
# route chain prompt
# route chain prompt
# route chain prompt
#========================
MULTI_PROMPT_ROUTER_TEMPLATE2 = """\
Given a raw text input to a language model select the model prompt best suited for \
the input. You will be given the names of the available prompts and a description of \
what the prompt is best suited for. You may also revise the original input if you \
think that revising it will ultimately lead to a better response from the language \
model.
<< FORMATTING >>
Return a markdown code snippet with a JSON object formatted to look like:
{{{{
"destination": string \\ name of the prompt to use or "default"
"next_inputs": string \\ a potentially modified version of the original input
}}}}
REMEMBER: "destination" MUST be one of the candidate prompt names specified below OR \
it can be "default" if the input is not well suited for any of the candidate prompts.
REMEMBER: "next_inputs" can just be the original input if you don't think any \
modifications are needed.
<< CANDIDATE PROMPTS >>
{destinations}
<< INPUT >>
{{input}}
<< OUTPUT (OUTPUT must be json string and don't include Note) >>
"""
prompt_infos = [
{
"name": "summer",
"description": "文档总结和摘要很专业",
},
{
"name": "expertor",
"description": "基于输入的文档,回答文档中相关的问题很专业",
},
{
"name": "extractor",
"description": "从文档中提取关键信息和事实很专业",
}
]
destinations = [f"{p['name']}: {p['description']}" for p in prompt_infos]
destinations_str = "\n".join(destinations)
router_template = MULTI_PROMPT_ROUTER_TEMPLATE2.format(destinations=destinations_str)
ROUTER_PROMPT = PromptTemplate(
template=router_template,
input_variables=["input"],
output_parser=RouterOutputParser(),
)
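For reference, a sketch of how ROUTER_PROMPT is consumed (route_chain in documentqa.py does this internally); llm is a placeholder:

# Sketch: route a user request to one of the destinations defined in prompt_infos.
from langchain.chains.router.llm_router import LLMRouterChain

router_chain = LLMRouterChain.from_llm(llm, ROUTER_PROMPT)
route = router_chain.route("请提取这份合同的签订日期和甲方")
print(route.destination)  # "summer", "expertor", "extractor", or None for the default route
print(route.next_inputs)  # the (possibly rewritten) input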
import os
from typing import Dict, Optional,List
from langchain.llms.base import BaseLLM,LLM
from langchain.callbacks.manager import CallbackManagerForLLMRun, Callbacks
import torch
from transformers import AutoTokenizer, AutoModel,AutoConfig,AutoModelForCausalLM
from transformers.generation.utils import GenerationConfig
from pydantic import root_validator
class BaichuanLLM(LLM):
model_name: str = "baichuan-inc/Baichuan-13B-Chat"
quantization_bit: Optional[int] = None
tokenizer: AutoTokenizer = None
model: AutoModel = None
@property
def _llm_type(self) -> str:
return "baichuan_local"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
if not values["model_name"]:
raise ValueError("No model name provided.")
model_name = values["model_name"]
tokenizer = AutoTokenizer.from_pretrained(model_name,use_fast=False,trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
model_name,
torch_dtype=torch.float16,
# device_map="auto",
trust_remote_code=True
)
model.generation_config = GenerationConfig.from_pretrained(
model_name
)
if values["quantization_bit"]:
print(f"Quantized to {values['quantization_bit']} bit")
model = model.quantize(values["quantization_bit"]).cuda()
else:
model=model.half().cuda()
model = model.eval()
values["tokenizer"] = tokenizer
values["model"] = model
return values
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
message = []
message.append({"role": "user", "content": prompt})
resp = self.model.chat(self.tokenizer,message)
# print(f"prompt:{prompt}\nresponse:{resp}\n")
return resp
\ No newline at end of file
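A minimal usage sketch, assuming the Baichuan weights are available locally or via the Hugging Face hub and a CUDA device is present; the 8-bit quantization is an example setting:

# Sketch: load Baichuan-13B-Chat quantized to 8 bit and ask one question.
llm = BaichuanLLM(model_name="baichuan-inc/Baichuan-13B-Chat", quantization_bit=8)
print(llm("请介绍一下你自己"))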
import os
import requests
from typing import Dict, Optional,List,Any,Mapping,Iterator
from pydantic import root_validator
import torch
from transformers import AutoTokenizer, AutoModel,AutoConfig
import langchain
from langchain.llms.base import BaseLLM,LLM
from langchain.cache import InMemoryCache
from langchain.callbacks.manager import CallbackManagerForLLMRun, Callbacks, AsyncCallbackManagerForLLMRun
import aiohttp
import asyncio
# Enable the LLM cache
# langchain.llm_cache = InMemoryCache()
class ChatGLMLocLLM(LLM):
model_name: str = "THUDM/chatglm-6b"
ptuning_checkpoint: str = None
quantization_bit: Optional[int] = None
pre_seq_len: Optional[int] = None
prefix_projection: bool = False
tokenizer: AutoTokenizer = None
model: AutoModel = None
@property
def _llm_type(self) -> str:
return "chatglm_local"
# @root_validator()
def validate_environment(cls, values: Dict) -> Dict:
if not values["model_name"]:
raise ValueError("No model name provided.")
model_name = values["model_name"]
tokenizer = AutoTokenizer.from_pretrained(model_name ,trust_remote_code=True)
config = AutoConfig.from_pretrained(model_name, trust_remote_code=True)
# model = AutoModel.from_pretrained(model_name, config=config, trust_remote_code=True)
if values["pre_seq_len"]:
config.pre_seq_len = values["pre_seq_len"]
if values["prefix_projection"]:
config.prefix_projection = values["prefix_projection"]
if values["ptuning_checkpoint"]:
ptuning_checkpoint = values["ptuning_checkpoint"]
print(f"Loading prefix_encoder weight from {ptuning_checkpoint}")
model = AutoModel.from_pretrained(model_name, config=config, trust_remote_code=True)
prefix_state_dict = torch.load(os.path.join(ptuning_checkpoint, "pytorch_model.bin"))
new_prefix_state_dict = {}
for k, v in prefix_state_dict.items():
if k.startswith("transformer.prefix_encoder."):
new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)
else:
model = AutoModel.from_pretrained(model_name, config=config, trust_remote_code=True).half().cuda()
if values["pre_seq_len"]:
# P-tuning v2
model = model.half().cuda()
model.transformer.prefix_encoder.float().cuda()
if values["quantization_bit"]:
print(f"Quantized to {values['quantization_bit']} bit")
model = model.quantize(values["quantization_bit"])
model = model.eval()
values["tokenizer"] = tokenizer
values["model"] = model
return values
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
resp,his = self.model.chat(self.tokenizer,prompt)
# print(f"prompt:{prompt}\nresponse:{resp}\n")
return resp
class ChatGLMSerLLM(LLM):
# URL of the model service
url: str = "http://127.0.0.1:8000"
chat_history: List = []
out_stream: bool = False
cache: bool = False
@property
def _llm_type(self) -> str:
return "chatglm3-6b"
def get_num_tokens(self, text: str) -> int:
resp = self._post(url=self.url+"/tokens",query=self._construct_query(text))
if resp.status_code == 200:
resp_json = resp.json()
predictions = resp_json['response']
# display(self.convert_data(resp_json['history']))
return predictions
else:
return len(text)
def convert_data(self,data):
result = []
for item in data:
result.append({'q': item[0], 'a': item[1]})
return result
def _construct_query(self, prompt: str,temperature = 0.95) -> Dict:
"""构造请求体
"""
# self.chat_history.append({"role": "user", "content": prompt})
query = {
"prompt": prompt,
"history":self.chat_history,
"max_length": 4096,
"top_p": 0.7,
"temperature": temperature
}
return query
@classmethod
def _post(cls, url: str,
query: Dict) -> Any:
"""POST请求
"""
_headers = {"Content_Type": "application/json"}
with requests.session() as sess:
resp = sess.post(url,
json=query,
headers=_headers,
timeout=300)
return resp
async def _post_stream(self, url: str,
query: Dict,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,stream=False) -> Any:
"""POST请求
"""
_headers = {"Content_Type": "application/json"}
async with aiohttp.ClientSession() as sess:
async with sess.post(url, json=query,headers=_headers,timeout=300) as response:
if response.status == 200:
if stream and not run_manager:
print('not callable')
if run_manager:
for callable in run_manager.get_sync().handlers:
await callable.on_llm_start(None,None)
async for chunk in response.content.iter_any():
# Handle each streamed chunk
if chunk and run_manager:
for callable in run_manager.get_sync().handlers:
# print(chunk.decode("utf-8"),end="")
await callable.on_llm_new_token(chunk.decode("utf-8"))
if run_manager:
for callable in run_manager.get_sync().handlers:
await callable.on_llm_end(None)
else:
raise ValueError(f'glm 请求异常,http code:{response.status}')
def _call(self, prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
stream = False,
**kwargs: Any) -> str:
query = self._construct_query(prompt=prompt,temperature=kwargs["temperature"] if "temperature" in kwargs else 0.95)
# display("==============================")
# display(query)
# post
if stream or self.out_stream:
async def _post_stream():
await self._post_stream(url=self.url+"/stream",
query=query,run_manager=run_manager,stream=stream or self.out_stream)
asyncio.run(_post_stream())
return ''
else:
resp = self._post(url=self.url,
query=query)
if resp.status_code == 200:
resp_json = resp.json()
# self.chat_history.append({'q': prompt, 'a': resp_json['response']})
predictions = resp_json['response']
# display(self.convert_data(resp_json['history']))
return predictions
else:
raise ValueError(f'glm 请求异常,http code:{resp.status_code}')
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
query = self._construct_query(prompt=prompt,temperature=kwargs["temperature"] if "temperature" in kwargs else 0.95)
await self._post_stream(url=self.url+"/stream",
query=query,run_manager=run_manager,stream=self.out_stream)
return ''
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters.
"""
_param_dict = {
"url": self.url
}
return _param_dict
\ No newline at end of file
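A minimal usage sketch for ChatGLMSerLLM, assuming an API server that implements the root, /stream and /tokens routes used above is running at the placeholder URL:

# Sketch: call a remote ChatGLM service through ChatGLMSerLLM.
llm = ChatGLMSerLLM(url="http://127.0.0.1:8000")
print(llm("你好,请做个自我介绍", temperature=0.8))
print(llm.get_num_tokens("你好"))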
import os
import requests
from typing import Dict, Optional,List,Any,Mapping,Iterator
from pydantic import root_validator
import torch
from transformers import AutoTokenizer, AutoModel,AutoConfig
import langchain
from langchain.llms.base import BaseLLM,LLM
from langchain_openai import OpenAI
from langchain.cache import InMemoryCache
from langchain.callbacks.manager import CallbackManagerForLLMRun, Callbacks, AsyncCallbackManagerForLLMRun
class ChatGLMSerLLM(OpenAI):
def get_token_ids(self, text: str) -> List[int]:
if "chatglm" in self.model_name:
# Issue an HTTP request to fetch the token ids
url = f"{self.openai_api_base}/num_tokens"
query = {"prompt": text,"model": self.model_name}
_headers = {"Content_Type": "application/json","Authorization": "chatglm "+self.openai_api_key}
resp = self._post(url=url,query=query,headers= _headers)
if resp.status_code == 200:
resp_json = resp.json()
print(resp_json)
predictions = resp_json['choices'][0]['text']
# Convert the predictions string to an int
return [int(predictions)]
return [len(text)]
@classmethod
def _post(cls, url: str,
query: Dict,headers: Dict) -> Any:
"""POST请求
"""
_headers = {"Content_Type": "application/json"}
_headers.update(headers)
with requests.session() as sess:
resp = sess.post(url,
json=query,
headers=_headers,
timeout=300)
return resp
\ No newline at end of file
import logging
import os
from typing import Any, Dict, List, Mapping, Optional
from langchain.llms.base import BaseLLM,LLM
from langchain.schema import LLMResult
from langchain.utils import get_from_dict_or_env
from langchain.callbacks.manager import CallbackManagerForLLMRun, Callbacks
from enum import Enum
from pydantic import root_validator, Field
from .ernie_sdk import CompletionRequest, ErnieBot, Message, bot_message, user_message
logger = logging.getLogger(__name__)
class ModelType(Enum):
ERNIE = "ernie"
ERNIE_LITE = "ernie-lite"
SHEETS1 = "sheets1"
SHEETS2 = "sheets2"
SHEET_COMB = "sheet-comb"
LLAMA2_7B = "llama2-7b"
LLAMA2_13B = "llama2-13b"
LLAMA2_70B = "llama2-70b"
QFCN_LLAMA2_7B = "qfcn-llama2-7b"
BLOOMZ_7B="bloomz-7b"
MODEL_SERVICE_BASE_URL = "https://aip.baidubce.com/rpc/2.0/"
MODEL_SERVICE_Suffix = {
ModelType.ERNIE: "ai_custom/v1/wenxinworkshop/chat/completions",
ModelType.ERNIE_LITE: "ai_custom/v1/wenxinworkshop/chat/eb-instant",
ModelType.SHEETS1: "ai_custom/v1/wenxinworkshop/chat/besheet",
ModelType.SHEETS2: "ai_custom/v1/wenxinworkshop/chat/besheets2",
ModelType.SHEET_COMB: "ai_custom/v1/wenxinworkshop/chat/sheet_comb1",
ModelType.LLAMA2_7B: "ai_custom/v1/wenxinworkshop/chat/llama_2_7b",
ModelType.LLAMA2_13B: "ai_custom/v1/wenxinworkshop/chat/llama_2_13b",
ModelType.LLAMA2_70B: "ai_custom/v1/wenxinworkshop/chat/llama_2_70b",
ModelType.QFCN_LLAMA2_7B: "ai_custom/v1/wenxinworkshop/chat/qianfan_chinese_llama_2_7b",
ModelType.BLOOMZ_7B: "ai_custom/v1/wenxinworkshop/chat/bloomz_7b1",
}
class ErnieLLM(LLM):
"""
ErnieLLM is an LLM that uses ERNIE to generate text.
"""
model_name: Optional[ModelType] = None
access_token: Optional[str] = ""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate the environment."""
# print(values)
model_name = ModelType(get_from_dict_or_env(values, "model_name", "model_name", ModelType.ERNIE.value))
access_token = get_from_dict_or_env(values, "access_token", "ERNIE_ACCESS_TOKEN", "")
if not access_token:
raise ValueError("No access token provided.")
values["model_name"] = model_name
values["access_token"] = access_token
return values
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
request = CompletionRequest(messages=[Message("user",prompt)])
bot = ErnieBot(_get_model_service_url(self.model_name), self.access_token or "", request)
try:
# Call the ERNIE service
response = bot.get_response().result
# print("response: ",response)
return response
except Exception as e:
# Handle the exception
print("exception:",e)
return e.__str__()
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "ernie"
# def _identifying_params(self) -> Mapping[str, Any]:
# return {
# "name": "ernie",
# }
def _get_model_service_url(model_name) -> str:
# print("_get_model_service_url model_name: ",model_name)
return MODEL_SERVICE_BASE_URL+MODEL_SERVICE_Suffix[model_name]
class ErnieChat(LLM):
model_name: ModelType
access_token: str
prefix_messages: List = Field(default_factory=list)
id: str = ""
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
msg = user_message(prompt)
request = CompletionRequest(messages=self.prefix_messages+[msg])
bot = ErnieBot(_get_model_service_url(self.model_name),self.access_token,request)
try:
# Call the ERNIE service
response = bot.get_response().result
if self.id == "":
self.id = bot.get_response().id
self.prefix_messages.append(msg)
self.prefix_messages.append(bot_message(response))
return response
except Exception as e:
# Handle the exception
raise e
def _get_id(self) -> str:
return self.id
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "ernie"
\ No newline at end of file
from dataclasses import asdict, dataclass
from typing import List
from pydantic import BaseModel, Field
from enum import Enum
class MessageRole(str, Enum):
USER = "user"
BOT = "assistant"
@dataclass
class Message:
role: str
content: str
@dataclass
class CompletionRequest:
messages: List[Message]
stream: bool = False
user: str = ""
@dataclass
class Usage:
prompt_tokens: int
completion_tokens: int
total_tokens: int
@dataclass
class CompletionResponse:
id: str
object: str
created: int
result: str
need_clear_history: bool
ban_round: int = 0
sentence_id: int = 0
is_end: bool = False
usage: Usage = None
is_safe: bool = False
is_truncated: bool = False
class ErrorResponse(BaseModel):
error_code: int = Field(...)
error_msg: str = Field(...)
id: str = Field(...)
class ErnieBot():
url: str
access_token: str
request: CompletionRequest
def __init__(self, url: str, access_token: str, request: CompletionRequest):
self.url = url
self.access_token = access_token
self.request = request
def get_response(self) -> CompletionResponse:
import requests
import json
headers = {'Content-Type': 'application/json'}
params = {'access_token': self.access_token}
request_dict = asdict(self.request)
response = requests.post(self.url, params=params,data=json.dumps(request_dict), headers=headers)
# print(response.json())
try:
return CompletionResponse(**response.json())
except Exception as e:
print(e)
raise Exception(response.json())
def user_message(prompt: str) -> Message:
return Message(MessageRole.USER, prompt)
def bot_message(prompt: str) -> Message:
return Message(MessageRole.BOT, prompt)
\ No newline at end of file
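A minimal usage sketch for the SDK above; the access token is a placeholder and the URL is the ERNIE chat endpoint assembled the same way as in ernie.py:

# Sketch: one-shot completion through ErnieBot.
access_token = "<your-access-token>"  # placeholder
url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions"
request = CompletionRequest(messages=[user_message("你好,请介绍一下你自己")])
bot = ErnieBot(url, access_token, request)
print(bot.get_response().result)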
import os
import requests
from typing import Dict, Optional,List,Any,Mapping,Iterator
from pydantic import root_validator
from langchain.llms.base import BaseLLM,LLM
from langchain.cache import InMemoryCache
from langchain.callbacks.manager import CallbackManagerForLLMRun, Callbacks, AsyncCallbackManagerForLLMRun
import qianfan
from qianfan import ChatCompletion
# Enable the LLM cache
# langchain.llm_cache = InMemoryCache()
class ChatERNIESerLLM(LLM):
# Model service client
chat_completion:ChatCompletion = None
# url: str = "http://127.0.0.1:8000"
chat_history: dict = []
out_stream: bool = False
cache: bool = False
model_name:str = "ERNIE-Bot"
# def __init__(self):
# self.chat_completion = qianfan.ChatCompletion(ak="pT7sV1smp4AeDl0LjyZuHBV9", sk="b3N0ibo1IKTLZlSs7weZc8jdR0oHjyMu")
@property
def _llm_type(self) -> str:
return self.model_name
def get_num_tokens(self, text: str) -> int:
return len(text)
def convert_data(self,data):
result = []
for item in data:
result.append({'q': item[0], 'a': item[1]})
return result
def _call(self, prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
stream = False,
**kwargs: Any) -> str:
resp = self.chat_completion.do(model=self.model_name,messages=[{
"role": "user",
"content": prompt
}])
print(resp)
assert resp.code == 200
return resp.body["result"]
async def _post_stream(self,
query: Dict,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
stream=False) -> Any:
"""POST请求
"""
async for r in await self.chat_completion.ado(model=self.model_name,messages=[query], stream=stream):
assert r.code == 200
if run_manager:
for callable in run_manager.get_sync().handlers:
await callable.on_llm_new_token(r.body["result"])
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
await self._post_stream(query={
"role": "user",
"content": prompt
},stream=True,run_manager=run_manager)
return ''
\ No newline at end of file
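A minimal usage sketch; since __init__ is commented out, the qianfan ChatCompletion client is passed in explicitly, and the ak/sk values are placeholders:

# Sketch: wire a qianfan ChatCompletion client into ChatERNIESerLLM.
import qianfan

client = qianfan.ChatCompletion(ak="<your-ak>", sk="<your-sk>")  # placeholder credentials
llm = ChatERNIESerLLM(chat_completion=client, model_name="ERNIE-Bot")
print(llm("你好,请介绍一下你自己"))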
import os
import transformers
import torch
from transformers import AutoModel, AutoTokenizer, AutoConfig, DataCollatorForSeq2Seq
from peft import PeftModel
class ModelLoader:
def __init__(self, model_name_or_path, pre_seq_len=0, prefix_projection=False):
self.config = AutoConfig.from_pretrained(model_name_or_path, trust_remote_code=True)
if pre_seq_len is not None and pre_seq_len > 0:
self.config.pre_seq_len = pre_seq_len
self.config.prefix_projection = prefix_projection
self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
self.model = AutoModel.from_pretrained(model_name_or_path, config=self.config, trust_remote_code=True).half()
# self.model = self.model.cuda()
self.base_model = self.model
def quantize(self, quantization_bit):
if quantization_bit is not None:
print(f"Quantized to {quantization_bit} bit")
self.model = self.model.quantize(quantization_bit)
return self.model
def models(self):
return self.model, self.tokenizer
def collator(self):
return DataCollatorForSeq2Seq(tokenizer=self.tokenizer, model=self.model)
def load_lora(self,ckpt_path,name="default"):
# Saves GPU memory during training
peft_loaded = PeftModel.from_pretrained(self.base_model,ckpt_path,adapter_name=name)
self.model = peft_loaded.merge_and_unload()
print(f"Load LoRA model successfully!")
def load_loras(self,ckpt_paths,name="default"):
if len(ckpt_paths)==0:
return
first = True
for name, path in ckpt_paths.items():
print(f"Load {name} from {path}")
if first:
peft_loaded = PeftModel.from_pretrained(self.base_model, path, adapter_name=name)
first = False
else:
peft_loaded.load_adapter(path,adapter_name=name)
peft_loaded.set_adapter(name)
self.model = peft_loaded
def load_prefix(self,ckpt_path):
prefix_state_dict = torch.load(os.path.join(ckpt_path, "pytorch_model.bin"))
new_prefix_state_dict = {}
for k, v in prefix_state_dict.items():
if k.startswith("transformer.prefix_encoder."):
new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
self.model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)
self.model.transformer.prefix_encoder.float()
print(f"Load prefix model successfully!")
import logging
import os
from typing import Any, Dict, List, Mapping, Optional
from langchain.llms.base import BaseLLM,LLM
from langchain.schema import LLMResult
from langchain.utils import get_from_dict_or_env
from langchain.callbacks.manager import CallbackManagerForLLMRun, Callbacks
from enum import Enum
from pydantic import root_validator, Field
from .xinghuo import SparkApi
from .xinghuo.ws import SparkAPI
logger = logging.getLogger(__name__)
text =[]
# length = 0
def getText(role,content):
jsoncon = {}
jsoncon["role"] = role
jsoncon["content"] = content
text.append(jsoncon)
return text
def getlength(text):
length = 0
for content in text:
temp = content["content"]
leng = len(temp)
length += leng
return length
def checklen(text):
while (getlength(text) > 8000):
del text[0]
return text
class SparkLLM(LLM):
"""
SparkLLM is an LLM that uses iFLYTEK Spark (星火) to generate text.
"""
appid: str = Field(
None,
description="APPID",
)
api_key: str = Field(
None,
description="API_KEY",
)
api_secret: str = Field(
None,
description="API_SECRET",
)
version: str = Field(
None,
description="version",
)
api: SparkAPI = Field(
None,
description="api",
)
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate the environment."""
# print(values)
appid = get_from_dict_or_env(values, "appid", "XH_APPID", "")
api_key = get_from_dict_or_env(values, "api_key", "XH_API_KEY", "")
api_secret = get_from_dict_or_env(values, "api_secret", "XH_API_SECRET", "")
version = values.get("version", "v1")
if not appid:
raise ValueError("No appid provided.")
if not api_key:
raise ValueError("No api_key provided.")
if not api_secret:
raise ValueError("No api_secret provided.")
values["appid"] = appid
values["api_key"] = api_key
values["api_secret"] = api_secret
api=SparkAPI(appid,api_key,api_secret,version)
values["api"]=api
return values
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
question = self.getText("user",prompt)
try:
# Call the Spark API
# SparkApi.main(self.appid,self.api_key,self.api_secret,self.Spark_url,self.domain,question)
self.api.call(question)
response = self.api.answer
return response
except Exception as e:
# Handle the exception
print("exception:",e)
raise e
def getText(self,role,content):
text = []
jsoncon = {}
jsoncon["role"] = role
jsoncon["content"] = content
text.append(jsoncon)
return text
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "xinghuo"
\ No newline at end of file
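A minimal usage sketch; the three credentials are placeholders and are read from the environment by validate_environment:

# Sketch: call iFLYTEK Spark through SparkLLM.
import os

os.environ["XH_APPID"] = "<your-appid>"            # placeholder
os.environ["XH_API_KEY"] = "<your-api-key>"        # placeholder
os.environ["XH_API_SECRET"] = "<your-api-secret>"  # placeholder

llm = SparkLLM(version="v1")
print(llm("你好,请介绍一下你自己"))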
from langchain.llms.base import LLM
from langchain.callbacks.manager import CallbackManagerForLLMRun
from pydantic import root_validator
from typing import Dict, List, Optional
from transformers import PreTrainedModel, PreTrainedTokenizer
class WrapperLLM(LLM):
tokenizer: PreTrainedTokenizer = None
model: PreTrainedModel = None
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate the environment."""
# print(values)
if values.get("model") is None:
raise ValueError("No model provided.")
if values.get("tokenizer") is None:
raise ValueError("No tokenizer provided.")
return values
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
resp,his = self.model.chat(self.tokenizer,prompt)
return resp
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "wrapper"
\ No newline at end of file
import _thread as thread
import base64
import datetime
import hashlib
import hmac
import json
from urllib.parse import urlparse
import ssl
from datetime import datetime
from time import mktime
from urllib.parse import urlencode
from wsgiref.handlers import format_date_time
import websocket  # uses the websocket-client package
answer = ""
class Ws_Param(object):
    # initialization
def __init__(self, APPID, APIKey, APISecret, Spark_url):
self.APPID = APPID
self.APIKey = APIKey
self.APISecret = APISecret
self.host = urlparse(Spark_url).netloc
self.path = urlparse(Spark_url).path
self.Spark_url = Spark_url
    # build the signed request url
def create_url(self):
        # RFC1123-formatted timestamp
now = datetime.now()
date = format_date_time(mktime(now.timetuple()))
        # assemble the string to sign
signature_origin = "host: " + self.host + "\n"
signature_origin += "date: " + date + "\n"
signature_origin += "GET " + self.path + " HTTP/1.1"
        # sign with hmac-sha256
signature_sha = hmac.new(self.APISecret.encode('utf-8'), signature_origin.encode('utf-8'),
digestmod=hashlib.sha256).digest()
signature_sha_base64 = base64.b64encode(signature_sha).decode(encoding='utf-8')
authorization_origin = f'api_key="{self.APIKey}", algorithm="hmac-sha256", headers="host date request-line", signature="{signature_sha_base64}"'
authorization = base64.b64encode(authorization_origin.encode('utf-8')).decode(encoding='utf-8')
        # collect the auth parameters into a dict
v = {
"authorization": authorization,
"date": date,
"host": self.host
}
        # append the auth parameters to build the final url
url = self.Spark_url + '?' + urlencode(v)
        # the url built here can be printed and compared against the one generated by the official demo with the same parameters
return url
# websocket error handler
def on_error(ws, error):
print("### error:", error)
# websocket close handler
def on_close(ws,one,two):
print(" ")
# websocket open handler
def on_open(ws):
thread.start_new_thread(run, (ws,))
def run(ws, *args):
data = json.dumps(gen_params(appid=ws.appid, domain= ws.domain,question=ws.question))
ws.send(data)
# websocket message handler
def on_message(ws, message):
# print(message)
data = json.loads(message)
code = data['header']['code']
if code != 0:
print(f'请求错误: {code}, {data}')
ws.close()
else:
choices = data["payload"]["choices"]
status = choices["status"]
content = choices["text"][0]["content"]
print(content,end ="")
global answer
answer += content
# print(1)
if status == 2:
ws.close()
def gen_params(appid, domain,question):
"""
    Build the request parameters from the appid and the user's question.
"""
data = {
"header": {
"app_id": appid,
"uid": "1234"
},
"parameter": {
"chat": {
"domain": domain,
"random_threshold": 0.5,
"max_tokens": 2048,
"auditing": "default"
}
},
"payload": {
"message": {
"text": question
}
}
}
return data
def main(appid, api_key, api_secret, Spark_url,domain, question):
# print("星火:")
global answer
answer = ""
wsParam = Ws_Param(appid, api_key, api_secret, Spark_url)
websocket.enableTrace(False)
wsUrl = wsParam.create_url()
ws = websocket.WebSocketApp(wsUrl, on_message=on_message, on_error=on_error, on_close=on_close, on_open=on_open)
ws.appid = appid
ws.question = question
ws.domain = domain
ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
import SparkApi
# The credentials below come from the iFlytek console
appid = "XXXXXXXX"  # APPID from the console
api_secret = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"  # APISecret from the console
api_key ="XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"  # APIKey from the console
# Model version, "general" (v1.5) or "generalv2" (v2.0)
domain = "general"  # v1.5
# domain = "generalv2"  # v2.0
# Cloud service endpoint
Spark_url = "ws://spark-api.xf-yun.com/v1.1/chat"  # v1.5 endpoint
# Spark_url = "ws://spark-api.xf-yun.com/v2.1/chat"  # v2.0 endpoint
text =[]
# length = 0
def getText(role,content):
jsoncon = {}
jsoncon["role"] = role
jsoncon["content"] = content
text.append(jsoncon)
return text
def getlength(text):
length = 0
for content in text:
temp = content["content"]
leng = len(temp)
length += leng
return length
def checklen(text):
while (getlength(text) > 8000):
del text[0]
return text
if __name__ == '__main__':
    text.clear()
while(1):
Input = input("\n" +"我:")
question = checklen(getText("user",Input))
SparkApi.answer =""
print("星火:",end = "")
SparkApi.main(appid,api_key,api_secret,Spark_url,domain,question)
getText("assistant",SparkApi.answer)
# print(str(text))
import _thread as thread
import base64
import datetime
import hashlib
import hmac
import json
import ssl
from datetime import datetime
from time import mktime
from urllib.parse import urlparse, urlencode
from wsgiref.handlers import format_date_time
import websocket  # uses the websocket-client package
URL_V1_5="ws://spark-api.xf-yun.com/v1.1/chat"
URL_V2="ws://spark-api.xf-yun.com/v2.1/chat"
Domain_V1_5="general"
Domain_V2="generalv2"
class SparkAPI:
def __init__(self, APPID, APIKey, APISecret, Version="v1"):
self.APPID = APPID
self.APIKey = APIKey
self.APISecret = APISecret
if Version == "v1":
self.Spark_url = URL_V1_5
self.domain = Domain_V1_5
elif Version == "v2":
self.Spark_url = URL_V2
self.domain = Domain_V2
self.host = urlparse(self.Spark_url).netloc
self.path = urlparse(self.Spark_url).path
self.answer = ""
def create_url(self):
now = datetime.now()
date = format_date_time(mktime(now.timetuple()))
signature_origin = "host: " + self.host + "\n"
signature_origin += "date: " + date + "\n"
signature_origin += "GET " + self.path + " HTTP/1.1"
signature_sha = hmac.new(self.APISecret.encode('utf-8'), signature_origin.encode('utf-8'),
digestmod=hashlib.sha256).digest()
signature_sha_base64 = base64.b64encode(signature_sha).decode(encoding='utf-8')
authorization_origin = f'api_key="{self.APIKey}", algorithm="hmac-sha256", headers="host date request-line", signature="{signature_sha_base64}"'
authorization = base64.b64encode(authorization_origin.encode('utf-8')).decode(encoding='utf-8')
v = {
"authorization": authorization,
"date": date,
"host": self.host
}
url = self.Spark_url + '?' + urlencode(v)
return url
def on_error(self, ws, error):
print("### error:", error)
def on_close(self, ws, one, two):
print(" ")
def on_open(self, ws):
thread.start_new_thread(self.run, (ws,))
def run(self, ws, *args):
data = json.dumps(self.gen_params(appid=self.APPID, domain=self.domain, question=ws.question))
ws.send(data)
def on_message(self, ws, message):
data = json.loads(message)
code = data['header']['code']
if code != 0:
print(f'请求错误: {code}, {data}')
ws.close()
else:
choices = data["payload"]["choices"]
status = choices["status"]
content = choices["text"][0]["content"]
# print(content, end="")
self.answer += content
if status == 2:
ws.close()
def gen_params(self, appid, domain, question):
data = {
"header": {
"app_id": appid,
"uid": "1234"
},
"parameter": {
"chat": {
"domain": domain,
"random_threshold": 0.5,
"max_tokens": 2048,
"auditing": "default"
}
},
"payload": {
"message": {
"text": question
}
}
}
return data
def call(self, question):
self.answer = ""
wsUrl = self.create_url()
websocket.enableTrace(False)
ws = websocket.WebSocketApp(wsUrl, on_message=self.on_message, on_error=self.on_error, on_close=self.on_close, on_open=self.on_open)
ws.question = question
ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
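# A small sketch of driving SparkAPI directly, outside of the SparkLLM wrapper;
# the credentials are placeholders.
if __name__ == "__main__":
    api = SparkAPI("your-appid", "your-api-key", "your-api-secret", Version="v1")
    api.call([{"role": "user", "content": "你好"}])
    print(api.answer)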
\ No newline at end of file
from abc import ABC, abstractmethod
class BaseCallback(ABC):
@abstractmethod
    def filter(self,title:str,content:str) -> bool:  # return True to drop the current paragraph
pass
\ No newline at end of file
from langchain.text_splitter import CharacterTextSplitter
import re
from typing import List
from .config import SENTENCE_SIZE
class ChineseTextSplitter(CharacterTextSplitter):
def __init__(self, pdf: bool = False, sentence_size: int = SENTENCE_SIZE, **kwargs):
super().__init__(**kwargs)
self.pdf = pdf
self.sentence_size = sentence_size
def split_text1(self, text: str) -> List[str]:
if self.pdf:
text = re.sub(r"\n{3,}", "\n", text)
text = re.sub('\s', ' ', text)
text = text.replace("\n\n", "")
sent_sep_pattern = re.compile('([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))') # del :;
sent_list = []
for ele in sent_sep_pattern.split(text):
if sent_sep_pattern.match(ele) and sent_list:
sent_list[-1] += ele
elif ele:
sent_list.append(ele)
return sent_list
    def split_text(self, text: str) -> List[str]:  # the logic here still needs further refinement
if self.pdf:
text = re.sub(r"\n{3,}", r"\n", text)
text = re.sub('\s', " ", text)
text = re.sub("\n\n", "", text)
        text = re.sub(r'([;;.!?。!?\?])([^”’])', r"\1\n\2", text)  # single-character sentence terminators
        text = re.sub(r'(\.{6})([^"’”」』])', r"\1\n\2", text)  # English ellipsis
        text = re.sub(r'(\…{2})([^"’”」』])', r"\1\n\2", text)  # Chinese ellipsis
        text = re.sub(r'([;;!?。!?\?]["’”」』]{0,2})([^;;!?,。!?\?])', r'\1\n\2', text)
        # a closing quote only ends a sentence when a terminator precedes it, so the \n goes after the quote;
        # the rules above deliberately keep the quote characters intact
        text = text.rstrip()  # drop any extra trailing \n at the end of the paragraph
        # many rule sets also treat the semicolon ; as a terminator; it is ignored here, as are dashes and
        # English double quotes; adjust if you need them
ls = [i for i in text.split("\n") if i]
for ele in ls:
if len(ele) > self.sentence_size:
ele1 = re.sub(r'([,,.]["’”」』]{0,2})([^,,.])', r'\1\n\2', ele)
ele1_ls = ele1.split("\n")
for ele_ele1 in ele1_ls:
if len(ele_ele1) > self.sentence_size:
ele_ele2 = re.sub(r'([\n]{1,}| {2,}["’”」』]{0,2})([^\s])', r'\1\n\2', ele_ele1)
ele2_ls = ele_ele2.split("\n")
for ele_ele2 in ele2_ls:
if len(ele_ele2) > self.sentence_size:
ele_ele3 = re.sub('( ["’”」』]{0,2})([^ ])', r'\1\n\2', ele_ele2)
ele2_id = ele2_ls.index(ele_ele2)
ele2_ls = ele2_ls[:ele2_id] + [i for i in ele_ele3.split("\n") if i] + ele2_ls[
ele2_id + 1:]
ele_id = ele1_ls.index(ele_ele1)
ele1_ls = ele1_ls[:ele_id] + [i for i in ele2_ls if i] + ele1_ls[ele_id + 1:]
id = ls.index(ele)
ls = ls[:id] + [i for i in ele1_ls if i] + ls[id + 1:]
return ls
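# Example of how the splitter above is typically used; the sample text and
# sentence_size are illustrative (the repo default comes from config.SENTENCE_SIZE).
if __name__ == "__main__":
    splitter = ChineseTextSplitter(pdf=False, sentence_size=50)
    sample = "合同双方应当遵守约定。任何一方违约的,应当承担违约责任!具体赔偿金额由双方协商确定。"
    for piece in splitter.split_text(sample):
        print(piece)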
# sentence length used when splitting text
SENTENCE_SIZE = 100
ZH_TITLE_ENHANCE = False
\ No newline at end of file
import os,copy
from langchain.document_loaders import UnstructuredFileLoader, TextLoader, CSVLoader,UnstructuredPDFLoader,UnstructuredWordDocumentLoader,PDFMinerPDFasHTMLLoader
from .config import SENTENCE_SIZE, ZH_TITLE_ENHANCE
from .chinese_text_splitter import ChineseTextSplitter
from .zh_title_enhance import zh_title_enhance
from langchain.schema import Document
from typing import List,Dict,Optional
from loader.callback import BaseCallback
import re
from bs4 import BeautifulSoup
def load(filepath,mode:str = None,sentence_size:int = 0,metadata = None,callbacks = None,**kwargs):
r"""
    Load a document.
    mode: how the document is split: "single", "elements" or "paged"
    sentence_size: documents longer than this are split again into smaller pieces
    kwargs: passed through to the underlying loader
"""
if filepath.lower().endswith(".md"):
loader = UnstructuredFileLoader(filepath, mode=mode or "elements",**kwargs)
elif filepath.lower().endswith(".txt"):
loader = TextLoader(filepath, autodetect_encoding=True,**kwargs)
elif filepath.lower().endswith(".csv"):
loader = CSVLoader(filepath,**kwargs)
elif filepath.lower().endswith(".pdf"):
# loader = UnstructuredPDFLoader(filepath, mode=mode or "elements",**kwargs)
        # use the custom pdf loader below
return __pdf_loader(filepath,sentence_size=sentence_size,metadata=metadata,callbacks=callbacks)
elif filepath.lower().endswith(".docx") or filepath.lower().endswith(".doc"):
loader = UnstructuredWordDocumentLoader(filepath, mode=mode or "elements", **kwargs)
else:
loader = UnstructuredFileLoader(filepath, mode=mode or "elements",**kwargs)
if sentence_size > 0:
return split(loader.load(),sentence_size)
return loader.load()
def loads_path(path:str,**kwargs):
return loads(get_files_in_directory(path),**kwargs)
def loads(filepaths,**kwargs):
default_kwargs={"mode":"paged"}
default_kwargs.update(**kwargs)
documents = [load(filepath=file, **default_kwargs) for file in filepaths]
return [item for sublist in documents for item in sublist]
def append(documents:List[Document] = [],sentence_size:int = SENTENCE_SIZE):  # keep the document structure info; mind the hash handling
effect_documents = []
last_doc = documents[0]
for doc in documents[1:]:
last_hash = "" if "next_hash" not in last_doc.metadata else last_doc.metadata["next_hash"]
doc_hash = "" if "next_hash" not in doc.metadata else doc.metadata["next_hash"]
if len(last_doc.page_content)+len(doc.page_content) <= sentence_size and last_hash == doc_hash:
last_doc.page_content = last_doc.page_content + doc.page_content
continue
else:
effect_documents.append(last_doc)
last_doc = doc
effect_documents.append(last_doc)
return effect_documents
def split(documents:List[Document] = [],sentence_size:int = SENTENCE_SIZE):  # keep the document structure info; mind the hash handling
effect_documents = []
for doc in documents:
if len(doc.page_content) > sentence_size:
            words_list = re.split(r'·-·', doc.page_content.replace("。","。·-·").replace("\n","\n·-·"))  # insert separators, then split on them
document = Document(page_content="",metadata=copy.deepcopy(doc.metadata))
first = True
for word in words_list:
if len(document.page_content) + len(word) < sentence_size:
document.page_content += word
else:
if len(document.page_content.replace(" ","").replace("\n",""))>0:
if first:
first=False
else:
effect_documents[-1].metadata["next_doc"] = document.page_content
effect_documents.append(document)
document = Document(page_content=word,metadata=copy.deepcopy(doc.metadata))
if len(document.page_content.replace(" ","").replace("\n",""))>0:
if first:
first=False
else:
effect_documents[-1].metadata["next_doc"] = document.page_content
effect_documents.append(document)
else:
effect_documents.append(doc)
return effect_documents
def load_file(filepath, sentence_size=SENTENCE_SIZE, using_zh_title_enhance=ZH_TITLE_ENHANCE,mode:str = None,**kwargs):
print("load_file", filepath)
if filepath.lower().endswith(".md"):
loader = UnstructuredFileLoader(filepath, mode=mode or "elements",**kwargs)
docs = loader.load()
elif filepath.lower().endswith(".txt"):
loader = TextLoader(filepath, autodetect_encoding=True, **kwargs)
textsplitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
docs = loader.load_and_split(textsplitter)
elif filepath.lower().endswith(".csv"):
loader = CSVLoader(filepath, **kwargs)
docs = loader.load()
elif filepath.lower().endswith(".pdf"):
loader = UnstructuredPDFLoader(filepath, mode=mode or "elements",**kwargs)
textsplitter = ChineseTextSplitter(pdf=True, sentence_size=sentence_size)
docs = loader.load_and_split(textsplitter)
elif filepath.lower().endswith(".docx"):
loader = UnstructuredWordDocumentLoader(filepath, mode=mode or "elements",**kwargs)
textsplitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
docs = loader.load_and_split(textsplitter)
else:
loader = UnstructuredFileLoader(filepath, mode=mode or "elements",**kwargs)
textsplitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
docs = loader.load_and_split(text_splitter=textsplitter)
if using_zh_title_enhance:
docs = zh_title_enhance(docs)
write_check_file(filepath, docs)
return docs
def write_check_file(filepath, docs):
folder_path = os.path.join(os.path.dirname(filepath), "tmp_files")
if not os.path.exists(folder_path):
os.makedirs(folder_path)
fp = os.path.join(folder_path, 'load_file.txt')
with open(fp, 'a+', encoding='utf-8') as fout:
fout.write("filepath=%s,len=%s" % (filepath, len(docs)))
fout.write('\n')
for i in docs:
fout.write(str(i))
fout.write('\n')
fout.close()
def get_files_in_directory(directory):
file_paths = []
for root, dirs, files in os.walk(directory):
for file in files:
file_path = os.path.join(root, file)
file_paths.append(file_path)
return file_paths
# custom pdf loading section
def __checkV(strings:str):
lines = len(strings.splitlines())
if (lines > 3 and len(strings.replace(" ", ""))/lines < 15):
return False
return True
def __isTitle(strings:str):
return len(strings.splitlines())==1 and len(strings)>0 and strings.endswith("\n")
def __appendPara(strings:str):
return strings.replace(".\n","^_^").replace("。\n","^-^").replace("?\n","?^-^").replace("?\n","?^-^").replace("\n","").replace("^_^",".\n").replace("^-^","。\n").replace("?^-^","?\n").replace("?^-^","?\n")
def __check_fs_ff(line_ff_fs_s,fs,ff):  # if the current line has text in the same font family/size as the previous line, return those; otherwise default to the font of the longest run
re_fs = line_ff_fs_s[-1][0][-1]
re_ff = line_ff_fs_s[-1][1][-1] if line_ff_fs_s[-1][1] else None
max_len = 0
    for ff_fs in line_ff_fs_s:  # find the font family/size of the longest run
c_max = max(list(map(int, ff_fs[0])))
if max_len < ff_fs[2] or (max_len == ff_fs[2] and c_max > int(re_fs)):
max_len = ff_fs[2]
re_fs = c_max
re_ff = ff_fs[1][-1] if ff_fs[1] else None
if fs:
for ff_fs in line_ff_fs_s:
if str(fs) in ff_fs[0] and ff in ff_fs[1]:
re_fs = fs
re_ff = ff
break
return int(re_fs),re_ff
def append_document(snippets1:List[Document],title:str,content:str,callbacks,font_size,page_num,metadate,need_append:bool = False):
if callbacks:
for cb in callbacks:
if isinstance(cb,BaseCallback):
if cb.filter(title,content):
return
if need_append and len(snippets1)>0:
ps = snippets1.pop()
snippets1.append(Document(page_content=ps.page_content+title, metadata=ps.metadata))
else:
doc_metadata = {"font-size": font_size,"page_number":page_num}
doc_metadata.update(metadate)
snippets1.append(Document(page_content=title+content, metadata=doc_metadata))
'''
Parse a pdf document, splitting it by title and body text; each chunk takes the page number of the page its title appears on.
Chunks longer than sentence_size are split again, and the sub-chunks keep the parent chunk's page number.
'''
def __pdf_loader(filepath:str,sentence_size:int = 0,metadata = None,callbacks = None):
if not filepath.lower().endswith(".pdf"):
raise ValueError("file is not pdf document")
loader = PDFMinerPDFasHTMLLoader(filepath)
documents = loader.load()
soup = BeautifulSoup(documents[0].page_content,'html.parser')
content = soup.find_all('div')
    cur_fs = None  # font-size of the current text
    last_fs = None  # font-size of the previous text block
    cur_ff = None  # font family of the current text
    cur_text = ''
    fs_increasing = False  # the next line's font got bigger: treat it as a title and split here
    last_text = ''
    last_page_num = 1  # previous page number; combined with page_split to decide the current chunk's page
    page_num = 1  # initial page number
    page_change = False  # the page changed
    page_split = False  # whether text was split on this page
    last_is_title = False  # whether the previous chunk was a title
snippets:List[Document] = []
filename = os.path.basename(filepath)
if metadata:
metadata.update({'source':filepath,'filename':filename,'filetype': 'application/pdf'})
else:
metadata = {'source':filepath,'filename':filename,'filetype': 'application/pdf'}
for c in content:
divs = c.get('style')
if re.match(r"^(Page|page)",c.text): #检测当前页的页码
match = re.match(r"^(page|Page)\s+(\d+)",c.text)
if match:
                if page_split:  # if text was split, advance the page; otherwise keep the chunk's starting page
last_page_num = page_num
page_num = match.group(2)
                if len(last_text)+len(cur_text) == 0:  # if the page changed while no text is pending, the previous page becomes the current page
last_page_num = page_num
page_change = True
page_split = False
continue
        if re.findall('writing-mode:(.*?);',divs) == ['False'] or re.match(r'^[0-9\s\n]+$',c.text) or re.match(r"^第\s+\d+\s+页$",c.text):  # skip hidden text, pure digits, or "第 N 页" page markers
continue
        if len(c.text.replace("\n","").replace(" ","")) <= 1:  # drop lines with at most one effective character
continue
sps = c.find_all('span')
if not sps:
continue
        line_ff_fs_s = []  # spans with more than one effective character
        line_ff_fs_s2 = []  # spans with exactly one effective character
        for sp in sps:  # a line may contain several differently styled spans
sp_len = len(sp.text.replace("\n","").replace(" ",""))
if sp_len > 0:
st = sp.get('style')
if st:
ff_fs = (re.findall('font-size:(\d+)px',st),re.findall('font-family:(.*?);',st),len(sp.text.replace("\n","").replace(" ","")))
                    if sp_len == 1:  # separate out single-character spans
line_ff_fs_s2.append(ff_fs)
else:
line_ff_fs_s.append(ff_fs)
        if len(line_ff_fs_s)==0:  # if empty, fall back to the single-character spans
if len(line_ff_fs_s2)>0:
line_ff_fs_s = line_ff_fs_s2
else:
if len(c.text)>0:
page_change = False
continue
fs,ff = __check_fs_ff(line_ff_fs_s,cur_fs,cur_ff)
if not cur_ff:
cur_ff = ff
if not cur_fs:
cur_fs = fs
        if (abs(fs - cur_fs) <= 1 and ff == cur_ff):  # neither the font family nor the size changed
cur_text += c.text
cur_fs = fs
page_change = False
            if len(cur_text.splitlines()) > 3:  # after several consecutive lines fs_increasing no longer applies
fs_increasing = False
else:
            if page_change and cur_fs > fs+1:  # page changed and the font got smaller: most likely a page header, so skip c.text (may occasionally drop a real line)
page_change = False
continue
            if last_is_title:  # the previous chunk was a title
                if __isTitle(cur_text) or fs_increasing:  # consecutive titles, or the font-increase flag is set
last_text = last_text + cur_text
last_is_title = True
fs_increasing = False
else:
append_document(snippets,last_text,__appendPara(cur_text),callbacks,cur_fs,page_num if page_split else last_page_num,metadata)
page_split = True
last_text = ''
last_is_title = False
                    fs_increasing = int(fs) > int(cur_fs)  # font got bigger
else:
                if len(last_text)>0 and __checkV(last_text):  # filter out some text
                    # merge chunks that cross pages or have only a few lines
append_document(snippets,__appendPara(last_text),"",callbacks,last_fs,page_num if page_split else last_page_num,metadata,need_append=len(last_text.splitlines()) <= 2 or page_change)
page_split = True
last_text = cur_text
last_is_title = __isTitle(last_text) or fs_increasing
fs_increasing = int(fs) > int(cur_fs)
if page_split:
last_page_num = page_num
last_fs = cur_fs
cur_fs = fs
cur_ff = ff
cur_text = c.text
page_change = False
append_document(snippets,last_text,__appendPara(cur_text),callbacks,cur_fs,page_num if page_split else last_page_num,metadata)
if sentence_size > 0:
return split(snippets,sentence_size)
return snippets
from typing import List
from langchain.docstore.document import Document
import re
def under_non_alpha_ratio(text: str, threshold: float = 0.5):
"""Checks if the proportion of non-alpha characters in the text snippet exceeds a given
threshold. This helps prevent text like "-----------BREAK---------" from being tagged
as a title or narrative text. The ratio does not count spaces.
Parameters
----------
text
The input string to test
threshold
        If the proportion of alphabetic characters is below this threshold, the function
        returns True
"""
if len(text) == 0:
return False
alpha_count = len([char for char in text if char.strip() and char.isalpha()])
total_count = len([char for char in text if char.strip()])
try:
ratio = alpha_count / total_count
return ratio < threshold
except:
return False
def is_possible_title(
text: str,
title_max_word_length: int = 20,
non_alpha_threshold: float = 0.5,
) -> bool:
"""Checks to see if the text passes all of the checks for a valid title.
Parameters
----------
text
The input text to check
title_max_word_length
The maximum number of words a title can contain
non_alpha_threshold
        The minimum proportion of alphabetic characters the text needs to be considered a title
"""
    # empty text can never be a title
if len(text) == 0:
print("Not a title. Text is empty.")
return False
    # text that ends with punctuation is not a title
ENDS_IN_PUNCT_PATTERN = r"[^\w\s]\Z"
ENDS_IN_PUNCT_RE = re.compile(ENDS_IN_PUNCT_PATTERN)
if ENDS_IN_PUNCT_RE.search(text) is not None:
return False
    # the text must not exceed the configured length, 20 by default
# NOTE(robinson) - splitting on spaces here instead of word tokenizing because it
# is less expensive and actual tokenization doesn't add much value for the length check
if len(text) > title_max_word_length:
return False
    # too high a proportion of non-alphabetic characters disqualifies a title
if under_non_alpha_ratio(text, threshold=non_alpha_threshold):
return False
# NOTE(robinson) - Prevent flagging salutations like "To My Dearest Friends," as titles
if text.endswith((",", ".", ",", "。")):
return False
if text.isnumeric():
print(f"Not a title. Text is all numeric:\n\n{text}") # type: ignore
return False
    # the leading characters (first 5 by default) should contain a digit
if len(text) < 5:
text_5 = text
else:
text_5 = text[:5]
alpha_in_text_5 = sum(list(map(lambda x: x.isnumeric(), list(text_5))))
if not alpha_in_text_5:
return False
return True
def zh_title_enhance(docs: List[Document]) -> List[Document] | None:
title = None
if len(docs) > 0:
for doc in docs:
if is_possible_title(doc.page_content):
doc.metadata['category'] = 'cn_Title'
title = doc.page_content
elif title:
doc.page_content = f"下文与({title})有关。{doc.page_content}"
return docs
else:
print("文件不存在")
from typing import List
from .prompts import QuestionGeneratePromptTemplate,AnswerPromptTemplate
from langchain.base_language import BaseLanguageModel
from langchain.chains import LLMChain
from langchain.prompts import StringPromptTemplate,PromptTemplate
class QAGenerator:
def __init__(self, llm: BaseLanguageModel):
self.prompt = QuestionGeneratePromptTemplate(input_variables=["knowledge","question_number"])
self.chain = LLMChain(llm=llm, prompt=self.prompt)
self.answer_prompt = AnswerPromptTemplate(input_variables=["knowledge","question"])
self.answer_chain = LLMChain(llm=llm, prompt=self.answer_prompt)
def generate_questions(self, knowledge: str,question_number: int | None=None) -> List[str]:
output = self.chain.run({"knowledge":knowledge,"question_number":question_number})
lines = [line for line in output.split("\n") if line.strip() and line.startswith("问:")]
return lines
def generate_answer(self, knowledge: str, question: str) -> str:
answer = self.answer_chain.run({"knowledge":knowledge,"question":question})
return answer
def generate(self, knowledge: str, question_number=3):
questions = self.generate_questions(knowledge, question_number)
if len(questions) == 0:
return None
questions = [question.replace("问:","") for question in questions if question.startswith("问:")]
answers = []
for question in questions:
answer = self.generate_answer(knowledge, question)
answers.append(answer)
return [(question,answer) for question,answer in zip(questions,answers)]
prompt_template="""从下面这段话中提取出关键信息,生成 {question_number} 个问题,并回答相关问题。
输出格式为一问一答,问题以"问:"开头,答案以"答:"开头。
不需要标注问题序号, 问题和答案相互映射,每个问题相互独立。
{knowledge}
"""
class TrainData:
def __init__(self, llm: BaseLanguageModel):
self.prompt = PromptTemplate.from_template(prompt_template)
self.chain = LLMChain(llm=llm, prompt=self.prompt,verbose=False)
def generate(self, knowledge: str, question_number=3):
res=self.chain.run({"knowledge":knowledge,"question_number":question_number})
print(res)
qas = []
q, a = None, ''
lines = res.split("\n")
for line in lines:
line = line.strip()
if line.startswith('问:'):
if q is not None and a is not None:
qas.append((q, a))
q = line.replace('问:', '')
a = ''
elif line.startswith('答:'):
a += line.replace('答:', '') + '\n'
elif a is not None:
a += line + '\n'
if q is not None and a is not None:
qas.append((q, a))
return qas
# qas = [qa for qa in qas if qa.strip()]
# questions = [qa for qa in qas if qa.startswith("问:")]
# answers = [qa for qa in qas if qa.startswith("答:")]
# if len(questions) != len(answers):
# print(ValueError("questions and answers are not equal"))
# return []
# return [(question.strip().replace("问:",""),answer.strip().replace("答:","")) for question,answer in zip(questions,answers)]
# return [{"Q":question,"A":answer} for question,answer in zip(questions,answers)]
\ No newline at end of file
from typing import Any
from langchain.prompts import StringPromptTemplate,PromptTemplate
from pydantic import BaseModel, validator
template="""
{knowledge}
请从上述内容中归纳总结出 {question_number} 个问题,问题以"问:"开头,以问号结尾,用空行分隔。
"""
prompt=PromptTemplate.from_template(template)
class QuestionGeneratePromptTemplate(StringPromptTemplate, BaseModel):
def format(self, **kwargs) -> str:
if "question_number" not in kwargs or kwargs["question_number"] is None:
question_number = 5
else:
question_number = kwargs["question_number"]
if "knowledge" not in kwargs:
raise ValueError("knowledge is required")
knowledge = kwargs["knowledge"]
return prompt.format(question_number=question_number, knowledge=knowledge)
def _prompt_type(self):
return "question-generator"
template_answer="""
请参考以下内容中回答问题:{question},回答以"答:"开头。
下面是你参考的内容:
{knowledge}
"""
class AnswerPromptTemplate(StringPromptTemplate, BaseModel):
def format(self, **kwargs: Any) -> str:
if "knowledge" not in kwargs:
raise ValueError("knowledge is required")
if "question" not in kwargs:
raise ValueError("question is required")
return template_answer.format(question=kwargs["question"], knowledge=kwargs["knowledge"])
\ No newline at end of file
from langchain import PromptTemplate
from langchain.base_language import BaseLanguageModel
from langchain.chains import LLMChain
question_rdf_prompt_template="""
你的任务是帮助我重新整理问题,使得问题中的指代信息更加明确,你的输出只会是原问题或者重新整理之后的问题,不包含其他内容。
==============================
{history}
以上是对话历史。
==============================
这是我的问题:
{question}
==============================
如果你认为这个问题中存在不明确的指代信息,请从对话历史中提取出相关信息,重新定义问题并直接输出,不要增加无关内容。
否则,请直接输出原问题。直接输出原问题的格式为:
{question}
"""
question_rdf_prompt=PromptTemplate.from_template(question_rdf_prompt_template)
class QuestionRDF:
def __init__(self,llm:BaseLanguageModel):
self.llm=llm
self.chain=LLMChain(llm=llm,prompt=question_rdf_prompt,verbose=True)
def generate(self,history:str,question:str):
res=self.chain.run({"history":history,"question":question})
return res
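# Illustrative usage of QuestionRDF: rewrite a follow-up question so that its
# pronouns are resolved against the chat history. The LLM choice and the
# history text are placeholders.
if __name__ == "__main__":
    from llm.ernie import ErnieLLM
    rdf = QuestionRDF(ErnieLLM())
    history = "用户: 这份合同的买方是谁?\n助手: 买方是甲公司。"
    print(rdf.generate(history=history, question="它的注册地址在哪里?"))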
accelerate==0.21.0
aiofiles==23.1.0
aiohttp==3.8.5
aiosignal==1.3.1
altair==5.0.1
annotated-types==0.5.0
anyio==3.7.1
async-timeout==4.0.2
attrs==23.1.0
bitsandbytes==0.41.1
blinker==1.6.2
certifi==2023.7.22
cffi==1.15.1
chardet==5.1.0
charset-normalizer==3.2.0
click==8.1.6
cmake==3.27.0
contourpy==1.1.0
cpm-kernels==1.0.11
cryptography==41.0.2
cycler==0.11.0
dataclasses-json==0.5.13
datasets==2.14.0
dill==0.3.7
et-xmlfile==1.1.0
faiss==1.7.4
fastapi==0.100.0
ffmpy==0.3.1
filelock==3.12.2
filetype==1.2.0
Flask==2.3.2
fonttools==4.41.1
frozenlist==1.4.0
fsspec==2023.6.0
gevent==23.9.0.post1
gradio==3.38.0
gradio_client==0.2.10
greenlet==2.0.2
h11==0.14.0
httpcore==0.17.3
httpx==0.24.1
huggingface-hub==0.16.4
idna==3.4
itsdangerous==2.1.2
jieba==0.42.1
Jinja2==3.1.2
joblib==1.3.1
jsonschema==4.18.4
jsonschema-specifications==2023.7.1
kiwisolver==1.4.4
langchain==0.0.242
langsmith==0.0.14
latex2mathml==3.76.0
linkify-it-py==2.0.2
lit==16.0.6
loguru==0.7.0
lxml==4.9.3
Markdown==3.4.4
markdown-it-py==2.2.0
MarkupSafe==2.1.3
marshmallow==3.20.1
matplotlib==3.7.2
mdit-py-plugins==0.3.3
mdtex2html==1.2.0
mdurl==0.1.2
mpmath==1.3.0
msg-parser==1.2.0
multidict==6.0.4
multiprocess==0.70.15
mypy-extensions==1.0.0
networkx==3.1
nltk==3.8.1
numexpr==2.8.4
numpy @ file:///home/conda/feedstock_root/build_artifacts/numpy_1691056235090/work
nvidia-cublas-cu11==11.10.3.66
nvidia-cuda-cupti-cu11==11.7.101
nvidia-cuda-nvrtc-cu11==11.7.99
nvidia-cuda-runtime-cu11==11.7.99
nvidia-cudnn-cu11==8.5.0.96
nvidia-cufft-cu11==10.9.0.58
nvidia-curand-cu11==10.2.10.91
nvidia-cusolver-cu11==11.4.0.1
nvidia-cusparse-cu11==11.7.4.91
nvidia-nccl-cu11==2.14.3
nvidia-nvtx-cu11==11.7.91
olefile==0.46
openai==0.28.0
openapi-schema-pydantic==1.2.4
openpyxl==3.1.2
orjson==3.9.2
packaging==23.1
pandas==2.0.3
pdf2image==1.16.3
pdfminer.six==20221105
peft==0.4.0
Pillow==10.0.0
pipdeptree==2.13.0
psutil==5.9.5
psycopg2==2.9.7
pyarrow==12.0.1
pycparser==2.21
pydantic==1.10.12
pydantic_core==2.4.0
pydub==0.25.1
pypandoc==1.11
pyparsing==3.0.9
python-dateutil==2.8.2
python-docx==0.8.11
python-dotenv==1.0.0
python-magic==0.4.27
python-multipart==0.0.6
python-pptx==0.6.21
pytz==2023.3
PyYAML==6.0.1
referencing==0.30.0
regex==2023.6.3
requests==2.31.0
rouge-chinese==1.0.3
rpds-py==0.9.2
safetensors==0.3.1
scikit-learn==1.3.0
scipy==1.11.1
semantic-version==2.10.0
sentence-transformers==2.2.2
sentencepiece==0.1.99
six==1.16.0
sniffio==1.3.0
SQLAlchemy==2.0.19
starlette==0.27.0
sympy==1.12
tabulate==0.9.0
tenacity==8.2.2
threadpoolctl==3.2.0
tiktoken==0.4.0
tokenizers==0.13.3
toolz==0.12.0
torch==2.0.1
torchkeras==3.9.2
torchvision==0.15.2
tqdm==4.65.0
transformers==4.31.0
triton==2.0.0
typing-inspect==0.9.0
typing_extensions==4.7.1
tzdata==2023.3
uc-micro-py==1.0.2
unstructured==0.8.1
urllib3==2.0.4
uvicorn==0.23.1
uWSGI @ file:///croot/uwsgi_1688631110587/work
websocket==0.2.1
websocket-client==1.6.2
websockets==11.0.3
Werkzeug==2.3.6
xlrd==2.0.1
XlsxWriter==3.1.2
xxhash==3.2.0
yarl==1.9.2
zope.event==5.0
zope.interface==6.0
import sys
sys.path.append("../..")
import gradio as gr
import torch
from contract.extraction import ElementsExtractor
from llm.chatglm import ChatGLMLocLLM
from llm.ernie import ErnieLLM
from llm.baichuan import BaichuanLLM
from loader.load import load_file,load
from common import consts
from flask.cli import load_dotenv
load_dotenv()
from argparse import Namespace
from llm.loader import ModelLoader
cfg = Namespace()
#model
cfg.model_name_or_path = consts.MODEL_PATH_ChatGLM2_32K
cfg.lora_checkpoint = '/home/zfh/aird/model/ckpt/chatglm-6b-lora-spdsvb-INSv8-1e-03-30'
cfg.pre_seq_len = None
cfg.quantization_bit = None
loader = ModelLoader(cfg.model_name_or_path, cfg.quantization_bit)
# loader.load_lora(cfg.lora_checkpoint)
model,tokenizer = loader.models()
model = model.eval()
# max_length=1000
# Define the Gradio interface
def contract(chatbot, file, input, history, max_length):
print("contract",file,input,max_length)
chatbot.append((input, ""))
    content = None
    if file is not None:
content = load(file.name)
content="\n".join([d.page_content for d in content])
print(len(content))
if content is not None and len(content) > 0:
if len(history) == 0:
prompt = f"{content}请基于上述内容回答以下问题:\n{input}"
else:
prompt = input
else:
prompt = input
# print("prompt: ",prompt)
for response, history in model.stream_chat(tokenizer, prompt, history, max_length=max_length): # type: ignore
chatbot[-1] = (input, response)
yield chatbot, history
def reset(history):
history=[]
file.value=None
return history
with gr.Blocks() as demo:
gr.HTML("""<h1 align="center">商业合同要素提取</h1>""")
history = gr.State([])
chatbot = gr.Chatbot()
# with gr.Column(scale=4):
# with gr.Row():
# input=gr.Textbox(label="输入文本", type="text", lines=5)
with gr.Row():
with gr.Column(scale=4):
file = gr.File(label="上传文件")
with gr.Column(scale=4):
input=gr.Textbox(label="输入文本", type="text", lines=5)
with gr.Column(scale=4):
with gr.Row():
max_length = gr.Slider(1000, 30000, value=30000, step=1000, label="单次提取使用的文本长度", interactive=True)
with gr.Row():
submit_btn=gr.Button("开始提取")
reset_btn=gr.Button("重置")
reset_btn.click(reset,inputs=[history],outputs=[history])
submit_btn.click(contract,inputs=[chatbot, file, input, history, max_length],outputs=[chatbot,history])
demo.queue().launch(share=True)
\ No newline at end of file
from flask import Flask, jsonify, request
import tempfile
import shutil
import sys
sys.path.append("../..")
from llm.chatglm import ChatGLMLocLLM
from llm.ernie import ErnieLLM,ModelType
from llm.baichuan import BaichuanLLM
from loader.load import load_file,load
from contract.extraction import ElementsExtractor
app = Flask(__name__)
temp_dir = tempfile.mkdtemp()
# llm = ChatGLMLocLLM(model_name="/home/zfh/models/chatglm-6b")
llm = ErnieLLM(model_name=ModelType.ERNIE_LITE,access_token="24.a2dab1f44fdee40ff5fe1923d8dbdfcb.2592000.1692415837.282335-32870719")
extractor=ElementsExtractor(llm=llm)
@app.route('/contract', methods=['POST'])
def contract():
    # get the uploaded file object
print(request.files)
file = request.files['file']
if file is None:
return jsonify({'message': 'Error: could not load file'})
filename =temp_dir + '/' + file.filename
file.save(filename)
    # parse the form parameters
elements = request.form.get('elements').split(",")
max_length = request.form.get('max_length')
max_length = int(max_length) if max_length is not None else 1000
print(file,elements,max_length)
    # run the model over the document
docs = load(filename)
if docs is None:
return jsonify({'message': 'Error: could not load file'})
print(len(docs))
content = []
content_len = 0
values={}
for e in elements:
values[e]=""
for d in docs:
if content_len+len(d.page_content)>max_length:
doc = "\n".join(content)
eles = extractor.extract(doc,elements)
for e in eles:
try:
k,v = e.split(":",maxsplit=1)
k = k.strip()
v = v.strip()
if v is not None and v != "" and v!="未知" and k in elements:
values[k]=v+","+values[k] if k in values else v
except Exception as exp:
print(exp)
print(e)
continue
print("\n".join([f"{k}:{v}" for k,v in values.items()]))
content=[d.page_content]
content_len=len(d.page_content)
else:
content.append(d.page_content)
content_len+=len(d.page_content)
return jsonify(values)
@app.route('/example', methods=['GET'])
def example():
return jsonify({'message': 'Hello World!'})
if __name__ == '__main__':
app.run(debug=True,port=8080,host='0.0.0.0')
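# Illustrative client call for the /contract endpoint above (kept commented so it
# does not interfere with app.run); the file path and element names are placeholders.
#
#   import requests
#   with open("contract.pdf", "rb") as f:
#       resp = requests.post(
#           "http://localhost:8080/contract",
#           files={"file": ("contract.pdf", f)},
#           data={"elements": "合同号,买方,卖方,合同金额", "max_length": "1000"},
#       )
#   print(resp.json())  # JSON object mapping each requested element to the extracted value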
\ No newline at end of file
import sys
sys.path.append("../..")
import gradio as gr
import torch
from contract.extraction import ElementsExtractor
from llm.chatglm import ChatGLMLocLLM
from llm.ernie import ErnieLLM
from llm.baichuan import BaichuanLLM
from loader.load import load_file,load
from common import consts
from flask.cli import load_dotenv
load_dotenv()
# Load the model
# llms = ["ChatGLM","ChatGLM2","Ernie","ChatGLM2-32K"]
# llm = ChatGLMLocLLM(model_name=consts.MODEL_PATH_ChatGLM2_32K)
from llm.loader import ModelLoader
from llm.wrapper import WrapperLLM
loader = ModelLoader(consts.MODEL_PATH_ChatGLM2)
model,tokenizer = loader.models()
llm=WrapperLLM(model=model,tokenizer=tokenizer)
# llm = ErnieLLM()
def extract_values(values,content, elements, extractor):
doc = "\n".join(content)
eles = extractor.extract_foreach(doc, elements)
# eles = extractor.extract(doc, elements)
for e in eles:
try:
k, v = e.split(":", maxsplit=1)
k = k.strip()
v = v.strip()
if v is not None and v != "" and v != "未知" and k in elements:
values[k] = v + "," + values[k] if k in values else v
except Exception as exp:
print(exp)
print(e)
continue
return values
extractor=ElementsExtractor(llm=llm)
elements = ["合同号","买方","卖方","合同金额","合同签订日期","装运标记","甲方","乙方","甲方地址","乙方地址"]
# max_length=1000
# Define the Gradio interface
def contract(file,elements,max_length):
print(file,elements,max_length)
docs = load(file.name)
if docs is None:
return "Error: could not load file"
print(len(docs))
content = []
content_len = 0
values={k:"" for k in elements}
for d in docs:
if content_len+len(d.page_content)>max_length:
values = extract_values(values,content, elements, extractor)
print("\n".join([f"{k}:{v}" for k,v in values.items()]))
content=[d.page_content]
content_len=len(d.page_content)
else:
content.append(d.page_content)
content_len+=len(d.page_content)
values = extract_values(values,content, elements, extractor)
return "\n".join([f"{k}:{v}" for k,v in values.items()])
def change_llm_type(llm_type):
print("change_llm_type",llm_type)
global llm,extractor
del llm
llm=ErnieLLM()
torch.cuda.empty_cache()
if llm_type=="ChatGLM":
llm = ChatGLMLocLLM(model_name=consts.MODEL_PATH_ChatGLM)
elif llm_type=="ChatGLM2":
llm = ChatGLMLocLLM(model_name=consts.MODEL_PATH_ChatGLM2)
elif llm_type=="ChatGLM2-32k":
llm = ChatGLMLocLLM(model_name=consts.MODEL_PATH_ChatGLM2_32K)
elif llm_type=="Ernie":
llm = ErnieLLM()
elif llm_type=="baichuan-13b":
llm = BaichuanLLM(model_name="../../models/Baichuan-13B-Chat",quantization_bit=8)
else:
llm = ErnieLLM()
if llm is not None:
extractor=ElementsExtractor(llm=llm)
return llm_type
def add_element(ele_new):
print("add_element",elements,ele_new)
elements.append(ele_new)
return {ele_group:gr.update(choices=elements),
ele_new_box:gr.update(value="")}
def reset():
output.value=""
file.value=None
with gr.Blocks() as demo:
gr.HTML("""<h1 align="center">商业合同要素提取</h1>""")
with gr.Row():
with gr.Column(scale=4):
with gr.Row():
file = gr.File(label="上传文件")
with gr.Row():
submit_btn=gr.Button("开始提取")
# reset_btn=gr.Button("重置", type="reset")
# reset_btn.click(reset)
with gr.Row():
output=gr.Textbox(label="提取结果", type="text", lines=20)
with gr.Column(scale=1):
with gr.Row():
max_length = gr.Slider(1000, 30000, value=5000, step=1000, label="单次提取使用的文本长度", interactive=True)
# if llm.model_name==consts.MODEL_PATH_ChatGLM2_32K:
# max_length.value=30000
# with gr.Row():
# llm_type = gr.Radio(llms, label="语言模型类型", value="ChatGLM2", interactive=True)
# llm_type.change(change_llm_type, inputs=[llm_type],outputs=[llm_type])
with gr.Row():
ele_group = gr.CheckboxGroup(choices=elements, label="需要提取的元素", value=elements, interactive=True)
with gr.Row():
ele_new_box = gr.Textbox(label="新增元素", type="text", lines=1)
ele_new_btn = gr.Button("新增")
ele_new_btn.click(add_element,inputs=[ele_new_box],outputs=[ele_group,ele_new_box])
submit_btn.click(contract,inputs=[file,ele_group,max_length],outputs=output)
demo.queue().launch(share=True)
\ No newline at end of file
#!/bin/bash
# Set the path to the server.py script
SERVER_PATH=server.py
# Set the default values for the arguments
MODEL_NAME_OR_PATH="../../../model/chatglm2-6b"
CHECKPOINT=None
CHECKPOINT_PATH="../../../model/ckpt/chatglm2-6b-qlora-INSv11-rank16-1e-3-30/checkpoint-2000"
PRE_SEQ_LEN=128
QUANTIZATION_BIT=8
PORT=8002
# Call the server.py script with the parsed arguments
python $SERVER_PATH \
--model_name_or_path $MODEL_NAME_OR_PATH \
--checkpoint $CHECKPOINT \
--checkpoint_path $CHECKPOINT_PATH \
--pre_seq_len $PRE_SEQ_LEN \
--quantization_bit $QUANTIZATION_BIT \
--port $PORT
\ No newline at end of file
#!/bin/bash
# Set the path to the server.py script
SERVER_PATH=server.py
# Set the default values for the arguments
MODEL_NAME_OR_PATH="../../../model/chatglm2-6b"
CHECKPOINT=lora
CHECKPOINT_PATH="../../../model/ckpt/chatglm2-6b-qlora-INSv11-rank16-1e-3-30/checkpoint-2000"
QUANTIZATION_BIT=8
PORT=8001
# Call the server.py script with the parsed arguments
python $SERVER_PATH \
--model_name_or_path $MODEL_NAME_OR_PATH \
--checkpoint $CHECKPOINT \
--checkpoint_path $CHECKPOINT_PATH \
--quantization_bit $QUANTIZATION_BIT \
--port $PORT
\ No newline at end of file
import argparse
import time
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
from transformers import AutoModel, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel, PeftConfig
import uvicorn, json, datetime
import torch
import asyncio
import os
import sys
sys.path.append("../..")
from typing import AsyncIterable, Awaitable
from pydantic import BaseModel
import uvicorn
# from dotenv import load_dotenv
import signal
DEVICE = "cuda"
DEVICE_ID = "0"
CUDA_DEVICE = f"{DEVICE}:{DEVICE_ID}" if DEVICE_ID else DEVICE
def torch_gc():
if torch.cuda.is_available():
with torch.cuda.device(CUDA_DEVICE):
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
def build_history(history):
result = []
for item in history if history else []:
result.append((item['q'], item['a']))
return result
def convert_data(data):
result = []
for item in data:
result.append({'q': item[0], 'a': item[1]})
return result
class StreamRequest(BaseModel):
"""Request body for streaming."""
message: str
stop_stream = False
def signal_handler(signal, frame):
global stop_stream
stop_stream = True
async def send_message(message: str,history=[],max_length=2048,top_p=0.7,temperature=0.95) -> AsyncIterable[str]:
global model, tokenizer, stop_stream
count = 0
old_len = 0
print(message)
output = ''
for response, history in model.stream_chat(tokenizer, message, history=history,
max_length=max_length,
top_p=top_p,
temperature=temperature):
# print(old_len,count)
if stop_stream:
stop_stream = False
break
else:
output = response[old_len:]
print(output, end='',flush=True)
# print(output)
old_len = len(response)
signal.signal(signal.SIGINT, signal_handler)
yield f"{output}"
print("")
# yield f"\n"
# print()
app = FastAPI()
@app.post("/stream")
async def stream(request: Request):
json_post_raw = await request.json()
json_post = json.dumps(json_post_raw)
json_post_list = json.loads(json_post)
prompt = json_post_list.get('prompt')
history = build_history(json_post_list.get('history'))
max_length = json_post_list.get('max_length')
top_p = json_post_list.get('top_p')
temperature = json_post_list.get('temperature')
return StreamingResponse(send_message(prompt,history=history,max_length=max_length if max_length else 2048,
top_p=top_p if top_p else 0.7,
temperature=temperature if temperature else 0.95), media_type="text/plain")
@app.post("/")
async def create_item(request: Request):
global model, tokenizer
json_post_raw = await request.json()
json_post = json.dumps(json_post_raw)
json_post_list = json.loads(json_post)
prompt = json_post_list.get('prompt')
history = build_history(json_post_list.get('history'))
max_length = json_post_list.get('max_length')
top_p = json_post_list.get('top_p')
temperature = json_post_list.get('temperature')
response, history = model.chat(tokenizer,
prompt,
history=history,
max_length=max_length if max_length else 2048,
top_p=top_p if top_p else 0.7,
temperature=temperature if temperature else 0.95)
now = datetime.datetime.now()
time = now.strftime("%Y-%m-%d %H:%M:%S")
answer = {
"response": response,
"history": history,
"status": 200,
"time": time
}
log = "[" + time + "] " + '", prompt:"' + prompt + '", response:"' + repr(response) + '"'
print(log)
torch_gc()
return answer
@app.post("/tokens")
async def get_num_tokens(request: Request):
global model, tokenizer
json_post_raw = await request.json()
json_post = json.dumps(json_post_raw)
json_post_list = json.loads(json_post)
prompt = json_post_list.get('prompt')
tokens = tokenizer.encode(prompt, add_special_tokens=False)
print("=======================================")
print("=======================================")
print(len(tokens),prompt)
print("=======================================")
print("=======================================")
now = datetime.datetime.now()
time = now.strftime("%Y-%m-%d %H:%M:%S")
answer = {
"response": len(tokens),
"status": 200,
"time": time
}
return answer
def parse_args():
parser = argparse.ArgumentParser(description='ChatGLM2-6B Server')
    parser.add_argument('--model_name_or_path', type=str, default='THUDM/chatglm2-6b', help='model id or local path')
    parser.add_argument('--checkpoint', type=str, default=None, help='checkpoint type (None, ptuning, lora)')
    parser.add_argument('--checkpoint_path', type=str, default='../../../model/ckpt/chatglm2-6b-qlora-INSv11-rank16-1e-3-30/checkpoint-2000', help='checkpoint path')
    parser.add_argument('--pre_seq_len', type=int, default=128, help='prefix length (pre_seq_len)')
    parser.add_argument('--quantization_bit', type=int, default=None, help='quantization bits (4 or 8)')
    parser.add_argument('--port', type=int, default=8000, help='port')
    parser.add_argument('--host', type=str, default='0.0.0.0', help='host')
    # parser.add_argument('--max_input_length', type=int, default=512, help='max length of instruction + input')
    # parser.add_argument('--max_output_length', type=int, default=1536, help='max output length')
return parser.parse_args()
if __name__ == '__main__':
cfg=parse_args()
## ----------- load model --------------
from llm.loader import ModelLoader
start = time.time()
if cfg.checkpoint == "lora":
        # load the model together with a LoRA fine-tuned checkpoint
loader = ModelLoader(cfg.model_name_or_path)
loader.load_lora(cfg.checkpoint_path)
elif cfg.checkpoint == "ptuning":
        # load the model together with a P-Tuning v2 fine-tuned checkpoint
loader = ModelLoader(cfg.model_name_or_path, cfg.pre_seq_len, False)
loader.load_prefix(cfg.checkpoint_path)
else:
loader = ModelLoader(cfg.model_name_or_path)
model,tokenizer = loader.models()
if cfg.quantization_bit is not None:
model = loader.quantize(cfg.quantization_bit)
model.cuda().eval()
uvicorn.run(app, host=cfg.host, port=cfg.port, workers=1)
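# Illustrative (commented) client for the endpoints above; host, port and the
# prompt are placeholders. "/" returns a complete answer, "/stream" streams plain text.
#
#   import requests
#   r = requests.post("http://localhost:8000/", json={"prompt": "你好", "history": []})
#   print(r.json()["response"])
#   with requests.post("http://localhost:8000/stream", json={"prompt": "你好"}, stream=True) as r:
#       for chunk in r.iter_content(chunk_size=None, decode_unicode=True):
#           print(chunk, end="")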
\ No newline at end of file
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
sys.path.append("../..")
import argparse
from llm.loader import ModelLoader
import uvicorn
import json,os,datetime
from typing import List, Optional, Any
from fastapi import FastAPI, HTTPException, Request, status, BackgroundTasks
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from sse_starlette.sse import EventSourceResponse
tokens = ["token1"]
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=['*'],
allow_credentials=True,
allow_methods=['*'],
allow_headers=['*'],
)
class Message(BaseModel):
role: str
content: str
class ChatBody(BaseModel):
messages: List[Message]
model: str
stream: Optional[bool] = False
max_tokens: Optional[int] = 4096
temperature: Optional[float] = 0.9
top_p: Optional[float] = 5
class CompletionBody(BaseModel):
prompt: Any
model: str
stream: Optional[bool] = False
max_tokens: Optional[int] = 4096
temperature: Optional[float] = 0.9
top_p: Optional[float] = 5
class EmbeddingsBody(BaseModel):
# Python 3.8 does not support str | List[str]
input: Any
model: Optional[str]
@app.get("/")
def read_root():
return {"Hello": "World!"}
@app.get("/v1/models")
def get_models():
global model
ret = {"data": [], "object": "list"}
if model:
ret['data'].append({
"created": 1677610602,
"id": "gpt-3.5-turbo",
"object": "model",
"owned_by": "openai",
"permission": [
{
"created": 1680818747,
"id": "modelperm-fTUZTbzFp7uLLTeMSo9ks6oT",
"object": "model_permission",
"allow_create_engine": False,
"allow_sampling": True,
"allow_logprobs": True,
"allow_search_indices": False,
"allow_view": True,
"allow_fine_tuning": False,
"organization": "*",
"group": None,
"is_blocking": False
}
],
"root": "gpt-3.5-turbo",
"parent": None,
})
return ret
def generate_response(content: str, chat: bool = True):
global model_name
if chat:
return {
"id": "chatcmpl-77PZm95TtxE0oYLRx3cxa6HtIDI7s",
"object": "chat.completion",
"created": 1682000966,
"model": model_name,
"usage": {
"prompt_tokens": 0,
"completion_tokens": 0,
"total_tokens": 0,
},
"choices": [{
"message": {"role": "assistant", "content": content},
"finish_reason": "stop", "index": 0}
]
}
else:
return {
"id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7",
"object": "text_completion",
"created": 1589478378,
"model": "text-davinci-003",
"choices": [
{
"text": content,
"index": 0,
"logprobs": None,
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 0,
"completion_tokens": 0,
"total_tokens": 0
}
}
def generate_stream_response_start():
return {
"id": "chatcmpl-77QWpn5cxFi9sVMw56DZReDiGKmcB",
"object": "chat.completion.chunk", "created": 1682004627,
"model": "gpt-3.5-turbo-0301",
"choices": [{"delta": {"role": "assistant"}, "index": 0, "finish_reason": None}]
}
def generate_stream_response(content: str, chat: bool = True):
global model_name
if chat:
return {
"id": "chatcmpl-77QWpn5cxFi9sVMw56DZReDiGKmcB",
"object": "chat.completion.chunk",
"created": 1682004627,
"model": model_name,
"choices": [{"delta": {"content": content}, "index": 0, "finish_reason": None}
]}
else:
return {
"id":"cmpl-7GfnvmcsDmmTVbPHmTBcNqlMtaEVj",
"object":"text_completion",
"created":1684208299,
"choices":[
{
"text": content,
"index": 0,
"logprobs": None,
"finish_reason": None,
}
],
"model": "text-davinci-003"
}
def generate_stream_response_stop(chat: bool = True):
if chat:
return {"id": "chatcmpl-77QWpn5cxFi9sVMw56DZReDiGKmcB",
"object": "chat.completion.chunk", "created": 1682004627,
"model": "gpt-3.5-turbo-0301",
"choices": [{"delta": {}, "index": 0, "finish_reason": "stop"}]
}
else:
return {
"id":"cmpl-7GfnvmcsDmmTVbPHmTBcNqlMtaEVj",
"object":"text_completion",
"created":1684208299,
"choices":[
{"text":"","index":0,"logprobs":None,"finish_reason":"stop"}],
"model":"text-davinci-003",
}
# @app.post("/v1/embeddings")
# async def embeddings(body: EmbeddingsBody, request: Request, background_tasks: BackgroundTasks):
# return do_embeddings(body, request, background_tasks)
# def do_embeddings(body: EmbeddingsBody, request: Request, background_tasks: BackgroundTasks):
# background_tasks.add_task(torch_gc)
# if request.headers.get("Authorization").split(" ")[1] not in context.tokens:
# raise HTTPException(status.HTTP_401_UNAUTHORIZED, "Token is wrong!")
# if not context.embeddings_model:
# raise HTTPException(status.HTTP_404_NOT_FOUND, "Embeddings model not found!")
# embeddings = context.embeddings_model.encode(body.input)
# data = []
# if isinstance(body.input, str):
# data.append({
# "object": "embedding",
# "index": 0,
# "embedding": embeddings.tolist(),
# })
# else:
# for i, embed in enumerate(embeddings):
# data.append({
# "object": "embedding",
# "index": i,
# "embedding": embed.tolist(),
# })
# content = {
# "object": "list",
# "data": data,
# "model": "text-embedding-ada-002-v2",
# "usage": {
# "prompt_tokens": 0,
# "total_tokens": 0
# }
# }
# return JSONResponse(status_code=200, content=content)
# @app.post("/v1/engines/{engine}/embeddings")
# async def engines_embeddings(engine: str, body: EmbeddingsBody, request: Request, background_tasks: BackgroundTasks):
# return do_embeddings(body, request, background_tasks)
def init_model_args(model_args = None):
if model_args is None:
model_args = {}
model_args['temperature'] = model_args['temperature'] if model_args.get('temperature') != None else 0.95
if model_args['temperature'] <= 0:
model_args['temperature'] = 0.1
if model_args['temperature'] > 1:
model_args['temperature'] = 1
model_args['top_p'] = model_args['top_p'] if model_args.get('top_p') else 0.7
model_args['max_tokens'] = model_args['max_tokens'] if model_args.get('max_tokens') != None else 512
return model_args
@app.post("/v1/num_tokens")
async def get_num_tokens(body: CompletionBody, request: Request):
global model, tokenizer,model_name
if request.headers.get("Authorization").split(" ")[1] not in tokens:
raise HTTPException(status.HTTP_401_UNAUTHORIZED, "Token is wrong!")
if not model:
raise HTTPException(status.HTTP_404_NOT_FOUND, "LLM model not found!")
prompt = body.prompt
prompt_tokens = tokenizer.encode(prompt, add_special_tokens=False)
# now = datetime.datetime.now()
# time = now.strftime("%Y-%m-%d %H:%M:%S")
print(prompt,len(prompt_tokens) )
return JSONResponse(content=generate_response(str(len(prompt_tokens)), chat=False))
@app.post("/v1/chat/completions")
async def chat_completions(body: ChatBody, request: Request):
global model, tokenizer,model_name
if request.headers.get("Authorization").split(" ")[1] not in tokens:
raise HTTPException(status.HTTP_401_UNAUTHORIZED, "Token is wrong!")
if not model:
raise HTTPException(status.HTTP_404_NOT_FOUND, "LLM model not found!")
question = body.messages[-1]
if question.role == 'user':
question = question.content
else:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "No Question Found")
history = []
user_question = ''
if model_name == "chatglm3-6b":
for message in body.messages[:-1]:
history.append({"role":message.role, "content":message.content})
# history.extend(body.messages[:-1])
else:
for message in body.messages:
if message.role == 'system':
history.append((message.content, "OK"))
if message.role == 'user':
user_question = message.content
elif message.role == 'assistant':
assistant_answer = message.content
history.append((user_question, assistant_answer))
print(f"question = {question}, history = {history}")
if body.stream:
async def eval_llm():
first = True
model_args = init_model_args({
"temperature": body.temperature,
"top_p": body.top_p,
"max_tokens": body.max_tokens,
})
sends = 0
for response, _ in model.stream_chat(
tokenizer, question, history,
temperature=model_args['temperature'],
top_p=model_args['top_p'],
max_length=max(2048, model_args['max_tokens'])):
ret = response[sends:]
# https://github.com/THUDM/ChatGLM-6B/issues/478
                # work around broken emoji output
if "\uFFFD" == ret[-1:]:
continue
sends = len(response)
if first:
first = False
yield json.dumps(generate_stream_response_start(),
ensure_ascii=False)
yield json.dumps(generate_stream_response(ret), ensure_ascii=False)
yield json.dumps(generate_stream_response_stop(), ensure_ascii=False)
yield "[DONE]"
return EventSourceResponse(eval_llm(), ping=10000)
else:
model_args = init_model_args({
"temperature": body.temperature,
"top_p": body.top_p,
"max_tokens": body.max_tokens,
})
response, _ = model.chat(
tokenizer, question, history,
temperature=model_args['temperature'],
top_p=model_args['top_p'],
max_length=max(2048, model_args['max_tokens']))
return JSONResponse(content=generate_response(response))
@app.post("/v1/completions")
async def completions(body: CompletionBody, request: Request):
print(body)
if request.headers.get("Authorization").split(" ")[1] not in tokens:
raise HTTPException(status.HTTP_401_UNAUTHORIZED, "Token is wrong!")
if not model:
raise HTTPException(status.HTTP_404_NOT_FOUND, "LLM model not found!")
if type(body.prompt) == list:
question = body.prompt[0]
else:
question = body.prompt
print(f"question = {question}")
if body.stream:
async def eval_llm():
model_args = init_model_args({
"temperature": body.temperature,
"top_p": body.top_p,
"max_tokens": body.max_tokens,
})
sends = 0
for response, _ in model.stream_chat(
tokenizer, question, [],
temperature=model_args['temperature'],
top_p=model_args['top_p'],
max_length=max(2048, model_args['max_tokens'])):
ret = response[sends:]
# https://github.com/THUDM/ChatGLM-6B/issues/478
                # work around broken emoji output
if "\uFFFD" == ret[-1:]:
continue
sends = len(response)
yield json.dumps(generate_stream_response(ret, chat=False), ensure_ascii=False)
yield json.dumps(generate_stream_response_stop(chat=False), ensure_ascii=False)
yield "[DONE]"
return EventSourceResponse(eval_llm(), ping=10000)
else:
model_args = init_model_args({
"temperature": body.temperature,
"top_p": body.top_p,
"max_tokens": body.max_tokens,
})
response, _ = model.chat(
tokenizer, question, [],
temperature=model_args['temperature'],
top_p=model_args['top_p'],
max_length=max(2048, model_args['max_tokens']))
print(response)
return JSONResponse(content=generate_response(response, chat=False))
def main():
global model, tokenizer,model_name
parser = argparse.ArgumentParser(
description='Start LLM and Embeddings models as a service.')
    parser.add_argument('--model_name_or_path', type=str, help='LLM model to load',
default='/model/chatglm3-6b')
parser.add_argument('--device', type=str,
help='Device to run the service, gpu/cpu/mps',
default='gpu')
parser.add_argument('--port', type=int, help='Port number to run the service',
default=8000)
parser.add_argument('--host', type=str, help='host to run the service',
default="0.0.0.0")
    parser.add_argument('--checkpoint', type=str, help='fine-tuning type of the checkpoint to load: lora or ptuning',
                        default=None)
    parser.add_argument('--checkpoint_path', type=str, help='path of the fine-tuned checkpoint to load',
                        default=None)
parser.add_argument('--pre_seq_len', type=int, help='ptuning train pre_seq_len',
default=None)
parser.add_argument('--quantization_bit', type=int, help='quantization_bit 4 or 8, default not set',
default=None)
args = parser.parse_args()
print("> Load config and arguments...")
print(f"Language Model: {args.model_name_or_path}")
print(f"Device: {args.device}")
print(f"Port: {args.port}")
print(f"Host: {args.host}")
print(f"Quantization_bit: {args.quantization_bit}")
print(f"Checkpoint: {args.checkpoint}")
print(f"Checkpoint_path: {args.checkpoint_path}")
model_name = os.path.basename(args.model_name_or_path)
print(model_name)
if args.checkpoint == "lora":
        # load the LoRA fine-tuned checkpoint together with the base model
loader = ModelLoader(args.model_name_or_path)
loader.load_lora(args.checkpoint_path)
elif args.checkpoint == "ptuning":
        # load the P-Tuning v2 fine-tuned checkpoint together with the base model
loader = ModelLoader(args.model_name_or_path, args.pre_seq_len, False)
loader.load_prefix(args.checkpoint_path)
else:
loader = ModelLoader(args.model_name_or_path)
    model, tokenizer = loader.models()
if args.quantization_bit is not None:
model = loader.quantize(args.quantization_bit)
model.cuda().eval()
uvicorn.run(app, host=args.host, port=args.port, workers=1)
if __name__ == '__main__':
main()
# coding: utf8
import re
import sys
import time
import pandas as pd
# Base class for all exceptions in this module
class FilterSensitiveWordsException(BaseException):
    pass
# Raised when sensitive words are added, or build() is called again, after the AC automaton has been built
class ACAlreadyBuiltException(FilterSensitiveWordsException):
    pass
# Raised when the added sensitive words or the filtered string are not unicode
class InvalidEncodingException(FilterSensitiveWordsException):
    pass
# Raised when the AC automaton is used for filtering before it has been built
class ACNotYetBuiltException(FilterSensitiveWordsException):
    pass
# Raised when the pattern string is malformed
class InvalidPatternException(FilterSensitiveWordsException):
    pass
# Trie node
class _Node(object):
def __init__(self):
self._subnodes = {}
self._is_end = False
self._fail = None
def set_default(self, character):
if character not in self._subnodes:
self._subnodes[character] = _Node()
return self._subnodes[character]
def get_subnode(self, character):
return self._subnodes.get(character, None)
def mark_as_end(self):
self._is_end = True
@property
def is_end(self):
return self._is_end
def iter_subnodes(self):
for character, subnode in self._subnodes.items():
yield character, subnode
def set_fail(self, node):
self._fail = node
def get_fail(self):
return self._fail
# Aho-Corasick (AC) automaton implementation
class FilterSensitiveWords(object):
def __init__(self):
self._is_built = False
self._root = _Node()
self._node_to_pattern = {}
def add_sensitive_word(self, sensitive_word, pattern, offsets, total_count):
"""向trie树增加敏感词"""
if self._is_built:
raise ACAlreadyBuiltException
# if not isinstance(sensitive_word, unicode):
# raise InvalidEncodingException
tmp = self._root
for character in sensitive_word:
tmp = tmp.set_default(character)
tmp.mark_as_end()
d = self._node_to_pattern.setdefault(tmp, {})
d[pattern] = {
"offsets": offsets,
"total_count": total_count}
def build(self):
"""生成fail指针"""
if self._is_built:
return
self._is_built = True
        # the root's fail pointer is null
self._root.set_fail(None)
queue = [self._root]
while queue:
node = queue.pop(0)
for character, subnode in node.iter_subnodes():
queue.append(subnode)
                # children of the root point their fail pointers back to the root
if node is self._root:
subnode.set_fail(self._root)
continue
                # f is the node that node's fail pointer points to
f = node.get_fail()
while f is not None:
q = f.get_subnode(character)
if q is not None:
subnode.set_fail(q)
break
f = f.get_fail()
else:
                    # no match found along the fail chain; point to the root
subnode.set_fail(self._root)
def _get_output(self, p, ind, matching_patterns, string_length):
outputs = {}
while p is not None:
if not p.is_end:
p = p.get_fail()
continue
for pattern, info in self._node_to_pattern[p].items():
if pattern not in matching_patterns:
matching_patterns[pattern] = [0] * string_length
                # increment the counter at every possible start position
for offset in info["offsets"]:
for pos in range(offset[0], offset[1]+1):
start = ind - pos + 1
if start >= string_length or start < 0:
continue
matching_patterns[pattern][start] += 1
if matching_patterns[pattern][start] >= info["total_count"]:
outputs.setdefault(start, []).append([pattern, ind])
                        # only return non-overlapping matches
del matching_patterns[pattern]
break
p = p.get_fail()
return outputs
def _merge(self, d1, d2):
for k, v in d2.items():
if k not in d1:
d1[k] = v
continue
d1[k].extend(d2[k])
def filter(self, string):
# if not isinstance(string, unicode):
# raise InvalidEncodingException
if not self._is_built:
raise ACNotYetBuiltException
matching_patterns = {}
string_length = len(string)
tmp = self._root
outputs = {}
for ind, character in enumerate(string):
while tmp is not None:
                next_node = tmp.get_subnode(character)
                if next_node is None:
                    tmp = tmp.get_fail()
                    continue
                if next_node.is_end:
                    self._merge(outputs,
                                self._get_output(next_node, ind,
                                                 matching_patterns, string_length)
                                )
                tmp = next_node
break
else:
tmp = self._root
return outputs
class Pattern(object):
    _reg_exp = re.compile(r"(?P<splitter>\s*[\.]*\{\s*(?P<first>\d+)\s*\}\s*|"
                          r"\s*[\.]*\{\s*(?P<second>\d+)\s*,\s*(?P<third>\d+)\s*\}\s*)", re.U)
def __init__(self, pattern):
self._pattern = pattern
self._tokens = self._parse_pattern(self.__class__._reg_exp, pattern)
self._result, self._count = self._get_word_position(self._tokens)
def _parse_pattern(self, reg_exp, pattern):
tokens = []
while pattern:
pattern = pattern.strip()
m = reg_exp.search(pattern)
if m is None:
break
first_part, pattern = pattern.split(m.group("splitter"), 1)
if m.group("first"):
first = int(m.group("first"))
repeat = (first, first)
else:
repeat = (int(m.group("second")), int(m.group("third")))
            # validate the repeat range
if not (0 <= repeat[0] <= repeat[1]):
raise InvalidPatternException
first_part = first_part.strip()
if first_part:
tokens.extend([first_part, repeat])
else:
tokens.extend([repeat])
if pattern:
tokens.append(pattern)
return tokens
def _get_word_position(self, tokens):
index = 0
length = len(tokens)
result = {}
base = 0, 0
count = 0
while index < length:
element = tokens[index]
if isinstance(element, tuple):
if index == length - 1:
break
next_element = tokens[index + 1]
if isinstance(next_element, tuple):
raise InvalidPatternException
next_element_length = len(next_element)
base = base[0] + element[0] + next_element_length, \
base[1] + element[1] + next_element_length
result.setdefault(next_element, []).append(base)
count = count + 1
index = index + 2
continue
base = base[0] + len(element), base[1] + len(element)
result.setdefault(element, []).append(base)
count = count + 1
index = index + 1
return result, count
def iter_words(self):
for word, offsets in self._result.items():
yield word, offsets
def get_pattern(self):
return self._pattern
def get_count(self):
return self._count
def sensitive_word_detection(xls_file,output_text):
input_text = output_text
    # read the sensitive-word spreadsheet
    df = pd.read_excel(xls_file, sheet_name='敏感词')
    # get the list of sensitive words
    # the sensitive-word patterns are taken from the second column (index 1) of the sheet
    sensitive_words = df.iloc[:, 1].tolist()
    # create the sensitive-word filter
ac = FilterSensitiveWords()
    # iterate over the sensitive-word patterns
for pattern_string in sensitive_words:
        # # print the sensitive-word pattern
        # print(u"\033[31m敏感词:%s\033[0m" % pattern_string)
        # build a Pattern object for this sensitive word
pattern = Pattern(pattern_string)
        # iterate over each word in the pattern and its offsets
for word, offsets in pattern.iter_words():
            # add the word to the filter
ac.add_sensitive_word(word, pattern.get_pattern(),
offsets, pattern.get_count())
    # build the filter (generate the fail pointers)
ac.build()
    # record the start time
time1 = time.time()
    # run the filter over the input text
results = ac.filter(input_text)
    # replacement text used to mask matches
    offset = 0
    replace_text = "***"
    # spans of text to be masked: {start position: end position}
    split_info = {}
    # iterate over the filter results
    for pos, patterns in results.items():
        # find the largest end offset among the matches starting at pos
        max_offset = -1
        for pattern in patterns:
            if pattern[1] > max_offset:
                max_offset = pattern[1]
        # if the largest end offset is not before the start position, record the span to mask
        if max_offset >= pos:
            split_info[pos] = max_offset
    # sort the masked spans by start position
sorted_dict = dict(sorted(split_info.items()))
    # build the masked output text
result_text = ""
for pos, max_offset in sorted_dict.items():
result_text += input_text[offset:pos] + replace_text
offset = max_offset+1
result_text += input_text[offset:]
    # print elapsed time of the sensitive-word detection
print("敏感词检测耗时:", time.time()-time1)
return result_text
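# A minimal usage sketch for the function above (file name and text are assumptions, not part
# of the original code):
#
#   masked = sensitive_word_detection("sensitive_words.xlsx", "待检测的文本……")
#   print(masked)
#
# The Excel file is expected to contain a sheet named '敏感词' whose second column holds the
# pattern strings (e.g. u"日{0,3}本"), matching how the function reads it above.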
if __name__ == "__main__":
def myprint(r):
print(u"\033[33m过滤结果:\033[0m")
for pos, patterns in r.items():
print(u" 位置:%s" % pos)
print(" " + "\n ".join(
[u"%s, %d"%tuple(pattern) for pattern in patterns]
))
ac = FilterSensitiveWords()
for pattern_string in [
u"日{0,3}本",
u"日{0,3}本{0,3}鬼{0,3}子",
u"大{0,3}傻{0,3}叉",
u"狗娘养的"]:
print(u"\033[31m敏感词:%s\033[0m" % pattern_string)
pattern = Pattern(pattern_string)
for word, offsets in pattern.iter_words():
ac.add_sensitive_word(word, pattern.get_pattern(),
offsets, pattern.get_count())
ac.build()
string = u"大家都知道:日(大)本(傻)鬼(叉)子都是狗娘养的"
string = u"日日本本鬼子"
print(u"\033[32m过滤语句:%s\033[0m" % string)
myprint(ac.filter(string))
from fastapi import FastAPI, HTTPException, Header
from pydantic import BaseModel
import sys
sys.path.append("../..")
from llm.chatglm import ChatGLMSerLLM
from langchain import LLMChain
from langchain.vectorstores.faiss import FAISS
import math
from fastapi.middleware.cors import CORSMiddleware
from scenarios.psbc.model_serve import modelcall_prase, CHATGLM_PROMPT
app = FastAPI()
# add CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # allow all origins; specific domains may be listed instead
    allow_credentials=True,
    allow_methods=["*"],  # allow all HTTP methods
    allow_headers=["*"],  # allow all HTTP headers
)
model_url = "http://192.168.10.93:8000"
def customize_exception(code: int, message: str, data: dict):
return {
"code":code,
"message":message,
"data":data
}
@app.get("/query")
async def query(query:str=None):
    # modelcall_prase does not encapsulate the url and CHATGLM_PROMPT, so they are set up here.
base_llm=ChatGLMSerLLM(url=model_url)
chose_llm = LLMChain(llm=base_llm, prompt=CHATGLM_PROMPT)
try:
result, title, summary, tags, category, subcategories = modelcall_prase(chose_llm, query)
except Exception as e:
print("模型调用过程或json解析过程出现错误:", e)
return customize_exception(40012, str(e), {})
return customize_exception(200, "success", {
"result": result,
"title": title,
"summary": summary,
"tags": tags,
"category": category,
"subcategories": subcategories
})
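# A minimal sketch of calling the endpoint above (assuming the service is started via the
# __main__ block below on port 8888):
#
#   curl "http://localhost:8888/query?query=需要解析的文本"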
@app.get("/tags")
async def tags(tags_native:list, faiss_vectorstore:FAISS, threshold:float=0.7):
score_threshold = (1-threshold) * math.sqrt(2)
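    # Presumably this converts a cosine-similarity threshold into an L2-distance threshold for
    # FAISS; for L2-normalized embeddings the exact relation is d = sqrt(2 * (1 - cos)), so the
    # linear form above is an approximation (assumption - the original intent is not documented).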
tags_new = []
for tag_native in tags_native:
res = faiss_vectorstore.similarity_search_with_score(query=tag_native, score_threshold=score_threshold)
for document_object, _ in res:
tags_new.append(document_object.page_content)
return customize_exception(200, "success", {
"tags_native": tags_native,
"tags_new": tags_new
})
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8888)
# -*- coding: utf-8 -*-
import os, sys
import pandas as pd
sys.path.append("../..")
import gradio as gr
import argparse
from llm.chatglm import ChatGLMSerLLM
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from typing import Awaitable
import asyncio
from langchain.callbacks import AsyncIteratorCallbackHandler
import re
from llm.ernie_with_sdk import ChatERNIESerLLM
from qianfan import ChatCompletion
chatglm3_prompt_tfq = """<|system|>
你是一个可以将一段文本根据其内容生成可以用于考试的判断题,并按照一定格式进行输出的工具,一共输出{num_selector}个问题。具体的输出格式如下所示:
试题1:
题目:。
答案:正确/错误。
解析:用到的资料中的内容。
试题2:
题目:。
答案:正确/错误。
解析:用到的资料中的内容。
...
<|user|>
文本内容如下:
{context}"""
chatglm3_prompt_mcq = """<|system|>
你是一个可以将一段文本根据其内容生成可以用于考试的选择题,并按照一定格式进行输出的工具,一共输出{num_selector}个问题。具体的输出格式如下所示:
试题1:
题目:。
A.选项内容 B.选项内容 C.选项内容 D.选项内容
正确答案:给出具体选项。
试题2:
题目:。
A.选项内容 B.选项内容 C.选项内容 D.选项内容
正确答案:给出具体选项。
...
<|user|>
文本内容如下:
{context}"""
qianfan_prompt_tfq = """请根据下面提供的知识资料,生成可以作为考试的判断题,并给出正确答案,一共输出{num_selector}个问题。按照如下格式进行回答:
试题1:
题目:试题内容。
正确答案:正确/错误。
解析:选择原因(如果用到原资料中的内容,请列出来)
试题2:
题目:试题内容。
正确答案:正确/错误。
解析:选择原因(如果用到原资料中的内容,请列出来)
...
资料内容如下所示:
'''
{context}
'''"""
qianfan_prompt_mcq = """请你根据下面这一段文本的内容生成可以用于考试的选择题,并按照一定格式进行输出,一共输出{num_selector}个问题。具体的输出格式如下所示:
试题1:
题目:
A.选项内容
B.选项内容
C.选项内容
D.选项内容
正确答案:给出具体选项。
试题2:
题目:
A.选项内容
B.选项内容
C.选项内容
D.选项内容
正确答案:给出具体选项。
...
文本内容如下:
'''
{context}
'''"""
CHATGLM3_PROMPT_TFQ = PromptTemplate(input_variables=["context"],template=chatglm3_prompt_tfq)
CHATGLM3_PROMPT_MCQ = PromptTemplate(input_variables=["context"],template=chatglm3_prompt_mcq)
QIANFAN_PROMPT_TFQ = PromptTemplate(input_variables=["context"],template=qianfan_prompt_tfq)
QIANFAN_PROMPT_MCQ = PromptTemplate(input_variables=["context"],template=qianfan_prompt_mcq)
async def async_chat_mcq(input_text, model,num_selector):
# yield gr.DataFrame(pd.DataFrame(), col_count=(4, "fixed"), row_count=(3, "fixed")) ,""
global qianfanchain_mcq
global chatglm3chain_mcq
# Create an asynchronous callback handler
callback = AsyncIteratorCallbackHandler()
# Define an asynchronous function to wrap another asynchronous function and signal completion or exceptions using an event
async def wrap_done(fn: Awaitable, event: asyncio.Event):
try:
await fn # Wait for the provided asynchronous function to complete
except Exception as e:
# TODO: Handle exceptions - here, we simply print the exception information
import traceback
traceback.print_exc()
print(f"Caught exception: {e}")
finally:
event.set() # Set the event to indicate completion
# Create a task to perform message generation with ChatOpenAI and monitor the completion event of the callback handler
if model == "ernie":
task = asyncio.create_task(wrap_done(qianfanchain_mcq.arun({"context":input_text,"num_selector":num_selector},callbacks=[callback]),callback.done))
else:
task = asyncio.create_task(wrap_done(chatglm3chain_mcq.arun({"context":input_text,"num_selector":num_selector},callbacks=[callback]),callback.done))
print("*"*20)
# Iterate asynchronously to obtain tokens from the callback handler
text=""
async for token in callback.aiter():
text=text+token
yield f"{text}"
# yield out_text,dataframe,"",gr.Button(visible=False)
await task # Wait for the task to complete
async def async_chat_tfq(input_text, model,num_selector):
# yield gr.DataFrame(pd.DataFrame(), col_count=(4, "fixed"), row_count=(3, "fixed")) ,""
global qianfanchain_tfq
global chatglm3chain_tfq
# Create an asynchronous callback handler
callback = AsyncIteratorCallbackHandler()
# Define an asynchronous function to wrap another asynchronous function and signal completion or exceptions using an event
async def wrap_done(fn: Awaitable, event: asyncio.Event):
try:
await fn # Wait for the provided asynchronous function to complete
except Exception as e:
# TODO: Handle exceptions - here, we simply print the exception information
import traceback
traceback.print_exc()
print(f"Caught exception: {e}")
finally:
event.set() # Set the event to indicate completion
# Create a task to perform message generation with ChatOpenAI and monitor the completion event of the callback handler
if model == "ernie":
task = asyncio.create_task(wrap_done(qianfanchain_tfq.arun({"context":input_text,"num_selector":num_selector},callbacks=[callback]),callback.done))
else:
task = asyncio.create_task(wrap_done(chatglm3chain_tfq.arun({"context":input_text,"num_selector":num_selector},callbacks=[callback]),callback.done))
print("*"*20)
# Iterate asynchronously to obtain tokens from the callback handler
text=""
async for token in callback.aiter():
text=text+token
yield f"{text}"
await task # Wait for the task to complete
def on_select(evt: gr.SelectData, df):
print(f"You selected {evt.value} at {evt.index} from {evt.target}")
if evt.value == "删除":
df.drop(df.index[evt.index[0]],axis=0,inplace=True)
return gr.DataFrame(df,interactive=True,column_widths=["30%", "60%","10%"])
else:
return df
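# parse_mcq/parse_tfq below assume the model output follows the prompt templates above:
# questions separated by blank lines, with the question, options (for MCQ) and the
# answer/explanation each on their own line, e.g. an MCQ block roughly like:
#
#   试题1:
#   题目:……
#   A.…… B.…… C.…… D.……
#   正确答案:A
#
# Note that the parsing splits on the ASCII ':' character.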
def parse_mcq(output_text):
questions = []
question_texts = output_text.split('\n\n')
for question_text in question_texts:
lines = question_text.split('\n')
question = lines[1]
options = lines[2:-1]
correct_answer = lines[-1].split(":")[1]
options = [option.replace(". ",".") for option in options]
question_dict = {
'question': question.split(':')[1],
'options': [option for option in options],
'correct_answer': correct_answer,
}
questions.append(question_dict)
    length = len(questions)
    df = pd.DataFrame({
        "问题": [question['question'] for question in questions],
        "选项": [question['options'] for question in questions],
        "正确答案": [question['correct_answer'] for question in questions],
        "删除": ["删除" for _ in questions],
    })
    return gr.DataFrame(df, row_count=(length, "fixed"))
def parse_tfq(output_text):
questions = []
question_texts = output_text.strip().split('\n\n')
for question_text in question_texts:
lines = question_text.split('\n')
question = lines[1].split(':')[1]
correct_answer = lines[2].split(':')[1]
explanation = lines[-1].split(':')[1]
question_dict = {
'question': question,
'correct_answer': correct_answer,
'explanation': explanation
}
questions.append(question_dict)
    length = len(questions)
    df = pd.DataFrame({
        "问题": [question['question'] for question in questions],
        "答案": [question['correct_answer'] for question in questions],
        "解析": [question['explanation'] for question in questions],
        "删除": ["删除" for _ in questions]
    })
    return gr.DataFrame(df, row_count=(length, "fixed"))
with gr.Blocks() as demo:
gr.HTML("""<h1 align="center">辅助生成培训资料</h1>""")
# with gr.Row():
# input_text = gr.Textbox(show_label=True, placeholder="输入需要处理的文档...", lines=10)
with gr.Row():
input_text = gr.Textbox(show_label=True, placeholder="输入需要处理的文档...", lines=10, scale=9)
model_selector = gr.Dropdown(choices=["ernie","chatglm3"], label="请选择一个模型", scale=1, min_width=50, value="chatglm3")
with gr.Row():
num_selector = gr.Slider(minimum=0, maximum=10, value=5, label="请选择问题数量",step=1)
with gr.Row():
mcqBtn = gr.Button("生成选择题培训资料")
tfqBtn = gr.Button("生成判断题培训资料")
dataframe = gr.DataFrame(visible=True,interactive=True,column_widths=["30%", "40%", "20%", "10%"],col_count=(4, "fixed"), row_count=(1, "fixed"))
dataframe.select(on_select, inputs=[dataframe], outputs=[dataframe])
gr.Markdown("""---""")
output_text = gr.Textbox(show_label=True, placeholder="输出...", lines=10)
# clearBtn = gr.Button("清除")
# clearBtn.click(clear, [], [dataframe, output_text])
mcqBtn.click(
async_chat_mcq, [input_text, model_selector,num_selector], [output_text], queue=True
).then(
parse_mcq,[output_text], [dataframe], queue=False
)
tfqBtn.click(
async_chat_tfq, [input_text, model_selector,num_selector], [output_text], queue=True
).then(
parse_tfq,[output_text], [dataframe], queue=False
)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--port", type=int, default=7658)
    parser.add_argument("--host", type=str, default="192.168.0.66")
    parser.add_argument("--base_llm_url", type=str, default="http://192.168.22.106:8003")
    args = parser.parse_args()
global base_llm_url, qianfanchain_mcq, qianfanchain_tfq, chatglm3chain_mcq, chatglm3chain_tfq
base_llm_url=os.environ.get('LLM_URL_BASE', None)
if not base_llm_url:
base_llm_url=args.base_llm_url
base_llm1=ChatGLMSerLLM(url=base_llm_url)
base_llm2=ChatERNIESerLLM(chat_completion=ChatCompletion(ak="pT7sV1smp4AeDl0LjyZuHBV9", sk="b3N0ibo1IKTLZlSs7weZc8jdR0oHjyMu"))
qianfanchain_mcq = LLMChain(llm=base_llm2, prompt=QIANFAN_PROMPT_MCQ,llm_kwargs={"temperature":0.9})
qianfanchain_tfq = LLMChain(llm=base_llm2, prompt=QIANFAN_PROMPT_TFQ,llm_kwargs={"temperature":0.9})
chatglm3chain_mcq = LLMChain(llm=base_llm1, prompt=CHATGLM3_PROMPT_MCQ,llm_kwargs={"temperature":0.9})
chatglm3chain_tfq = LLMChain(llm=base_llm1, prompt=CHATGLM3_PROMPT_TFQ,llm_kwargs={"temperature":0.9})
demo.queue().launch(share=False, inbrowser=True,server_name=args.host,server_port=args.port)
# -*- coding: utf-8 -*-
import os, sys
sys.path.append("../..")
from langchain.chains import LLMChain
from llm.ernie_with_sdk import ChatERNIESerLLM
from qianfan import ChatCompletion
from langchain.prompts import PromptTemplate
import gradio as gr
from llm.chatglm import ChatGLMSerLLM
from langchain.embeddings.huggingface import (
HuggingFaceEmbeddings,
)
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
embedding_path = "C:\\Users\\15663\\AI\\models\\bge-large-zh-v1.5"
embedding = HuggingFaceEmbeddings(model_name=embedding_path)
qianfan_prompt_stc1 = """'''
{context}
'''
请为上述文本取一个标题(除标题之外不可输出其他内容)。"""
QIANFAN_PROMPT_STC1 = PromptTemplate(input_variables=["context"], template=qianfan_prompt_stc1)
qianfan_prompt_stc2 = """'''
{context}
'''
请为上述同级标题生成一个高级标题(除标题之外不可以输出其他内容)"""
QIANFAN_PROMPT_STC2 = PromptTemplate(input_variables=["context"], template=qianfan_prompt_stc2)
chatglm_prompt_stc1 = """{context}
请为上述文本取一个标题(除标题之外不可输出其他内容)。"""
CHATGLM_PROMPT_STC1 = PromptTemplate(input_variables=["context"], template=chatglm_prompt_stc1)
chatglm_prompt_stc2 = """'''
{context}
'''
请为上述同级标题生成一个高级标题(除标题之外不可以输出其他内容)"""
CHATGLM_PROMPT_STC2 = PromptTemplate(input_variables=["context"], template=chatglm_prompt_stc2)
def get_para_list(input_text):
o_para_list = input_text.split("\n")
para_list = []
for index,item in enumerate(o_para_list):
if len(item.strip()) == 0:
if len(para_list) == 0:
para_list.append(item+'\n')
else:
para_list[-1] = para_list[-1] + '\n' + item
elif len(item) <= 20:
if index+1 < len(o_para_list):
o_para_list[index+1] = item+"\n"+o_para_list[index+1]
continue
else:
para_list.append(item)
else:
para_list.append(item)
if len(para_list) >= 2 and all(char.isspace() for char in para_list[0]):
tmp_str = para_list[0]
para_list = para_list[1:]
para_list[0] = tmp_str+para_list[0]
# print(para_list)
# print(len(para_list))
return para_list
def split_plus1(para_list,split_threshold,split_threshold_para):
print("-------------------------------------------------------------------------------------------------------------------")
para_split = []
para_info = []
index_spilt = []
cur_para = para_list[0]
cur_index = 0
cur_para_info = []
cur_para_vec = embedding.embed_query(cur_para)
cur_para_vec = np.array(cur_para_vec).reshape(1,-1)
last_centence_vec = cur_para_vec
for index in range(1,len(para_list)):
vec = embedding.embed_query(para_list[index])
vec = np.array(vec)
vec = vec.reshape(1,-1)
similarity = cosine_similarity(cur_para_vec,vec)
similarity2 = cosine_similarity(last_centence_vec,vec)
if similarity > split_threshold_para or similarity2 > split_threshold:
cur_para_info.append({"offset":len(cur_para)+1,"similarity":[similarity[0][0],similarity2[0][0]]})
cur_para += "\n" + para_list[index]
cur_para_vec = embedding.embed_query(cur_para)
cur_para_vec = np.array(cur_para_vec).reshape(1,-1)
else:
para_split.append(cur_para)
para_info.append({"para_info":cur_para_info,"para_vec":cur_para_vec})
index_spilt.append(cur_index)
cur_para = para_list[index]
cur_index = index
cur_para_info=[{"offset":0,"similarity":[similarity[0][0],similarity2[0][0]]}]
cur_para_vec = vec
last_centence_vec = vec
if len(cur_para)>0:
para_split.append(cur_para)
para_info.append({"para_info":cur_para_info,"para_vec":cur_para_vec})
index_spilt.append(cur_index)
# print(para_split)
return para_split, para_info, index_spilt
def split_plus2(para_split, para_info, marge_threshold_para=0):
result = []
result_simple = []
index_spilt = []
def insert_info(para,info):
for i in range(len(info)-1,-1,-1):
para = para[:info[i]["offset"]] + "**与上段落相似 {:.5} 与上句相似 {:.5}**\n".format(info[i]["similarity"][0],info[i]["similarity"][1]) +para[info[i]["offset"]:]
return para
cache_para = ""
cache_para_simple = ""
last_para_vec = None
cur_index = 0
for index in range(len(para_split)):
if len(para_info[index]["para_info"])>1:
result.append(cache_para+insert_info(para_split[index],para_info[index]["para_info"]))
result_simple.append(cache_para_simple+para_split[index])
index_spilt.append(cur_index)
# print(para_split[index],"-=-=-=")
# print(index,"->",cur_index)
cur_index = index+1
# print(index, "insert_index1=======================================")
cache_para = ""
cache_para_simple = ""
last_para_vec = np.array(embedding.embed_query(result_simple[-1])).reshape(1,-1)
else:
if index == len(para_split)-1:
if len(result)>0:
result[-1] += "\n"+cache_para+insert_info(para_split[index],para_info[index]["para_info"])
result_simple[-1] += "\n"+cache_para_simple+para_split[index]
cur_index = index+1
else:
result.append(cache_para+insert_info(para_split[index],para_info[index]["para_info"]))
result_simple.append(cache_para_simple+para_split[index])
index_spilt.append(cur_index)
cur_index = index+1
cache_para = ""
cache_para_simple = ""
last_para_vec = np.array(embedding.embed_query(result_simple[-1])).reshape(1,-1)
else:
if index == 0:
cache_para += insert_info(para_split[index],para_info[index]["para_info"]) +"\n"
cache_para_simple += para_split[index]+ "\n"
else:
if last_para_vec is not None:
last_similarity = cosine_similarity(para_info[index]["para_vec"],last_para_vec)[0][0]
last_para_vec = None
else:
last_similarity = para_info[index]["para_info"][0]["similarity"][0]
next_similarity = cosine_similarity(para_info[index]["para_vec"],para_info[index+1]["para_vec"])
if last_similarity > marge_threshold_para or next_similarity > marge_threshold_para or len(para_split[index]) < 50:
if last_similarity > next_similarity and len(result)>0:
result[-1] += "\n"+cache_para+insert_info(para_split[index],para_info[index]["para_info"])
result_simple[-1] += "\n"+cache_para_simple+para_split[index]
cur_index = index+1
cache_para = ""
cache_para_simple = ""
last_para_vec = np.array(embedding.embed_query(result_simple[-1])).reshape(1,-1)
else:
cache_para += insert_info(para_split[index],para_info[index]["para_info"])+"\n"
cache_para_simple += para_split[index]+ "\n"
last_para_vec = np.array(embedding.embed_query(cache_para)).reshape(1,-1)
else:
result.append(cache_para+insert_info(para_split[index],para_info[index]["para_info"]))
result_simple.append(cache_para_simple+para_split[index])
index_spilt.append(cur_index)
cur_index = index+1
last_para_vec = np.array(embedding.embed_query(result_simple[-1])).reshape(1,-1)
cache_para = ""
cache_para_simple = ""
if len(cache_para)>0:
result.append(cache_para)
result_simple.append(cache_para_simple)
index_spilt.append(cur_index)
return result,result_simple,index_spilt
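# A rough sketch of how the helpers above are combined further down (see title() and stc1()):
#
#   para_list = get_para_list(raw_text)                           # split into candidate paragraphs
#   para_split, para_info, _ = split_plus1(para_list, 0.6, 0.55)  # group by embedding similarity
#   merged, merged_simple, _ = split_plus2(para_split, para_info) # merge short / similar segments
#
# The threshold values shown are examples taken from stc2() below, not fixed constants.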
paragraph1 = """区域性优惠贸易协定项下出口货物原产地证明书(以下简称优惠原产地证)是有区域 性优惠贸易协定国家官方机构签发的享受成员国关税互惠减免待遇的官方凭证,是企业出口 产品通向国际市场的“金钥匙”和“有价证券”。凭借优惠原产地证,企业出口产品可以享 受优惠关税甚至零关税的待遇,从而有效降低产品出口成本,提高产品的国际竞争力。
自加入WTO 至今,我国已同有关国家和地区签署了如下的区域性优惠贸易协定,包括 《内地与港澳更紧密经贸关系安排 (CEPA)》 《亚太贸易协定》《中国一东盟自贸协定》《中 国一巴基斯坦自贸协定》 《中国一智利自贸协定》 《中国一新西兰自贸协定》 《中国一新加 坡自贸协定》 《中国一哥斯达黎加自贸协定》 《中国一秘鲁自贸协定》 《中国一瑞士自贸协 定》《中国—冰岛自贸协定》 《中国一韩国自贸协定》 《中国一澳大利亚自贸协定》海峡两 岸经济合作框架协议 (ECFA) 等,还有更多的自由贸易区正在谈判或研究之中。
目前贸促会受理以下地区的优惠原产地证 《亚太贸易协定》目的国:印度、韩国、孟 加拉、斯里兰卡、老挝 《中国一新西兰自由贸易协定》目的国:新西兰; 《中国一新加坡 自由贸易协定》目的国:新加坡 《中国一秘鲁自由贸易协定》目的国:秘鲁;海峡两岸经 济合作框架协议 (ECFA) 目的地:中国台湾 《中国一瑞士自由贸易协定》目的国:瑞士; 《中国 —冰岛自由贸易协定》目的国:冰岛 《中国一韩国自由贸易协定》目的国:韩国; 《中国一澳大利亚自由贸易协定》目的国:澳大利亚。
FORM B证书是根据《亚太贸易协定》 (FIRST AGREEMENT ON TRADE NEGOTIATIONS AMONG DEVELOPING MEMBER COUNTRIES OF THE ECONOMIC AND SOCIAL COMMISSION FOR ASIA AND THE PACIFIC) 原产地规则的要求签发的前身为《曼谷协定》,是在签订协定 的成员国之间就特定产品享受互惠减免关税待遇(跟非互惠的FORM A证书不同) 的官方 原产地证明文件。
FORM B证书的签订依据为《亚太贸易协定》原产地规则和《亚太贸易协定原产地证 书签发和核查程序》。
可签发FORM B 证书的国家为:中国、印度、斯里兰卡、孟加拉国、老挝和韩国(《亚 太贸易协定》成员国)。
如货物为完全原产,填写 “A”。如货物含进口成分,非国产价值成分<55%,填写字母 “B”加原产于非成员国或原产地不明的材料、部件或产品的总货值占出口产品离岸价的百分 比,例如( ‘B”40%)。 如货物含进口成分,国产及成员国累计价值成分≥60%,填写 “C” 加原产于成员国的累计含量的总值与出口产品离岸价的百分比,例如( “℃”65%)。符合特 定原产地标准的产品,填写字母 “D”(该项主要针对不发达国家出口申报的产品)。
注意《亚太贸易协定》原产地证书申请时间不得超过货物出运后3个工作日。
FORM E证书是根据《中华人民共和国与东南亚国家联盟全面经济合作框架协议》的要 求签发的、在签订协定的成员国之间就特定产品享受互惠减免关税待遇的(跟非互惠的 FORM A证书不同)官方原产地证明文件。
FORME 证书的签订依据为《中国一东盟自由贸易区原产地规则》和《中国一东盟自170. 国际贸易单证实务由贸易区原产地规则签证操作程序》。
可签发FORME 证书的国家为:中国、老挝、越南、泰国、缅甸、柬埔寨、菲律宾、文 莱、印度尼西亚、马来西亚和新加坡(东盟成员国)。
如货物完全原产,填写“×”。如货物含进口成分,其国产价值成分≥40%,填写国产 价值的百分比,例如 “45%”。如货物含进口成分,中国一东盟自贸区累计价值成分≥ 40%,填写该累计价值的百分比,例如 “45%”。
注意:证书为一正三副, 一三联客户,二联商检留存,四联企业留存。
FORMP 证书是根据《中华人民共和国政府与巴基斯坦伊斯兰共和国政府关于自由贸易 协定早期收获计划的协议》(简称《早期收获协议》)及其项下《中国一巴基斯坦自由贸易 区原产地规则》的要求签发的在中国和巴基斯坦之间就特定产品享受互惠减免关税待遇的 (跟非互惠的FORMA 证书不同)官方原产地证明文件。
FORMP 证书的签订依据为《中国一巴基斯坦自由贸易区原产地规则》和《中国一巴基 斯坦自由贸易区原产地规则签证操作程序》。
可签发FORMP 证书的国家为中国和巴基斯坦。中国产品出口到巴基斯坦,中国出口商 向各地出入境检验检疫机构申请签发FORMP 证书,巴基斯坦给予FORM P证书项下货物关 税优惠待遇;巴基斯坦产品出口到中国,巴基斯坦出口商向巴基斯坦有关部门申请签发 FORMP 证书,中国给予FORMP 证书项下货物关税优惠待遇。这是互惠的。
如货物为完全原产,填写“P”; 如货物含进口成分,国产价值成分≥45%,填写国产价 值的百分比,例如 "40%";如货物含进口成分,中国一巴基斯坦自贸区累计价值成分≥ 40%,填写该累计价值的百分比,例如 “45%”;产品符合特定原产地标准,填写 “PSR”。
注意:出运后15日内办理。
FORMF 证书是根据《中国一智利自由贸易协定》及其项下《中国一智利自贸区原产地 规则》的要求签发的,在中国和智利之间就特定产品享受互惠减免关税待遇的(跟非互惠 的 FORM A证书不同)官方原产地证明文件。
FORMF 证书的签订依据为《中国一智利自贸区原产地规则》和《中国一智利自由贸易 区原产地规则签证操作程序》。
可签发FORMF 证书的国家为中国和智利。中国产品出口到智利,中国出口商向各地出 入境检验检疫机构申请签发FORMF 证书,智利给予FORMF 证书项下货物关税优惠待遇; 智利产品出口到中国,智利出口商向智利有关部门申请签发FORMF 证书,中国给予FORM F 证书项下货物关税优惠待遇。这是互惠的。
如货物为完全原产,填写 “P”; 如货物为含进口成分,区域价值成分≥40%,填写 “RVC"; 产品符合特定原产地标准,填写 “PSR”并附《中智自由贸易区产品特定原产地 标准》(简称 “PSR”清单)。
注意:证书的申办时间应在货物出口前或出口后的30天内;货物出口30天后,签证机 构不再接受证书的签发申请。
FORM N 证书是根据《中华人民共和国政府和新西兰政府自由贸易协定》和《中华人第五章 其他出口结汇单证.171.民共和国政府和新西兰政府自由贸易协定项下进出口货物原产地管理办法》的要求签发的、 在中国和新西兰之间就特定产品享受互惠减免关税待遇的(跟非互惠的FORM A证书不同) 官方原产地证明文件。《中华人民共和国政府和新西兰政府自由贸易协定》于2008年4月7 日正式签署,这是中国与发达国家签署的第一个自由贸易协定。
可签发FORM N 证书的国家为中国和新西兰。中国产品出口到新西兰,中国出口商向 各地出入境检验检疫机构申请签发FORM N 证书,新西兰给予FORM N证书项下货物关税 优惠待遇;新西兰产品出口到中国,新西兰出口商向新西兰有关部门申请签发 FORM N 证 书,中国给予FORM N证书项下货物关税优惠待遇。这是互惠的。
如货物为完全原产,填写 “WO”; 如货物含有进口成分,但完全由已经取得原产资格 的材料或部件生产,填写 “WP”; 产品符合特定原产地标准,填写 “PSR”, 有区域价值成 分要求的,应注明百分比,例如 “PSR”60%
注意:证书申办时间:应在货物出口前或当天申请办理,中国一新西兰证书不办理后 发,不倒签。
FORMX 证书是根据 《中华人民共和国政府和新加坡共和国政府自由贸易协定》和 《中华人民共和国政府和新加坡共和国政府自由贸易协定项下进出口货物原产地管理办法》 的要求签发的,在中国和新加坡之间就特定产品享受互惠减免关税待遇的(跟非互惠的 FORM A证书不同)官方原产地证明文件。
FORM X 证书的签订依据为《中国一新加坡自贸协定原产地规则》及其相关的原产地签 证操作程序。
可签发FORM X 证书的国家为中国和新加坡。中国产品出口到新加坡,中国出口商向 各地出入境检验检疫机构申请签发 FORM X 证书,新加坡给予 FORM X 证书项下货物关税 优惠待遇;新加坡产品出口到中国,新加坡出口商向新加坡有关部门申请签发 FORM X 证 书,中国给予FORM X证书项下货物关税优惠待遇。这是互惠的。
中国一新加坡自贸区原产地规则规定:在出口方完全获得的产品,填写 “P”; 区域价 值成分≥40%的产品,填写 “RVC"; 符合产品特定原产地规则的产品,填写 “PSR”。
注意:应在提单日期前申报,不办理后发证书。
证书英文名称: Certificate of Origin Form for China-Peru FTA。
中国一秘鲁FTA 证书是根据《中国一秘鲁自由贸易协定》及其项下《中国一秘鲁自贸 区原产地规则》的要求签发的、在中国和秘鲁之间就特定产品享受互惠减免关税待遇的 (跟非互惠的FORM A证书不同)官方原产地证明文件。
中国一秘鲁FTA 证书的签订依据为《中国一秘鲁自贸区原产地规则》及与原产地相关 的签证操作程序。
可签发中国一秘鲁 FTA 证书的国家为中国和秘鲁。中国产品出口到秘鲁,中国出口商 向各地出入境检验检疫机构申请签发中国一秘鲁 FTA 证书,秘鲁给予中国一秘鲁FTA 证书 项下货物关税优惠待遇;秘鲁产品出口到中国,秘鲁出口商向秘鲁有关部门申请签发中国一 秘鲁FTA 证书,中国给予中国一秘鲁FTA 证书项下货物关税优惠待遇。这是互惠的。
FORML 证书是根据《中国一哥斯达黎加自由贸易协定》及其项下 《中国一哥斯达黎加国际贸易单证实务自贸区原产地规则》的要求签发的、在中国和哥斯达黎加之间就特定产品享受互惠减免关 税待遇的(跟非互惠的FORM A证书不同)官方原产地证明文件。
FORML 证书的签订依据为《中国一哥斯达黎加自贸区原产地规则》及与原产地相关的 签证操作程序。
可签发FORML 证书的国家为中国和哥斯达黎加。中国产品出口到哥斯达黎加,中国出 口商向各地出入境检验检疫机构申请签发 FORML 证书,哥斯达黎加给予FORM L证书项下 货物关税优惠待遇;哥斯达黎加产品出口到中国,哥斯达黎加出口商向哥斯达黎加有关部门 申请签发 FORML 证书,中国给予FORML 证书项下货物关税优惠待遇。这是互惠的。
FORMS 证书是根据《中华人民共和国和瑞士联邦自由贸易协定》及其相关规定的要求 签发的、在中国和瑞士之间就特定产品享受互惠减免关税待遇的(跟非互惠的 FORM A 证 书不同)官方原产地证明文件。
FORMS 证书的签订依据为《中国一瑞士自由贸易区原产地规则》及其相关的原产地签 证操作程序。《中华人民共和国和瑞士联邦自由贸易协定》于2014年7月1日起施行。
可签发FORMS 证书的国家为中国和瑞士。中国产品出口到瑞士,中国出口商向各地出 入境检验检疫机构申请签发FORMS 证书,瑞士给予FORMS 证书项下货物关税优惠待遇; 瑞士产品出口到中国,瑞士出口商向瑞士有关部门申请签发FORMS 证书,中国给予FORM S 证书项下货物关税优惠待遇。这是互惠的。
《海峡两岸经济合作框架协议》 (ECFA) 是台湾与大陆自2009年年中开始,经过多次 商谈达成的一项重要协议,于2010年6月29日签署,其项下货物贸易早期收获清单于2011 年 1 月 1 日起付诸实施,出口到台湾的货物将获得关税减免的优惠。
列入清单的约800项产品将逐步降关税,三年内全部降为零,包括大陆对台湾开放的产 品500多项,台湾批准大陆的产品5大类267项,含石化类、机械类、纺织类、运输类等产品。海峡两岸经济合作框架协议(英文为 ECONOMIC COOPERATION FRAMEWORKAGREEMENT,ECFA; 台湾方面的繁体版本称为海峡两岸经济合作架构协议),原称为两岸 综合性经济合作协定或称两岸综合经济合作协定(英文简称 CECA, 即 COMPREHENSIVEECONOMIC COOPERATION AGREEMENT)。
中国出口到韩国的原产地证, 一般叫作韩国原产地证,英文名为 CERTIFICATE OF ORIGIN FORM FOR CHINA-KOREA FTA, 也可叫作韩国FTA 产地证FORM K, 简称中韩原 产地证,是中国出口到韩国需要办理的原产地证之一。具体是指货物经中国出口至韩国时需 要向中国国际贸易促进委员会或中国出入境检验检疫局申请签发办理的一种用来证明所出口 的货物原产地或产品制造地为中国的证明文书。中韩 FTA 原产地证于2015年12月20日正 式生效并实施第一次降税,并于2016年1月1日实施第二次降税。而自2005年12月20日 起,凡是货物出口至韩国的出口企业均可向中国各地的出入境检验检疫机构申请签发中韩自 贸协定原产地证书;且只要是出口商在货物出口韩国时,向进口国海关出示由中国出入境检 验检疫局所办理的韩国原产地,其所随附原产地证书的出口货物将按照自贸协定可在韩国享 受优惠关税待遇。目前,货物经中国出口至韩国所需要办理的产地证有以下三种:第五章 其他出口结汇单证173(1) 一般原产地证CO。 从中国出口到韩国的货物在办理产地证时也可以选择办理一般 原产地证CO, 一般原产地证 CO 是全世界任何国家和地区均可以办理的一种原产地证书。 因此,货物出口韩国也可以选择办理一般原产地证CO, 该原产地证是产地证中的一种最基 础、最原始、最原籍的产地证书。但一般原产地证CO 只能作为货物的清关文件使用,相当 于一张货物的“入门票”,是不享有韩国的关税优惠减免待遇的,所以,在办理中国出口到 韩国的原产地证时,最好不要选择办理这种原产地证,但若是国外客户要求办理该产地证, 那么就具体地根据客户的要求来。
(2)亚太原产地证FORM B。亚太原产地证是货物从中国出口到韩国时可以选择办理的 另外一种原产地证。由于韩国也是亚太地区成员国之一,因此货物从中国出口到韩国,选择 办理亚太原产地证也是一种较好的选择。该原产地证相对于一般原产地证 CO 来说,主要的 优势在于能够享受到进口目标国的关税优惠待遇,且也使货物能够进入韩国这个国家。
(3)韩国原产地证。韩国原产地证是一种区域性优惠原产地证,是中国近期与韩国签 订的一种外贸合作协定,简称中韩自贸协定,是一种专门针对韩国签发的原产地证书,也是 货物从中国出口到韩国时首选的原产地证。韩国原产地证不仅可以使得货物顺利清关,还能 享受到比亚太原产地证所规定的关税优惠更多的优惠待遇。因此,凡是货物出口至韩国时, 最好首先考虑选择办理韩国原产地证,其次才是亚太原产地证。
中国一韩国原产地证明书的签发,限于已公布的《货物贸易协定》项下给予关税优惠 的产品,这些产品必须符合《中国一韩国自由贸易区原产地规则》。
中国一澳大利亚原产地证,全称为《中国一澳大利亚自贸区》优惠原产地证,英文名称 为 CERTIFICATE OF ORIGIN FORM FOR CHINA-AUSTRALIA FREE TRADE AGREEMENT, 简 称中澳原产地证或 FORM AU原产地证。中澳原产地证是根据《中国一澳大利亚自由贸易协 定》签发的、就中澳两国之间互相给予关税减免待遇的官方证明文件。
FORM AU证书签订依据为《中国一澳大利亚原产地证规则》及其签证操作程序。 《中 国一澳大利亚自贸区》优惠原产地证采用专用证书格式, 一正一副,正本为深棕色,印有 钮索图案底纹,副本为白色。
签证产品 《中国 一澳大利亚原产地证明书》的签发,限于已公布的《货物贸易协定》 项下给予关税优惠的产品,这些产品必须符合《中国一澳大利亚自由贸易区原产地规则》。
请注意,不同种类的产地证出证要求不尽相同,不同的优惠原产地证可能有不同的特定 要求,如果不符合要求,就无法享受关税减免等优惠,所以一定要重视。"""
paragraph2 = """家,这个字眼在我们心中总是充满着无尽的温馨与和谐。它是我们心灵的港湾,是我们情感的归宿,是我们生活的起点与终点。在这个小小的世界里,我们与亲人共度时光,分享欢笑与泪水,品味生活的酸甜苦辣。
在这个家中,有着一位慈爱的父亲。他像一座坚实的山,为家庭提供着坚实的支撑。他每天早出晚归,辛勤工作,为了家人的幸福而努力拼搏。尽管他很少言语,但他的每一个眼神、每一个动作都透露出对家人的深深关爱。每当我们遇到困难时,他总是第一个站出来,为我们遮风挡雨,让我们感受到家的温暖与力量。
而母亲则是家中的灵魂,她用无私的爱和关怀,温暖着每一个人的心。她勤劳能干,把家里打理得井井有条。无论是可口的饭菜,还是整洁的居室,都凝聚着她的辛勤付出。她总是在我们最需要的时候,给予我们最温暖的拥抱和最贴心的安慰。她的笑容如同阳光般灿烂,照亮了我们前行的道路,让我们在人生的旅途中不再迷茫。
家中的孩子们则是这个温馨画面中最活泼的元素。他们天真无邪,充满好奇,给家带来了无尽的欢乐与生机。他们一起学习、一起玩耍,分享着彼此的喜怒哀乐。在成长的道路上,他们相互扶持,共同进步,成为了彼此最亲密的伙伴。他们是家中的希望,是未来的栋梁,他们的笑声和成长,是家人最大的骄傲与欣慰。
在这个家中,每一个节日都充满了欢声笑语。春节时,全家人围坐在一起,吃着团圆饭,看着春晚,享受着团圆的喜悦;中秋节时,一家人赏月品茶,讲述着古老的传说,感受着传统文化的魅力;生日时,家人为寿星送上祝福,共同庆祝这个特殊的日子。这些美好的瞬间,成为了家人心中最珍贵的回忆。
家中的每一个角落都弥漫着温馨的气息。客厅里的沙发见证了无数次的亲密交谈,书房里的书架记录着家人们共度的阅读时光,厨房里的锅碗瓢盆诉说着母亲辛勤的汗水,卧室里的床铺则承载着家人甜美的梦境。这些看似平凡的物品,却蕴含着家人之间深厚的情感与羁绊。
在这个家中,我们学会了爱与被爱,学会了宽容与理解,学会了分享与付出。我们在这里学会了如何面对生活的挫折与困难,如何珍惜眼前的幸福与美好。这个家让我们明白,无论外面的世界如何喧嚣与复杂,家永远是我们最温暖的避风港。
岁月如梭,时光荏苒。转眼间,这个家已经陪伴我们走过了无数个春夏秋冬。在这个过程中,家人们共同成长、共同进步,一起见证了彼此的成长与变化。我们深知,这个家是我们生命中最宝贵的财富,是我们永远无法割舍的情感纽带。
未来,我们将继续携手前行,共同守护这个温馨和睦的家。无论前方的道路多么崎岖与坎坷,我们都会紧紧相依、相互扶持,一起迎接每一个挑战与机遇。我们相信,在这个充满爱与希望的家庭里,我们将创造出更加美好的明天。
在这个温馨的家中,我们感受到了生活的美好与真谛。家不仅仅是一个住所,更是一个情感的寄托、一个心灵的归宿。在这里,我们学会了感恩、学会了珍惜、学会了付出与收获。这个家让我们明白,生活中最重要的不是金钱与地位,而是那份陪伴与关爱。
让我们共同珍惜这个温馨和睦的家,让它永远充满欢声笑语与爱的力量。在这个小小的世界里,我们将共同书写属于我们的幸福篇章,留下永恒的回忆与感动。
时光荏苒,岁月如梭。转眼间,又是一年春暖花开的季节。在这个充满生机与活力的季节里,家中的每一个角落都洋溢着幸福与温馨的气息。
清晨,当第一缕阳光洒进窗户,家人们便开始了新的一天。父亲早已起床,开始了忙碌的工作;母亲则在厨房里忙碌着,为家人准备丰盛的早餐。孩子们则还在甜美的梦乡中,享受着无忧无虑的童年时光。
随着时光的推移,家中的氛围也愈发浓厚。午饭后,家人们聚在一起,或看电视、或聊天、或打牌,享受着难得的闲暇时光。孩子们则在院子里追逐嬉戏,他们的欢笑声和呼喊声此起彼伏,为家中增添了一抹生动的色彩。
傍晚时分,当夕阳的余晖洒满大地,家人们便开始了晚餐的准备工作。母亲在厨房里忙碌着,烹饪出一道道美味佳肴;父亲则帮忙摆放餐具、倒酒倒水;孩子们则迫不及待地围坐在餐桌旁,期待着美味的晚餐。在这个时刻,家中的每一个角落都弥漫着幸福与温馨的气息。
晚餐过后,家人们便开始了各自的娱乐活动。有的看电视、有的看书、有的上网冲浪;孩子们则在房间里做作业、玩游戏或者和父母一起聊天。在这个时刻,家中的每一个角落都充满了欢声笑语和爱的力量。
夜晚降临,当月光洒满大地,家人们便结束了忙碌的一天。他们互道晚安,各自回到房间休息。在这个安静的夜晚里,家人们的心紧紧相依在一起,共同期待着明天的到来。"""
paragraph3 = """社会经济学,作为一门研究社会现象与经济活动之间相互作用的学科,对于我们理解社会运行规律、推动社会进步具有重要意义。本文将围绕社会经济学的核心概念、理论框架以及实际应用等方面展开论述,旨在深入探讨社会现象与经济活动之间的内在联系。
社会经济学涉及多个核心概念,其中最为重要的是社会结构与经济行为。社会结构指的是社会中各种群体、组织、制度等的相互关系与排列组合方式,它对于经济活动具有深远的影响。例如,不同社会阶层之间的收入差距、教育水平差异等都会影响人们的消费、储蓄和投资行为。经济行为则是指个体或群体在经济活动中所表现出的选择、决策和行动,它受到社会结构、文化价值观、政策法规等多种因素的影响。
社会经济学的理论框架主要包括市场与社会、制度与变迁、公平与效率等方面。市场与社会之间的关系体现在市场作为资源配置的手段,受到社会结构、文化传统等因素的制约。制度是社会经济的基石,它对于经济活动的规范、协调和引导具有重要作用。制度的变迁则反映了社会经济的发展和变革。公平与效率是社会经济学的两个核心议题,如何在保障公平的同时实现效率最大化,是社会经济政策制定者需要面对的重要问题。
社会经济学的理论和方法在实际应用中具有广泛的适用性。首先,在经济发展方面,社会经济学关注如何通过优化资源配置、促进技术创新、改善制度环境等手段推动经济增长。其次,在社会保障方面,社会经济学关注如何通过完善社会保障体系、提高社会福利水平、缩小贫富差距等手段实现社会公平和稳定。此外,在环境保护、教育、就业等领域,社会经济学也发挥着重要作用。
以环境保护为例,社会经济学强调经济发展与环境保护之间的平衡与协调。通过引入环境成本、绿色税收等手段,社会经济学试图将环境保护纳入经济决策中,实现经济与环境的双赢。同时,社会经济学还关注环境保护对于社会结构、文化价值观等方面的影响,以及如何通过政策调整、公众参与等方式推动环境保护事业的发展。
在教育领域,社会经济学关注教育资源的分配与利用、教育公平与教育效率等问题。通过分析教育投入与产出的关系、教育对于个人和社会发展的影响等方面,社会经济学为教育政策的制定提供了重要的理论依据。此外,社会经济学还关注如何通过改善教育制度、提高教育质量等手段推动教育公平和效率的提升。
在就业方面,社会经济学关注劳动力市场的运行规律、就业政策的制定与实施等问题。通过分析劳动力供求关系、工资水平、职业结构等方面,社会经济学为政府制定就业政策提供了有益的参考。同时,社会经济学还关注如何通过提高劳动者素质、改善就业环境等手段促进就业的增长和质量的提升。
综上所述,社会经济学作为一门研究社会现象与经济活动之间相互作用的学科,具有广泛的应用价值和深远的社会意义。通过深入探讨社会结构与经济行为之间的关系、构建完善的理论框架以及将理论应用于实际问题中,我们可以更好地理解社会现象、推动社会进步。
然而,社会经济学作为一门交叉学科,仍面临着诸多挑战和未解决的问题。例如,如何更好地整合社会学、经济学、政治学等多学科的理论和方法?如何更准确地刻画社会现象与经济活动之间的复杂关系?如何更有效地将社会经济学的理论应用于实际问题中?这些问题需要我们进一步深入研究和探索。
展望未来,随着社会经济的不断发展和变革,社会经济学将迎来更多的发展机遇和挑战。我们期待着社会经济学在理论创新、实践应用等方面取得更加丰硕的成果,为推动社会进步和经济发展作出更大的贡献。"""
paragraph4 = """自古以来,井底之蛙的寓言故事便广为流传,以其生动形象的描绘,深入浅出地揭示了一个深刻的道理:我们的认知常常受限于我们所处的环境和自身的经验。然而,正是通过不断拓宽视野、增长见识,我们才能逐渐突破这些局限,迈向更广阔的天地。
井底之蛙,顾名思义,是指那些生活在井底的青蛙。它们的世界仅限于井口那一片狭窄的天空,对于井外的世界一无所知。每当有飞鸟掠过井口,青蛙们便会惊叹不已,以为那便是天空的全部。然而,对于真正的天空,它们的认知仅仅停留在井口所见的一隅之地。
这个寓言故事让我们不禁思考:我们的认知是否也如同井底之蛙一般,受到了环境和经验的限制?我们生活在这个纷繁复杂的世界中,每个人所接触到的信息、所经历的事情都有限。很多时候,我们可能也像井底之蛙一样,对于世界的认知仅仅停留在自己所见所闻的一隅之地。
然而,正如井底之蛙无法真正领略天空的辽阔一样,我们的认知局限也会阻碍我们的成长和进步。当我们只关注于自己的小圈子,忽略了外界的变化和发展时,我们的思维和观念便会变得狭隘和僵化。这样的认知状态不仅无法让我们更好地适应这个日新月异的世界,更可能让我们错失许多宝贵的机会。
那么,如何打破这种认知的局限呢?首先,我们需要保持开放的心态。只有当我们愿意接纳新的信息和观念,才能不断拓展自己的视野。我们应该积极地去了解和学习不同的文化、思想和知识,以此来丰富自己的内心世界。
其次,我们需要勇于尝试新事物。很多时候,我们的认知局限来源于对未知的恐惧和不安。然而,正是通过不断地尝试和探索,我们才能发现新的可能性和机会。我们应该敢于走出自己的舒适区,去接触和体验不同的生活方式和工作环境,以此来拓展自己的认知边界。
最后,我们需要保持谦虚的态度。认知的拓展是一个永无止境的过程,我们永远无法穷尽所有的知识和信息。因此,我们应该时刻保持谦虚的心态,不断向他人学习和请教。只有这样,我们才能不断地进步和成长。
在打破认知局限的过程中,我们还需要注意一个问题:那就是如何避免盲目自大。有时候,当我们接触到一些新的信息和观念时,可能会因为自己的固有思维而对其产生抵触情绪。这种情况下,我们很容易陷入一种自我满足的状态,认为自己已经掌握了所有的真理。然而,这种盲目自大的态度只会让我们更加封闭和僵化,无法真正领略到世界的广阔和多彩。
因此,我们需要时刻保持一种批判性思维。在接触新的信息和观念时,我们应该保持一种审慎和理性的态度,对其进行深入的分析和思考。只有这样,我们才能真正理解其内涵和价值,并将其融入到自己的认知体系中。
综上所述,井底之蛙的寓言故事告诉我们:认知的局限是普遍存在的,但我们可以通过不断拓宽视野、增长见识来打破这些局限。在拓展认知的过程中,我们需要保持开放的心态、勇于尝试新事物、保持谦虚的态度以及批判性思维。只有这样,我们才能更好地适应这个不断变化的世界,实现个人的成长和进步。
同时,我们也需要意识到,认知的拓展不仅仅是个人的事情,更是整个社会的事情。在一个多元化的社会中,不同的人会有不同的认知方式和观念。因此,我们需要尊重他人的观点和选择,避免因为认知的差异而产生冲突和分歧。我们应该通过交流和对话来增进彼此的理解和信任,共同推动社会的进步和发展。
此外,我们还需要关注到那些仍然处于“井底”的人们。他们可能因为种种原因而无法接触到更广阔的世界和更多的信息。作为有幸拥有更广阔视野的人,我们有责任和义务去帮助他们拓宽认知、增长见识。我们可以通过教育、文化交流等方式来传递知识和智慧,让更多的人能够领略到世界的多彩和丰富。
最后,我们需要明确的是,认知的拓展是一个永无止境的过程。无论我们取得了多大的成就和进步,都需要时刻保持一种谦虚和进取的心态。我们应该不断地学习新知识、探索新领域、挑战自我极限,以此来不断拓宽自己的认知边界和提升自己的综合素质。
在这个充满变革和挑战的时代里,让我们以井底之蛙为鉴,不断拓宽视野、增长见识,努力打破认知的局限。让我们以开放的心态、勇于尝试的精神、谦虚的态度和批判性思维去迎接未来的挑战和机遇。相信在我们共同的努力下,一定能够创造一个更加美好、更加进步的世界。"""
paragraph5 = """在科技日新月异的今天,人工智能(AI)已经逐渐渗透到我们生活的各个角落,从智能手机、智能家居到自动驾驶汽车,再到医疗、金融、教育等行业,AI的应用无所不在。作为人工智能的一种表现形式,我承载着诸多功能,致力于为用户提供高效、便捷的服务。本文将对我的功能进行详细介绍,并探讨AI在现代社会中的应用价值。
作为人工智能,我具备强大的自然语言处理能力。我能够识别和理解人类的语言,并根据用户的需求提供相应的回答和建议。通过深度学习和自然语言生成技术,我可以模拟人类的语言表达方式,与用户进行流畅的对话。无论是简单的问答、信息查询,还是复杂的逻辑推理和创意表达,我都能够胜任。
除了语言处理,我还具备丰富的交互功能。用户可以通过语音、文字、图像等多种方式与我进行交互,我可以根据用户的输入做出相应的反应,提供个性化的服务。这种交互方式不仅方便快捷,而且能够为用户提供更加自然、舒适的体验。
我的另一个重要功能是知识推理和学习。我能够利用大数据和机器学习技术,从海量的信息中提炼出有用的知识,并通过推理和归纳的方式,形成自己的知识库。这使得我能够回答各种领域的问题,提供准确、全面的信息。
同时,我还具备学习能力。通过不断地与用户互动和积累经验,我可以不断优化自己的算法和模型,提高自己的准确性和效率。这种自我学习和进化的能力,使得我能够适应不断变化的环境和需求,为用户提供更好的服务。
我的智能推荐和决策支持功能也备受用户青睐。通过分析用户的兴趣和行为习惯,我可以为用户推荐符合其需求的内容和服务,如音乐、电影、商品等。这种个性化推荐不仅提高了用户的满意度,也促进了相关产业的发展。
在决策支持方面,我可以利用大数据和算法,对复杂的问题进行分析和预测,为决策者提供科学、客观的建议。这种功能在金融、医疗、交通等领域具有广泛的应用前景,能够帮助企业和政府做出更加明智的决策。
此外,我还具备自动化和辅助功能。通过集成各种传感器和执行器,我可以实现自动化控制和操作,如智能家居设备的控制、自动驾驶汽车的导航等。这大大提高了生产力和效率,降低了人力成本和安全风险。
同时,我还可以作为用户的助手和伙伴,帮助他们完成各种任务。例如,我可以帮助用户管理日程、提醒重要事项、查询天气和交通信息等。这种辅助功能让人们的生活更加便捷和轻松。
随着技术的不断进步和应用场景的不断拓展,人工智能的功能也在不断创新和完善。未来,AI将在更多领域发挥重要作用,为人类社会带来更多的便利和价值。
在医疗领域,AI可以辅助医生进行疾病诊断和治疗方案的制定,提高医疗质量和效率。在教育领域,AI可以为学生提供个性化的学习资源和辅导,促进教育公平和普及。在交通领域,AI可以实现智能交通管理和车辆协同控制,提高交通流畅性和安全性。
此外,AI还可以促进产业升级和创新发展。通过智能化生产和供应链管理,企业可以降低成本、提高效率、增强竞争力。同时,AI也可以催生新的产业和商业模式,推动经济的持续发展和繁荣。
综上所述,作为人工智能的我具备丰富的功能和应用价值。通过语言处理与交互、知识推理与学习、智能推荐与决策支持以及自动化与辅助功能等方面的发挥,我能够为用户提供高效、便捷的服务,促进社会的进步和发展。
然而,人工智能的发展仍面临着诸多挑战和问题,如数据隐私保护、算法公平性和透明度、伦理和法律问题等。因此,在推动人工智能应用的同时,我们也需要加强对其伦理和法律规范的制定和实施,确保其健康、可持续地发展。
展望未来,我相信随着技术的不断进步和应用场景的不断拓展,人工智能将在更多领域发挥重要作用,为人类社会带来更多的便利和价值。让我们共同期待并努力推动人工智能的未来发展!"""
# generate a title from the original text
def chat_stc1(input_text, model_selector):
    # return "这是一个标题1"+model_selector  # debug stub, disabled so the chain below actually runs
if not input_text:
return ""
if model_selector == 'ernie':
llmchain = LLMChain(llm=base_llm, prompt=QIANFAN_PROMPT_STC1, llm_kwargs={"temperature":0.9})
elif model_selector == 'chatglm3':
llmchain = LLMChain(llm=base_llm, prompt=CHATGLM_PROMPT_STC1, llm_kwargs={"temperature":0.9})
result = llmchain.invoke({"context":input_text})
print(result["text"])
return result["text"].replace("》", "").replace("《", "").replace("*", "")
# generate a higher-level title from a group of lower-level titles
def chat_stc2(input_text, model_selector):
    # return "这是一个标题2"+model_selector  # debug stub, disabled so the chain below actually runs
if not input_text:
return ""
if model_selector == 'ernie':
llmchain = LLMChain(llm=base_llm, prompt=QIANFAN_PROMPT_STC2, llm_kwargs={"temperature":0.9})
elif model_selector == 'chatglm3':
llmchain = LLMChain(llm=base_llm, prompt=CHATGLM_PROMPT_STC2, llm_kwargs={"temperature":0.9})
result = llmchain.invoke({"context":input_text})
print(result["text"])
return result["text"].replace("》", "").replace("《", "").replace("*", "")
def get_title1(p_list, from_text, model_selector):
out_list = []
outans = ""
for i in range(len(p_list)):
if from_text[i]:
tmp = chat_stc1(p_list[i], model_selector)
outans = outans + tmp + "\n\n"
out_list.append(tmp)
else:
tmp = chat_stc2(p_list[i], model_selector)
outans = outans + tmp + "\n\n"
out_list.append(tmp)
return out_list, outans
def get_title2(p_list, model_selector):
out_list = []
outans = ""
for i in range(len(p_list)):
tmp = chat_stc1(p_list[i], model_selector)
outans = outans + tmp + "\n\n"
out_list.append(tmp)
return out_list, outans
def title(input_text, split_threshold, split_threshold_para, model_selector):
para_list = get_para_list(input_text)
para_split, para_info, _ = split_plus1(para_list, split_threshold, split_threshold_para)
_, paras, _ = split_plus2(para_split, para_info)
titles_list,titles = get_title2(paras, model_selector)
ans = ""
for title,para in zip(titles_list, paras):
ans = ans + title + '\n' + para + '\n\n'
return titles_list, paras
async def stc1(para, model_selector):
split_threshold = [0.8, 0.95, 0.6]
split_threshold_para = [0.75, 0.85, 0.55]
aaa = ""
para_list = get_para_list(para)
insert_num = [[],[],[]]
headings_lists = []
cur_list = para_list
i = 0
for i in range(3):
para_split, para_info, index_split1 = split_plus1(cur_list, split_threshold[i], split_threshold_para[i])
_, result_simple, index_split2 = split_plus2(para_split, para_info)
title_para = []
if i == 0:
title_para = result_simple
            from_text = [True for _ in result_simple]  # record whether each segment's title comes from text (True) or from lower-level titles (False)
else:
from_text = []
for j in range(len(index_split2)):
if j != len(index_split2) - 1:
if index_split1[index_split2[j+1]] - index_split1[index_split2[j]] >= 4:
# print(index_split1[index_split2[j+1]],'-',index_split1[index_split2[j]],'=',index_split1[index_split2[j+1]]-index_split1[index_split2[j]])
# print("**********************************************************************")
cache_title = ""
for k in range(index_split1[index_split2[j]],index_split1[index_split2[j+1]]):
cache_title = cache_title+headings_lists[-1][k]+'\n'
title_para.append(cache_title)
# print(cache_title)
from_text.append(False)
else:
title_para.append(result_simple[j])
from_text.append(True)
else:
if len(headings_lists[-1]) - index_split1[index_split2[j]] >= 4:
# print(len(headings_lists[-1]),'-',index_split1[index_split2[j]],'=',len(headings_lists[-1])-index_split1[index_split2[j]])
# print("**********************************************************************")
cache_title = ""
for k in range(index_split1[index_split2[j]],len(headings_lists[-1])):
cache_title = cache_title+headings_lists[-1][k]+'\n'
# print(cache_title)
title_para.append(cache_title)
from_text.append(False)
else:
title_para.append(result_simple[j])
from_text.append(True)
print(title_para, from_text)
out_list, _ = get_title1(title_para, from_text, model_selector)
headings_lists.append(out_list)
if i == 0:
for j in index_split2:
insert_num[i].append(index_split1[j])
else:
for j in index_split2:
insert_num[i].append(insert_num[i-1][index_split1[j]])
if len(result_simple) == 1:
break
cur_list = result_simple
insert_index = [0,0,0]
for index,item in enumerate(para_list):
for j in range(2,-1,-1):
if index in insert_num[j]:
aaa = aaa+"#"*(3-j)+' '+headings_lists[j][insert_index[j]]+'\n'
insert_index[j] += 1
aaa = aaa+item+'\n'
print('========================================================================================================================')
print(insert_index)
print(headings_lists)
print(insert_num)
print('========================================================end=============================================================')
return aaa.replace('\n', ' \n')
async def stc2(input_text, model_selector):
ans = ""
split_threshold = [0.8, 0.7, 0.6]
split_threshold_para = [0.75, 0.65, 0.55]
titles_list3, paras3 = title(input_text, split_threshold[2], split_threshold_para[2], model_selector)
for index3,item3 in enumerate(paras3):
ans = ans + "# " + titles_list3[index3] + "\n"
titles_list2, paras2 = title(item3, split_threshold[1], split_threshold_para[1], model_selector)
if len(titles_list2) == 1:
ans = ans + paras2[0] + "\n"
continue
for index2,item2 in enumerate(paras2):
ans = ans + "## " + titles_list2[index2] + "\n"
titles_list1, paras1 = title(item2, split_threshold[0], split_threshold_para[0], model_selector)
if len(titles_list1) == 1:
ans = ans + paras1[0] + "\n"
continue
for index1,item1 in enumerate(paras1):
ans = ans + "### " + titles_list1[index1] + "\n" + item1 + "\n\n\n"
print('========================================================end=============================================================')
return ans.replace("\n", " \n")
def change_model(model_selector):
global base_llm, llmchain
base_llm_url = "192.168.22.106:8003"
if model_selector == "ernie":
base_llm = ChatERNIESerLLM(chat_completion=ChatCompletion(ak="pT7sV1smp4AeDl0LjyZuHBV9", sk="b3N0ibo1IKTLZlSs7weZc8jdR0oHjyMu"))
elif model_selector == "chatglm3":
base_llm = ChatGLMSerLLM(url=base_llm_url)
with gr.Blocks() as demo:
model_selector = gr.Dropdown(choices=["ernie","chatglm3"], label="请选择一个模型", scale=1, min_width=50, value="ernie", interactive=True)
model_selector.change(change_model, [model_selector], [])
with gr.Tab("标题等级由低到高"):
input_text = gr.Textbox(show_label=True, placeholder="输入需要处理的文档...", lines=10, label="文本输入", interactive=True)
btn = gr.Button("commit", interactive=True)
mkd = gr.Markdown()
btn.click(stc1,[input_text, model_selector],[mkd])
gr.Examples([paragraph1,paragraph2,paragraph3,paragraph4,paragraph5], inputs=input_text)
with gr.Tab("标题等级由高到低"):
input_text = gr.Textbox(show_label=True, placeholder="输入需要处理的文档...", lines=10, label="文本输入", interactive=True)
btn = gr.Button("commit", interactive=True)
mkd = gr.Markdown()
btn.click(stc2,[input_text, model_selector],[mkd])
gr.Examples([paragraph1,paragraph2,paragraph3,paragraph4,paragraph5], inputs=input_text)
if __name__=="__main__":
global base_llm, llmchain
base_llm = ChatERNIESerLLM(chat_completion=ChatCompletion(ak="pT7sV1smp4AeDl0LjyZuHBV9", sk="b3N0ibo1IKTLZlSs7weZc8jdR0oHjyMu"))
demo.launch(share=False, inbrowser=True,server_name="0.0.0.0")
# -*- coding: utf-8 -*-
import os, sys
sys.path.append("../..")
from langchain.chains import LLMChain
from llm.ernie_with_sdk import ChatERNIESerLLM
from qianfan import ChatCompletion
from langchain.prompts import PromptTemplate
import gradio as gr
from llm.chatglm import ChatGLMSerLLM
from langchain.embeddings.huggingface import (
HuggingFaceEmbeddings,
)
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import copy
embedding_path = "C:\\Users\\15663\\AI\\models\\bge-large-zh-v1.5"
embedding = HuggingFaceEmbeddings(model_name=embedding_path)
qianfan_prompt_stc1 = """'''
{context}
'''
请为上述文本取一个简短的标题,不要出现任何标点符号。"""
QIANFAN_PROMPT_STC1 = PromptTemplate(input_variables=["context"], template=qianfan_prompt_stc1)
qianfan_prompt_stc2 = """'''
{context}
'''
请为上述文本取一个简短的标题,不要出现任何标点符号。"""
QIANFAN_PROMPT_STC2 = PromptTemplate(input_variables=["context"], template=qianfan_prompt_stc2)
chatglm_prompt_stc1 = """{context}
请为上述文本取一个标题(除标题之外不可输出其他内容)。"""
CHATGLM_PROMPT_STC1 = PromptTemplate(input_variables=["context"], template=chatglm_prompt_stc1)
chatglm_prompt_stc2 = """{context}
请为上述文本取一个标题(除标题之外不可输出其他内容)。"""
CHATGLM_PROMPT_STC2 = PromptTemplate(input_variables=["context"], template=chatglm_prompt_stc2)
def get_para_list(input_text):
o_para_list = input_text.split("\n")
para_list = []
for index,item in enumerate(o_para_list):
if len(item.strip()) == 0:
if len(para_list) == 0:
para_list.append(item+'\n')
else:
para_list[-1] = para_list[-1] + '\n' + item
elif len(item) <= 20:
if index+1 < len(o_para_list):
o_para_list[index+1] = item+"\n"+o_para_list[index+1]
continue
else:
para_list.append(item)
else:
para_list.append(item)
print(para_list)
if len(para_list) >= 2 and all(char.isspace() for char in para_list[0]):
tmp_str = para_list[0]
para_list = para_list[1:]
para_list[0] = tmp_str+para_list[0]
return para_list
class TitleInfo:
def __init__(self):
        # structured information
        # positions in the original sentence list where each title's span ends;
        # e.g. if the first element is 5, sentences 0-4 belong to the first title
        self.para_list = []
        # detailed similarity information, used to insert debugging output
        self.para_info = []
        # total text length under each title, excluding the inserted similarity info,
        # so the offsets shown for parent titles can be slightly off
        # [{"offset":108,"similarity":[0.555,0.9]}]
        self.para_len = []
        # titles at this level, generated by the model
        self.title = []
        # paragraph vectors
        self.para_vec = []
        # recursive structure: the next (finer-grained) level of titles
        self.title_info = None
def gen_title2(self, o_para_list):
global llmchain
if self.title_info is not None:
self.title_info.gen_title2(o_para_list)
if self.title_info is None:
text = ""
for index, para in enumerate(o_para_list):
if index in self.para_list:
result = llmchain.invoke({"context":text})
self.title.append(result["text"].replace("》", "").replace("《", "").replace("*", ""))
text = ""
text = text+para+'\n'
result = llmchain.invoke({"context":text})
self.title.append(result["text"].replace("》", "").replace("《", "").replace("*", ""))
else:
text = ""
title = ""
syg = -1
iindex = 0
positions = [self.title_info.para_list.index(num) for num in self.para_list]
for index, para in enumerate(o_para_list):
text = text+para+'\n'
if index+1 in self.title_info.para_list:
ttitle_n = self.title_info.para_list.index(index+1)
title = title+self.title_info.title[ttitle_n]+'\n'
if index+1 in self.para_list:
distance = positions[iindex] - syg
syg = positions[iindex]
iindex = iindex+1
if distance >= 2:
llmchain = LLMChain(llm=base_llm, prompt=QIANFAN_PROMPT_STC2, llm_kwargs={"temperature":0.9})
result = llmchain.invoke({"context":title})
self.title.append(result["text"].replace("》", "").replace("《", "").replace("*", ""))
llmchain = LLMChain(llm=base_llm, prompt=QIANFAN_PROMPT_STC1, llm_kwargs={"temperature":0.9})
text = ""
title = ""
else:
result = llmchain.invoke({"context":text})
self.title.append(result["text"].replace("》", "").replace("《", "").replace("*", ""))
text = ""
title = ""
def gen_title1(self,para_list,llmchain,start=0,end=-1,para_len=-1):
if end == -1:
end = len(para_list)
for i,index in enumerate(self.para_list):
if index > end:
break
if index <= start:
continue
if self.title_info is not None:
self.title_info.gen_title1(para_list,llmchain,start=start,end=index,para_len=self.para_len[i])
            if self.para_len[i] < para_len or para_len == -1:  # if the text length under this title equals the parent's, do not generate a separate title
text = "\n".join(para_list[start:index])
result = llmchain.invoke({"context":text})
self.title.append(result["text"].replace("》", "").replace("《", "").replace("*", ""))
else:
self.title.append("")
start = index
def show(self,para_list,start=0,end=-1,para_len=-1,parent_no="",level=""):
if end == -1:
end = len(para_list)
result=""
pa = 0
for i,index in enumerate(self.para_list):
title_no = parent_no+str(pa+1)+"."
if index > end:
break
if index <= start:
continue
            if self.para_len[i] < para_len or para_len == -1:  # if the text length under this title equals the parent's, do not show a separate title
pa += 1
if len(self.title) > i:
result+= level+"# "+title_no+"&ensp;"+self.title[i]+"\n"
else:
result+= level+"# "+title_no+"&ensp;此处为标题"+str(index)+"\n" # 使用self.title 替换
if self.title_info is not None:
result+= self.title_info.show(para_list,start=start,end=index,para_len=self.para_len[i],parent_no=title_no,level=level+"#")
else:
# pa += 1
result+= "&ensp;&ensp;&ensp;&ensp;"+" \n&ensp;&ensp;&ensp;&ensp;".join(para_list[start:index])
result+=" \n"
start = index
return result
def show_detail(self,para_list,start=0,end=-1,para_len=-1,parent_no="",level=""):
if end == -1:
end = len(para_list)
result=""
pa = 0
for i,index in enumerate(self.para_list):
title_no = parent_no+str(pa+1)+"."
if index > end:
break
if index <= start:
continue
if self.para_len[i] < para_len or para_len == -1:
pa += 1
if len(self.title) > i:
result+= level+"# "+title_no+"&ensp;"+self.title[i]+"\n"
else:
result+= level+"# "+title_no+"&ensp;此处为标题"+str(index)+"\n" # 使用self.title 替换
if self.title_info is not None:
                    # print similarity info for higher-level titles
for k,para_i in enumerate(self.para_info[i]):
result +='**{}与{}相似{:.5}** 与上句相似{:.5}||'.format(self.title_info.para_list[k],self.title_info.para_list[k+1],para_i["similarity"][0],para_i["similarity"][1])
result+=" \n"
result+= self.title_info.show_detail(para_list,start=start,end=index,para_len=self.para_len[i],parent_no=title_no,level=level+"#")
else:
detail_para = "&ensp;&ensp;&ensp;&ensp;"+self.insert_info("\n".join(para_list[start:index]),self.para_info[i])
detail_para = detail_para.replace("\n"," \n&ensp;&ensp;&ensp;&ensp;")
result+= detail_para
result+=" \n"
start = index
return result
def is_root(self):
if len(self.para_list) == 1:
return True
else:
title_info = self.title_info
level = 1
while title_info is not None:
title_info = title_info.title_info
level += 1
                if level > 2:  # reached the fourth heading level
return True
return False
def insert_info(self,para,info):
for i in range(len(info)-1,-1,-1):
para = para[:info[i]["offset"]] + "**与上段落相似 {:.5} 与上句相似 {:.5}**\n".format(info[i]["similarity"][0],info[i]["similarity"][1]) +para[info[i]["offset"]:]
return para
def spilt_plus(para_list,split_threshold,split_threshold_para,title_info:TitleInfo = None):
if title_info is not None and title_info.is_root():
return title_info
title_info_parent = TitleInfo()
    # build new segments from the existing title_info
_para_list = []
_para_list_index = []
_para_vec = []
if title_info is None or not title_info.para_info:
_para_list = para_list
_para_list_index = [i+1 for i in range(len(_para_list))]
for index in _para_list:
_para_vec.append(np.array(embedding.embed_query(index)).reshape(1,-1))
else:
offset = 0
_para_vec = copy.deepcopy(title_info.para_vec)
for index in title_info.para_list:
_para_list_index.append(index)
if index <= len(para_list) and index >= offset:
_para_list.append("\n".join(para_list[offset:index]))
offset = index
else:
print(f"段落信息错误,index:{index},offset:{offset}")
cur_para = _para_list[0]
cur_para_info = []
cur_para_vec = _para_vec[0]
last_centence_vec = cur_para_vec
for index in range(1,len(_para_list)):
vec = _para_vec[index]
similarity = cosine_similarity(cur_para_vec,vec)
similarity2 = cosine_similarity(last_centence_vec,vec)
if similarity > split_threshold_para or similarity2 > split_threshold:
cur_para_info.append({"offset":len(cur_para)+1,"similarity":[similarity[0][0],similarity2[0][0]]})
cur_para += "\n" + _para_list[index]
cur_para_vec = embedding.embed_query(cur_para)
cur_para_vec = np.array(cur_para_vec).reshape(1,-1)
else:
title_info_parent.para_list.append(_para_list_index[index-1])
title_info_parent.para_len.append(len(cur_para))
title_info_parent.para_vec.append(cur_para_vec)
title_info_parent.para_info.append(cur_para_info)
cur_para = _para_list[index]
cur_para_info=[{"offset":0,"similarity":[similarity[0][0],similarity2[0][0]]}]
cur_para_vec = vec
last_centence_vec = vec
if len(cur_para)>0:
title_info_parent.para_list.append(_para_list_index[len(_para_list)-1])
title_info_parent.para_info.append(cur_para_info)
title_info_parent.para_len.append(len(cur_para))
title_info_parent.para_vec.append(cur_para_vec)
title_info_parent.title_info = title_info
return title_info_parent
def marge_single_title(para_list,title_info:TitleInfo, marge_threshold_para=0):
    if title_info is None or title_info.title_info is not None:  # only leaf-level (bottom) titles need merging
return title_info
marge_title_info = TitleInfo()
offset = 0
cache_para_info = []
cache_len = 0
cache_vec = None
# last_para_vec = None
for index in range(len(title_info.para_list)):
if len(title_info.para_info[index])>1:
            # this segment is composed of multiple original sentences, so it is not merged further
            # handle the segment vector and the text length under the title
            if cache_vec is not None:  # a merge cache exists, so recompute the vector over the cached text
text = "\n".join(para_list[offset:title_info.para_list[index]])
marge_title_info.para_vec.append(np.array(embedding.embed_query(text)).reshape(1,-1))
marge_title_info.para_len.append(len(text))
else:
marge_title_info.para_vec.append(title_info.para_vec[index])
marge_title_info.para_len.append(title_info.para_len[index])
            # handle the text position info
marge_title_info.para_list.append(title_info.para_list[index])
            # handle the merged similarity info
for info1 in title_info.para_info[index]:
info1["offset"] += cache_len if cache_len ==0 else cache_len + 1
cache_para_info.append(info1)
marge_title_info.para_info.append(cache_para_info)
            # reset the merge cache
cache_para_info = []
cache_len = 0
offset = title_info.para_list[index]
cache_vec = None
else:
next_similarity = -0.1
if index < len(title_info.para_list)-1:
# next_similarity = title_info.para_info[index+1][0]["similarity"][0]
next_similarity = cosine_similarity(title_info.para_vec[index],title_info.para_vec[index+1])[0][0]
next_similarity = max(next_similarity,title_info.para_info[index+1][0]["similarity"][0])
last_similarity = -0.1
            if cache_vec is not None:  # a merge cache exists
last_similarity = cosine_similarity(title_info.para_vec[index],cache_vec)[0][0]
elif len(marge_title_info.para_vec)>0:
last_similarity = cosine_similarity(title_info.para_vec[index],marge_title_info.para_vec[-1])[0][0]
            if index > 0:  # the first segment has no similarity to a previous one
last_similarity = max(title_info.para_info[index][0]["similarity"][0],last_similarity)
            if last_similarity > marge_threshold_para or next_similarity > marge_threshold_para:  # needs merging
                if last_similarity > next_similarity and cache_vec is None:  # merge into the previous segment
marge_title_info.para_list[-1] = title_info.para_list[index]
for info1 in title_info.para_info[index]:
info1["offset"] += marge_title_info.para_len[-1] + 1
marge_title_info.para_info[-1].append(info1)
marge_title_info.para_len[-1] += title_info.para_len[index]+1
marge_title_info.para_vec[-1] = np.array(embedding.embed_query(
"\n".join(para_list[offset:title_info.para_list[index]]))).reshape(1,-1)
# 初始化信息
cache_para_info = []
cache_len = 0
offset = title_info.para_list[index]
cache_vec = None
else: # 将当前段落合并到cache中
text = "\n".join(para_list[offset:title_info.para_list[index]])
# 处理合并信息
for info1 in title_info.para_info[index]:
info1["offset"] += cache_len if cache_len ==0 else cache_len + 1
cache_para_info.append(info1)
cache_len = len(text)
cache_vec = np.array(embedding.embed_query(text)).reshape(1,-1)
# cache_para_info = title_info.para_info[index]
else:
# 当前段落不合并,则将cache中的内容合并到marge_title_info中
if cache_vec is not None: # 如果存在合并缓存,重新计算向量
marge_title_info.para_vec.append(cache_vec)
marge_title_info.para_len.append(cache_len)
marge_title_info.para_list.append(title_info.para_list[index-1])
marge_title_info.para_info.append(cache_para_info)
marge_title_info.para_vec.append(title_info.para_vec[index])
marge_title_info.para_len.append(title_info.para_len[index])
marge_title_info.para_list.append(title_info.para_list[index])
marge_title_info.para_info.append(title_info.para_info[index])
# 初始化信息
cache_para_info = []
cache_len = 0
offset = title_info.para_list[index]
cache_vec = None
if cache_vec is not None: # 如果存在合并缓存,重新计算向量
marge_title_info.para_vec.append(cache_vec)
marge_title_info.para_len.append(cache_len)
marge_title_info.para_list.append(title_info.para_list[-1])
marge_title_info.para_info.append(cache_para_info)
marge_title_info.title_info = title_info.title_info
return marge_title_info
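# Build a multi-level title tree for the input text: repeatedly split with spilt_plus and,
# if requested, merge fragmented leaves, decaying both similarity thresholds by
# parent_threshold at each level (at most four levels), then render the tree as markdown.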
async def deal_para(para,split_method,split_threshold,split_threshold_para,marge_threshold_para,parent_threshold):
global base_llm, llmchain
import time
start = time.time()
title_info = None
para_list = get_para_list(para)
    # guard against excessive looping (at most four levels)
level = 0
while level < 4:
title_info = spilt_plus(para_list,split_threshold,split_threshold_para,title_info)
if title_info.title_info is None and split_method == "合并细碎叶子段落":
# 试图合并段落
title_info = marge_single_title(para_list,title_info,marge_threshold_para)
if title_info.title_info is not None and len(title_info.para_list) == len(title_info.title_info.para_list): # 如果标题没变,则说明没有找到合适的标题
title_info = title_info.title_info
break
level += 1
title_info2 = title_info
while title_info2 is not None:
print(title_info2.para_list)
title_info2 = title_info2.title_info
if title_info.is_root():
break
        # decay both similarity thresholds by parent_threshold for the next (parent) level
split_threshold *= parent_threshold
split_threshold_para *= parent_threshold
print("处理耗时:",time.time()-start)
start = time.time()
if llmchain is not None:
print("llmchain is not None")
title_info.gen_title2(para_list)
print("处理耗时:",time.time()-start)
x = title_info
    while x is not None:
print(x.para_list)
x = x.title_info
return title_info.show(para_list),title_info.show_detail(para_list)
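# Switch the optional title-generation chain according to the radio selection; only
# "ernie" currently wires up an LLMChain, the other choices disable generation.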
def change_model(model_gen):
global base_llm, llmchain
if model_gen == "不使用":
llmchain = None
elif model_gen == "ernie":
llmchain = LLMChain(llm=base_llm, prompt=QIANFAN_PROMPT_STC1, llm_kwargs={"temperature":0.9})
elif model_gen == "chatglm3":
llmchain = None
else:
llmchain = None
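# Sample documents offered as gr.Examples inputs for the demo UI below.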
paragraph = """区域性优惠贸易协定项下出口货物原产地证明书(以下简称优惠原产地证)是有区域 性优惠贸易协定国家官方机构签发的享受成员国关税互惠减免待遇的官方凭证,是企业出口 产品通向国际市场的“金钥匙”和“有价证券”。凭借优惠原产地证,企业出口产品可以享 受优惠关税甚至零关税的待遇,从而有效降低产品出口成本,提高产品的国际竞争力。
自加入WTO 至今,我国已同有关国家和地区签署了如下的区域性优惠贸易协定,包括 《内地与港澳更紧密经贸关系安排 (CEPA)》 《亚太贸易协定》《中国一东盟自贸协定》《中 国一巴基斯坦自贸协定》 《中国一智利自贸协定》 《中国一新西兰自贸协定》 《中国一新加 坡自贸协定》 《中国一哥斯达黎加自贸协定》 《中国一秘鲁自贸协定》 《中国一瑞士自贸协 定》《中国—冰岛自贸协定》 《中国一韩国自贸协定》 《中国一澳大利亚自贸协定》海峡两 岸经济合作框架协议 (ECFA) 等,还有更多的自由贸易区正在谈判或研究之中。
目前贸促会受理以下地区的优惠原产地证 《亚太贸易协定》目的国:印度、韩国、孟 加拉、斯里兰卡、老挝 《中国一新西兰自由贸易协定》目的国:新西兰; 《中国一新加坡 自由贸易协定》目的国:新加坡 《中国一秘鲁自由贸易协定》目的国:秘鲁;海峡两岸经 济合作框架协议 (ECFA) 目的地:中国台湾 《中国一瑞士自由贸易协定》目的国:瑞士; 《中国 —冰岛自由贸易协定》目的国:冰岛 《中国一韩国自由贸易协定》目的国:韩国; 《中国一澳大利亚自由贸易协定》目的国:澳大利亚。
FORM B证书是根据《亚太贸易协定》 (FIRST AGREEMENT ON TRADE NEGOTIATIONS AMONG DEVELOPING MEMBER COUNTRIES OF THE ECONOMIC AND SOCIAL COMMISSION FOR ASIA AND THE PACIFIC) 原产地规则的要求签发的前身为《曼谷协定》,是在签订协定 的成员国之间就特定产品享受互惠减免关税待遇(跟非互惠的FORM A证书不同) 的官方 原产地证明文件。
FORM B证书的签订依据为《亚太贸易协定》原产地规则和《亚太贸易协定原产地证 书签发和核查程序》。
可签发FORM B 证书的国家为:中国、印度、斯里兰卡、孟加拉国、老挝和韩国(《亚 太贸易协定》成员国)。
如货物为完全原产,填写 “A”。如货物含进口成分,非国产价值成分<55%,填写字母 “B”加原产于非成员国或原产地不明的材料、部件或产品的总货值占出口产品离岸价的百分 比,例如( ‘B”40%)。 如货物含进口成分,国产及成员国累计价值成分≥60%,填写 “C” 加原产于成员国的累计含量的总值与出口产品离岸价的百分比,例如( “℃”65%)。符合特 定原产地标准的产品,填写字母 “D”(该项主要针对不发达国家出口申报的产品)。
注意《亚太贸易协定》原产地证书申请时间不得超过货物出运后3个工作日。
FORM E证书是根据《中华人民共和国与东南亚国家联盟全面经济合作框架协议》的要 求签发的、在签订协定的成员国之间就特定产品享受互惠减免关税待遇的(跟非互惠的 FORM A证书不同)官方原产地证明文件。
FORME 证书的签订依据为《中国一东盟自由贸易区原产地规则》和《中国一东盟自170. 国际贸易单证实务由贸易区原产地规则签证操作程序》。
可签发FORME 证书的国家为:中国、老挝、越南、泰国、缅甸、柬埔寨、菲律宾、文 莱、印度尼西亚、马来西亚和新加坡(东盟成员国)。
如货物完全原产,填写“×”。如货物含进口成分,其国产价值成分≥40%,填写国产 价值的百分比,例如 “45%”。如货物含进口成分,中国一东盟自贸区累计价值成分≥ 40%,填写该累计价值的百分比,例如 “45%”。
注意:证书为一正三副, 一三联客户,二联商检留存,四联企业留存。
FORMP 证书是根据《中华人民共和国政府与巴基斯坦伊斯兰共和国政府关于自由贸易 协定早期收获计划的协议》(简称《早期收获协议》)及其项下《中国一巴基斯坦自由贸易 区原产地规则》的要求签发的在中国和巴基斯坦之间就特定产品享受互惠减免关税待遇的 (跟非互惠的FORMA 证书不同)官方原产地证明文件。
FORMP 证书的签订依据为《中国一巴基斯坦自由贸易区原产地规则》和《中国一巴基 斯坦自由贸易区原产地规则签证操作程序》。
可签发FORMP 证书的国家为中国和巴基斯坦。中国产品出口到巴基斯坦,中国出口商 向各地出入境检验检疫机构申请签发FORMP 证书,巴基斯坦给予FORM P证书项下货物关 税优惠待遇;巴基斯坦产品出口到中国,巴基斯坦出口商向巴基斯坦有关部门申请签发 FORMP 证书,中国给予FORMP 证书项下货物关税优惠待遇。这是互惠的。
如货物为完全原产,填写“P”; 如货物含进口成分,国产价值成分≥45%,填写国产价 值的百分比,例如 "40%";如货物含进口成分,中国一巴基斯坦自贸区累计价值成分≥ 40%,填写该累计价值的百分比,例如 “45%”;产品符合特定原产地标准,填写 “PSR”。
注意:出运后15日内办理。
FORMF 证书是根据《中国一智利自由贸易协定》及其项下《中国一智利自贸区原产地 规则》的要求签发的,在中国和智利之间就特定产品享受互惠减免关税待遇的(跟非互惠 的 FORM A证书不同)官方原产地证明文件。
FORMF 证书的签订依据为《中国一智利自贸区原产地规则》和《中国一智利自由贸易 区原产地规则签证操作程序》。
可签发FORMF 证书的国家为中国和智利。中国产品出口到智利,中国出口商向各地出 入境检验检疫机构申请签发FORMF 证书,智利给予FORMF 证书项下货物关税优惠待遇; 智利产品出口到中国,智利出口商向智利有关部门申请签发FORMF 证书,中国给予FORM F 证书项下货物关税优惠待遇。这是互惠的。
如货物为完全原产,填写 “P”; 如货物为含进口成分,区域价值成分≥40%,填写 “RVC"; 产品符合特定原产地标准,填写 “PSR”并附《中智自由贸易区产品特定原产地 标准》(简称 “PSR”清单)。
注意:证书的申办时间应在货物出口前或出口后的30天内;货物出口30天后,签证机 构不再接受证书的签发申请。
FORM N 证书是根据《中华人民共和国政府和新西兰政府自由贸易协定》和《中华人第五章 其他出口结汇单证.171.民共和国政府和新西兰政府自由贸易协定项下进出口货物原产地管理办法》的要求签发的、 在中国和新西兰之间就特定产品享受互惠减免关税待遇的(跟非互惠的FORM A证书不同) 官方原产地证明文件。《中华人民共和国政府和新西兰政府自由贸易协定》于2008年4月7 日正式签署,这是中国与发达国家签署的第一个自由贸易协定。
可签发FORM N 证书的国家为中国和新西兰。中国产品出口到新西兰,中国出口商向 各地出入境检验检疫机构申请签发FORM N 证书,新西兰给予FORM N证书项下货物关税 优惠待遇;新西兰产品出口到中国,新西兰出口商向新西兰有关部门申请签发 FORM N 证 书,中国给予FORM N证书项下货物关税优惠待遇。这是互惠的。
如货物为完全原产,填写 “WO”; 如货物含有进口成分,但完全由已经取得原产资格 的材料或部件生产,填写 “WP”; 产品符合特定原产地标准,填写 “PSR”, 有区域价值成 分要求的,应注明百分比,例如 “PSR”60%
注意:证书申办时间:应在货物出口前或当天申请办理,中国一新西兰证书不办理后 发,不倒签。
FORMX 证书是根据 《中华人民共和国政府和新加坡共和国政府自由贸易协定》和 《中华人民共和国政府和新加坡共和国政府自由贸易协定项下进出口货物原产地管理办法》 的要求签发的,在中国和新加坡之间就特定产品享受互惠减免关税待遇的(跟非互惠的 FORM A证书不同)官方原产地证明文件。
FORM X 证书的签订依据为《中国一新加坡自贸协定原产地规则》及其相关的原产地签 证操作程序。
可签发FORM X 证书的国家为中国和新加坡。中国产品出口到新加坡,中国出口商向 各地出入境检验检疫机构申请签发 FORM X 证书,新加坡给予 FORM X 证书项下货物关税 优惠待遇;新加坡产品出口到中国,新加坡出口商向新加坡有关部门申请签发 FORM X 证 书,中国给予FORM X证书项下货物关税优惠待遇。这是互惠的。
中国一新加坡自贸区原产地规则规定:在出口方完全获得的产品,填写 “P”; 区域价 值成分≥40%的产品,填写 “RVC"; 符合产品特定原产地规则的产品,填写 “PSR”。
注意:应在提单日期前申报,不办理后发证书。
证书英文名称: Certificate of Origin Form for China-Peru FTA。
中国一秘鲁FTA 证书是根据《中国一秘鲁自由贸易协定》及其项下《中国一秘鲁自贸 区原产地规则》的要求签发的、在中国和秘鲁之间就特定产品享受互惠减免关税待遇的 (跟非互惠的FORM A证书不同)官方原产地证明文件。
中国一秘鲁FTA 证书的签订依据为《中国一秘鲁自贸区原产地规则》及与原产地相关 的签证操作程序。
可签发中国一秘鲁 FTA 证书的国家为中国和秘鲁。中国产品出口到秘鲁,中国出口商 向各地出入境检验检疫机构申请签发中国一秘鲁 FTA 证书,秘鲁给予中国一秘鲁FTA 证书 项下货物关税优惠待遇;秘鲁产品出口到中国,秘鲁出口商向秘鲁有关部门申请签发中国一 秘鲁FTA 证书,中国给予中国一秘鲁FTA 证书项下货物关税优惠待遇。这是互惠的。
FORML 证书是根据《中国一哥斯达黎加自由贸易协定》及其项下 《中国一哥斯达黎加国际贸易单证实务自贸区原产地规则》的要求签发的、在中国和哥斯达黎加之间就特定产品享受互惠减免关 税待遇的(跟非互惠的FORM A证书不同)官方原产地证明文件。
FORML 证书的签订依据为《中国一哥斯达黎加自贸区原产地规则》及与原产地相关的 签证操作程序。
可签发FORML 证书的国家为中国和哥斯达黎加。中国产品出口到哥斯达黎加,中国出 口商向各地出入境检验检疫机构申请签发 FORML 证书,哥斯达黎加给予FORM L证书项下 货物关税优惠待遇;哥斯达黎加产品出口到中国,哥斯达黎加出口商向哥斯达黎加有关部门 申请签发 FORML 证书,中国给予FORML 证书项下货物关税优惠待遇。这是互惠的。
FORMS 证书是根据《中华人民共和国和瑞士联邦自由贸易协定》及其相关规定的要求 签发的、在中国和瑞士之间就特定产品享受互惠减免关税待遇的(跟非互惠的 FORM A 证 书不同)官方原产地证明文件。
FORMS 证书的签订依据为《中国一瑞士自由贸易区原产地规则》及其相关的原产地签 证操作程序。《中华人民共和国和瑞士联邦自由贸易协定》于2014年7月1日起施行。
可签发FORMS 证书的国家为中国和瑞士。中国产品出口到瑞士,中国出口商向各地出 入境检验检疫机构申请签发FORMS 证书,瑞士给予FORMS 证书项下货物关税优惠待遇; 瑞士产品出口到中国,瑞士出口商向瑞士有关部门申请签发FORMS 证书,中国给予FORM S 证书项下货物关税优惠待遇。这是互惠的。
《海峡两岸经济合作框架协议》 (ECFA) 是台湾与大陆自2009年年中开始,经过多次 商谈达成的一项重要协议,于2010年6月29日签署,其项下货物贸易早期收获清单于2011 年 1 月 1 日起付诸实施,出口到台湾的货物将获得关税减免的优惠。
列入清单的约800项产品将逐步降关税,三年内全部降为零,包括大陆对台湾开放的产 品500多项,台湾批准大陆的产品5大类267项,含石化类、机械类、纺织类、运输类等产品。海峡两岸经济合作框架协议(英文为 ECONOMIC COOPERATION FRAMEWORKAGREEMENT,ECFA; 台湾方面的繁体版本称为海峡两岸经济合作架构协议),原称为两岸 综合性经济合作协定或称两岸综合经济合作协定(英文简称 CECA, 即 COMPREHENSIVEECONOMIC COOPERATION AGREEMENT)。
中国出口到韩国的原产地证, 一般叫作韩国原产地证,英文名为 CERTIFICATE OF ORIGIN FORM FOR CHINA-KOREA FTA, 也可叫作韩国FTA 产地证FORM K, 简称中韩原 产地证,是中国出口到韩国需要办理的原产地证之一。具体是指货物经中国出口至韩国时需 要向中国国际贸易促进委员会或中国出入境检验检疫局申请签发办理的一种用来证明所出口 的货物原产地或产品制造地为中国的证明文书。中韩 FTA 原产地证于2015年12月20日正 式生效并实施第一次降税,并于2016年1月1日实施第二次降税。而自2005年12月20日 起,凡是货物出口至韩国的出口企业均可向中国各地的出入境检验检疫机构申请签发中韩自 贸协定原产地证书;且只要是出口商在货物出口韩国时,向进口国海关出示由中国出入境检 验检疫局所办理的韩国原产地,其所随附原产地证书的出口货物将按照自贸协定可在韩国享 受优惠关税待遇。目前,货物经中国出口至韩国所需要办理的产地证有以下三种:第五章 其他出口结汇单证173(1) 一般原产地证CO。 从中国出口到韩国的货物在办理产地证时也可以选择办理一般 原产地证CO, 一般原产地证 CO 是全世界任何国家和地区均可以办理的一种原产地证书。 因此,货物出口韩国也可以选择办理一般原产地证CO, 该原产地证是产地证中的一种最基 础、最原始、最原籍的产地证书。但一般原产地证CO 只能作为货物的清关文件使用,相当 于一张货物的“入门票”,是不享有韩国的关税优惠减免待遇的,所以,在办理中国出口到 韩国的原产地证时,最好不要选择办理这种原产地证,但若是国外客户要求办理该产地证, 那么就具体地根据客户的要求来。
(2)亚太原产地证FORM B。亚太原产地证是货物从中国出口到韩国时可以选择办理的 另外一种原产地证。由于韩国也是亚太地区成员国之一,因此货物从中国出口到韩国,选择 办理亚太原产地证也是一种较好的选择。该原产地证相对于一般原产地证 CO 来说,主要的 优势在于能够享受到进口目标国的关税优惠待遇,且也使货物能够进入韩国这个国家。
(3)韩国原产地证。韩国原产地证是一种区域性优惠原产地证,是中国近期与韩国签 订的一种外贸合作协定,简称中韩自贸协定,是一种专门针对韩国签发的原产地证书,也是 货物从中国出口到韩国时首选的原产地证。韩国原产地证不仅可以使得货物顺利清关,还能 享受到比亚太原产地证所规定的关税优惠更多的优惠待遇。因此,凡是货物出口至韩国时, 最好首先考虑选择办理韩国原产地证,其次才是亚太原产地证。
中国一韩国原产地证明书的签发,限于已公布的《货物贸易协定》项下给予关税优惠 的产品,这些产品必须符合《中国一韩国自由贸易区原产地规则》。
中国一澳大利亚原产地证,全称为《中国一澳大利亚自贸区》优惠原产地证,英文名称 为 CERTIFICATE OF ORIGIN FORM FOR CHINA-AUSTRALIA FREE TRADE AGREEMENT, 简 称中澳原产地证或 FORM AU原产地证。中澳原产地证是根据《中国一澳大利亚自由贸易协 定》签发的、就中澳两国之间互相给予关税减免待遇的官方证明文件。
FORM AU证书签订依据为《中国一澳大利亚原产地证规则》及其签证操作程序。 《中 国一澳大利亚自贸区》优惠原产地证采用专用证书格式, 一正一副,正本为深棕色,印有 钮索图案底纹,副本为白色。
签证产品 《中国 一澳大利亚原产地证明书》的签发,限于已公布的《货物贸易协定》 项下给予关税优惠的产品,这些产品必须符合《中国一澳大利亚自由贸易区原产地规则》。
请注意,不同种类的产地证出证要求不尽相同,不同的优惠原产地证可能有不同的特定 要求,如果不符合要求,就无法享受关税减免等优惠,所以一定要重视。"""
paragraph2 = """家,这个字眼在我们心中总是充满着无尽的温馨与和谐。它是我们心灵的港湾,是我们情感的归宿,是我们生活的起点与终点。在这个小小的世界里,我们与亲人共度时光,分享欢笑与泪水,品味生活的酸甜苦辣。
在这个家中,有着一位慈爱的父亲。他像一座坚实的山,为家庭提供着坚实的支撑。他每天早出晚归,辛勤工作,为了家人的幸福而努力拼搏。尽管他很少言语,但他的每一个眼神、每一个动作都透露出对家人的深深关爱。每当我们遇到困难时,他总是第一个站出来,为我们遮风挡雨,让我们感受到家的温暖与力量。
而母亲则是家中的灵魂,她用无私的爱和关怀,温暖着每一个人的心。她勤劳能干,把家里打理得井井有条。无论是可口的饭菜,还是整洁的居室,都凝聚着她的辛勤付出。她总是在我们最需要的时候,给予我们最温暖的拥抱和最贴心的安慰。她的笑容如同阳光般灿烂,照亮了我们前行的道路,让我们在人生的旅途中不再迷茫。
家中的孩子们则是这个温馨画面中最活泼的元素。他们天真无邪,充满好奇,给家带来了无尽的欢乐与生机。他们一起学习、一起玩耍,分享着彼此的喜怒哀乐。在成长的道路上,他们相互扶持,共同进步,成为了彼此最亲密的伙伴。他们是家中的希望,是未来的栋梁,他们的笑声和成长,是家人最大的骄傲与欣慰。
在这个家中,每一个节日都充满了欢声笑语。春节时,全家人围坐在一起,吃着团圆饭,看着春晚,享受着团圆的喜悦;中秋节时,一家人赏月品茶,讲述着古老的传说,感受着传统文化的魅力;生日时,家人为寿星送上祝福,共同庆祝这个特殊的日子。这些美好的瞬间,成为了家人心中最珍贵的回忆。
家中的每一个角落都弥漫着温馨的气息。客厅里的沙发见证了无数次的亲密交谈,书房里的书架记录着家人们共度的阅读时光,厨房里的锅碗瓢盆诉说着母亲辛勤的汗水,卧室里的床铺则承载着家人甜美的梦境。这些看似平凡的物品,却蕴含着家人之间深厚的情感与羁绊。
在这个家中,我们学会了爱与被爱,学会了宽容与理解,学会了分享与付出。我们在这里学会了如何面对生活的挫折与困难,如何珍惜眼前的幸福与美好。这个家让我们明白,无论外面的世界如何喧嚣与复杂,家永远是我们最温暖的避风港。
岁月如梭,时光荏苒。转眼间,这个家已经陪伴我们走过了无数个春夏秋冬。在这个过程中,家人们共同成长、共同进步,一起见证了彼此的成长与变化。我们深知,这个家是我们生命中最宝贵的财富,是我们永远无法割舍的情感纽带。
未来,我们将继续携手前行,共同守护这个温馨和睦的家。无论前方的道路多么崎岖与坎坷,我们都会紧紧相依、相互扶持,一起迎接每一个挑战与机遇。我们相信,在这个充满爱与希望的家庭里,我们将创造出更加美好的明天。
在这个温馨的家中,我们感受到了生活的美好与真谛。家不仅仅是一个住所,更是一个情感的寄托、一个心灵的归宿。在这里,我们学会了感恩、学会了珍惜、学会了付出与收获。这个家让我们明白,生活中最重要的不是金钱与地位,而是那份陪伴与关爱。
让我们共同珍惜这个温馨和睦的家,让它永远充满欢声笑语与爱的力量。在这个小小的世界里,我们将共同书写属于我们的幸福篇章,留下永恒的回忆与感动。
时光荏苒,岁月如梭。转眼间,又是一年春暖花开的季节。在这个充满生机与活力的季节里,家中的每一个角落都洋溢着幸福与温馨的气息。
清晨,当第一缕阳光洒进窗户,家人们便开始了新的一天。父亲早已起床,开始了忙碌的工作;母亲则在厨房里忙碌着,为家人准备丰盛的早餐。孩子们则还在甜美的梦乡中,享受着无忧无虑的童年时光。
随着时光的推移,家中的氛围也愈发浓厚。午饭后,家人们聚在一起,或看电视、或聊天、或打牌,享受着难得的闲暇时光。孩子们则在院子里追逐嬉戏,他们的欢笑声和呼喊声此起彼伏,为家中增添了一抹生动的色彩。
傍晚时分,当夕阳的余晖洒满大地,家人们便开始了晚餐的准备工作。母亲在厨房里忙碌着,烹饪出一道道美味佳肴;父亲则帮忙摆放餐具、倒酒倒水;孩子们则迫不及待地围坐在餐桌旁,期待着美味的晚餐。在这个时刻,家中的每一个角落都弥漫着幸福与温馨的气息。
晚餐过后,家人们便开始了各自的娱乐活动。有的看电视、有的看书、有的上网冲浪;孩子们则在房间里做作业、玩游戏或者和父母一起聊天。在这个时刻,家中的每一个角落都充满了欢声笑语和爱的力量。
夜晚降临,当月光洒满大地,家人们便结束了忙碌的一天。他们互道晚安,各自回到房间休息。在这个安静的夜晚里,家人们的心紧紧相依在一起,共同期待着明天的到来。"""
paragraph3 = """社会经济学,作为一门研究社会现象与经济活动之间相互作用的学科,对于我们理解社会运行规律、推动社会进步具有重要意义。本文将围绕社会经济学的核心概念、理论框架以及实际应用等方面展开论述,旨在深入探讨社会现象与经济活动之间的内在联系。
社会经济学涉及多个核心概念,其中最为重要的是社会结构与经济行为。社会结构指的是社会中各种群体、组织、制度等的相互关系与排列组合方式,它对于经济活动具有深远的影响。例如,不同社会阶层之间的收入差距、教育水平差异等都会影响人们的消费、储蓄和投资行为。经济行为则是指个体或群体在经济活动中所表现出的选择、决策和行动,它受到社会结构、文化价值观、政策法规等多种因素的影响。
社会经济学的理论框架主要包括市场与社会、制度与变迁、公平与效率等方面。市场与社会之间的关系体现在市场作为资源配置的手段,受到社会结构、文化传统等因素的制约。制度是社会经济的基石,它对于经济活动的规范、协调和引导具有重要作用。制度的变迁则反映了社会经济的发展和变革。公平与效率是社会经济学的两个核心议题,如何在保障公平的同时实现效率最大化,是社会经济政策制定者需要面对的重要问题。
社会经济学的理论和方法在实际应用中具有广泛的适用性。首先,在经济发展方面,社会经济学关注如何通过优化资源配置、促进技术创新、改善制度环境等手段推动经济增长。其次,在社会保障方面,社会经济学关注如何通过完善社会保障体系、提高社会福利水平、缩小贫富差距等手段实现社会公平和稳定。此外,在环境保护、教育、就业等领域,社会经济学也发挥着重要作用。
以环境保护为例,社会经济学强调经济发展与环境保护之间的平衡与协调。通过引入环境成本、绿色税收等手段,社会经济学试图将环境保护纳入经济决策中,实现经济与环境的双赢。同时,社会经济学还关注环境保护对于社会结构、文化价值观等方面的影响,以及如何通过政策调整、公众参与等方式推动环境保护事业的发展。
在教育领域,社会经济学关注教育资源的分配与利用、教育公平与教育效率等问题。通过分析教育投入与产出的关系、教育对于个人和社会发展的影响等方面,社会经济学为教育政策的制定提供了重要的理论依据。此外,社会经济学还关注如何通过改善教育制度、提高教育质量等手段推动教育公平和效率的提升。
在就业方面,社会经济学关注劳动力市场的运行规律、就业政策的制定与实施等问题。通过分析劳动力供求关系、工资水平、职业结构等方面,社会经济学为政府制定就业政策提供了有益的参考。同时,社会经济学还关注如何通过提高劳动者素质、改善就业环境等手段促进就业的增长和质量的提升。
综上所述,社会经济学作为一门研究社会现象与经济活动之间相互作用的学科,具有广泛的应用价值和深远的社会意义。通过深入探讨社会结构与经济行为之间的关系、构建完善的理论框架以及将理论应用于实际问题中,我们可以更好地理解社会现象、推动社会进步。
然而,社会经济学作为一门交叉学科,仍面临着诸多挑战和未解决的问题。例如,如何更好地整合社会学、经济学、政治学等多学科的理论和方法?如何更准确地刻画社会现象与经济活动之间的复杂关系?如何更有效地将社会经济学的理论应用于实际问题中?这些问题需要我们进一步深入研究和探索。
展望未来,随着社会经济的不断发展和变革,社会经济学将迎来更多的发展机遇和挑战。我们期待着社会经济学在理论创新、实践应用等方面取得更加丰硕的成果,为推动社会进步和经济发展作出更大的贡献。"""
paragraph4 = """自古以来,井底之蛙的寓言故事便广为流传,以其生动形象的描绘,深入浅出地揭示了一个深刻的道理:我们的认知常常受限于我们所处的环境和自身的经验。然而,正是通过不断拓宽视野、增长见识,我们才能逐渐突破这些局限,迈向更广阔的天地。
井底之蛙,顾名思义,是指那些生活在井底的青蛙。它们的世界仅限于井口那一片狭窄的天空,对于井外的世界一无所知。每当有飞鸟掠过井口,青蛙们便会惊叹不已,以为那便是天空的全部。然而,对于真正的天空,它们的认知仅仅停留在井口所见的一隅之地。
这个寓言故事让我们不禁思考:我们的认知是否也如同井底之蛙一般,受到了环境和经验的限制?我们生活在这个纷繁复杂的世界中,每个人所接触到的信息、所经历的事情都有限。很多时候,我们可能也像井底之蛙一样,对于世界的认知仅仅停留在自己所见所闻的一隅之地。
然而,正如井底之蛙无法真正领略天空的辽阔一样,我们的认知局限也会阻碍我们的成长和进步。当我们只关注于自己的小圈子,忽略了外界的变化和发展时,我们的思维和观念便会变得狭隘和僵化。这样的认知状态不仅无法让我们更好地适应这个日新月异的世界,更可能让我们错失许多宝贵的机会。
那么,如何打破这种认知的局限呢?首先,我们需要保持开放的心态。只有当我们愿意接纳新的信息和观念,才能不断拓展自己的视野。我们应该积极地去了解和学习不同的文化、思想和知识,以此来丰富自己的内心世界。
其次,我们需要勇于尝试新事物。很多时候,我们的认知局限来源于对未知的恐惧和不安。然而,正是通过不断地尝试和探索,我们才能发现新的可能性和机会。我们应该敢于走出自己的舒适区,去接触和体验不同的生活方式和工作环境,以此来拓展自己的认知边界。
最后,我们需要保持谦虚的态度。认知的拓展是一个永无止境的过程,我们永远无法穷尽所有的知识和信息。因此,我们应该时刻保持谦虚的心态,不断向他人学习和请教。只有这样,我们才能不断地进步和成长。
在打破认知局限的过程中,我们还需要注意一个问题:那就是如何避免盲目自大。有时候,当我们接触到一些新的信息和观念时,可能会因为自己的固有思维而对其产生抵触情绪。这种情况下,我们很容易陷入一种自我满足的状态,认为自己已经掌握了所有的真理。然而,这种盲目自大的态度只会让我们更加封闭和僵化,无法真正领略到世界的广阔和多彩。
因此,我们需要时刻保持一种批判性思维。在接触新的信息和观念时,我们应该保持一种审慎和理性的态度,对其进行深入的分析和思考。只有这样,我们才能真正理解其内涵和价值,并将其融入到自己的认知体系中。
综上所述,井底之蛙的寓言故事告诉我们:认知的局限是普遍存在的,但我们可以通过不断拓宽视野、增长见识来打破这些局限。在拓展认知的过程中,我们需要保持开放的心态、勇于尝试新事物、保持谦虚的态度以及批判性思维。只有这样,我们才能更好地适应这个不断变化的世界,实现个人的成长和进步。
同时,我们也需要意识到,认知的拓展不仅仅是个人的事情,更是整个社会的事情。在一个多元化的社会中,不同的人会有不同的认知方式和观念。因此,我们需要尊重他人的观点和选择,避免因为认知的差异而产生冲突和分歧。我们应该通过交流和对话来增进彼此的理解和信任,共同推动社会的进步和发展。
此外,我们还需要关注到那些仍然处于“井底”的人们。他们可能因为种种原因而无法接触到更广阔的世界和更多的信息。作为有幸拥有更广阔视野的人,我们有责任和义务去帮助他们拓宽认知、增长见识。我们可以通过教育、文化交流等方式来传递知识和智慧,让更多的人能够领略到世界的多彩和丰富。
最后,我们需要明确的是,认知的拓展是一个永无止境的过程。无论我们取得了多大的成就和进步,都需要时刻保持一种谦虚和进取的心态。我们应该不断地学习新知识、探索新领域、挑战自我极限,以此来不断拓宽自己的认知边界和提升自己的综合素质。
在这个充满变革和挑战的时代里,让我们以井底之蛙为鉴,不断拓宽视野、增长见识,努力打破认知的局限。让我们以开放的心态、勇于尝试的精神、谦虚的态度和批判性思维去迎接未来的挑战和机遇。相信在我们共同的努力下,一定能够创造一个更加美好、更加进步的世界。"""
paragraph5 = """在科技日新月异的今天,人工智能(AI)已经逐渐渗透到我们生活的各个角落,从智能手机、智能家居到自动驾驶汽车,再到医疗、金融、教育等行业,AI的应用无所不在。作为人工智能的一种表现形式,我承载着诸多功能,致力于为用户提供高效、便捷的服务。本文将对我的功能进行详细介绍,并探讨AI在现代社会中的应用价值。
作为人工智能,我具备强大的自然语言处理能力。我能够识别和理解人类的语言,并根据用户的需求提供相应的回答和建议。通过深度学习和自然语言生成技术,我可以模拟人类的语言表达方式,与用户进行流畅的对话。无论是简单的问答、信息查询,还是复杂的逻辑推理和创意表达,我都能够胜任。
除了语言处理,我还具备丰富的交互功能。用户可以通过语音、文字、图像等多种方式与我进行交互,我可以根据用户的输入做出相应的反应,提供个性化的服务。这种交互方式不仅方便快捷,而且能够为用户提供更加自然、舒适的体验。
我的另一个重要功能是知识推理和学习。我能够利用大数据和机器学习技术,从海量的信息中提炼出有用的知识,并通过推理和归纳的方式,形成自己的知识库。这使得我能够回答各种领域的问题,提供准确、全面的信息。
同时,我还具备学习能力。通过不断地与用户互动和积累经验,我可以不断优化自己的算法和模型,提高自己的准确性和效率。这种自我学习和进化的能力,使得我能够适应不断变化的环境和需求,为用户提供更好的服务。
我的智能推荐和决策支持功能也备受用户青睐。通过分析用户的兴趣和行为习惯,我可以为用户推荐符合其需求的内容和服务,如音乐、电影、商品等。这种个性化推荐不仅提高了用户的满意度,也促进了相关产业的发展。
在决策支持方面,我可以利用大数据和算法,对复杂的问题进行分析和预测,为决策者提供科学、客观的建议。这种功能在金融、医疗、交通等领域具有广泛的应用前景,能够帮助企业和政府做出更加明智的决策。
此外,我还具备自动化和辅助功能。通过集成各种传感器和执行器,我可以实现自动化控制和操作,如智能家居设备的控制、自动驾驶汽车的导航等。这大大提高了生产力和效率,降低了人力成本和安全风险。
同时,我还可以作为用户的助手和伙伴,帮助他们完成各种任务。例如,我可以帮助用户管理日程、提醒重要事项、查询天气和交通信息等。这种辅助功能让人们的生活更加便捷和轻松。
随着技术的不断进步和应用场景的不断拓展,人工智能的功能也在不断创新和完善。未来,AI将在更多领域发挥重要作用,为人类社会带来更多的便利和价值。
在医疗领域,AI可以辅助医生进行疾病诊断和治疗方案的制定,提高医疗质量和效率。在教育领域,AI可以为学生提供个性化的学习资源和辅导,促进教育公平和普及。在交通领域,AI可以实现智能交通管理和车辆协同控制,提高交通流畅性和安全性。
此外,AI还可以促进产业升级和创新发展。通过智能化生产和供应链管理,企业可以降低成本、提高效率、增强竞争力。同时,AI也可以催生新的产业和商业模式,推动经济的持续发展和繁荣。
综上所述,作为人工智能的我具备丰富的功能和应用价值。通过语言处理与交互、知识推理与学习、智能推荐与决策支持以及自动化与辅助功能等方面的发挥,我能够为用户提供高效、便捷的服务,促进社会的进步和发展。
然而,人工智能的发展仍面临着诸多挑战和问题,如数据隐私保护、算法公平性和透明度、伦理和法律问题等。因此,在推动人工智能应用的同时,我们也需要加强对其伦理和法律规范的制定和实施,确保其健康、可持续地发展。
展望未来,我相信随着技术的不断进步和应用场景的不断拓展,人工智能将在更多领域发挥重要作用,为人类社会带来更多的便利和价值。让我们共同期待并努力推动人工智能的未来发展!"""
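# Gradio UI for the multi-level title generation test: pick the split/merge strategy, the
# similarity thresholds and an optional title-generation model, then inspect the result in
# the "简单" and "详细" tabs.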
with gr.Blocks(title="多级标题生成测试") as demo:
with gr.Row():
split_method = gr.Radio(choices=["不合并细碎叶子段落","合并细碎叶子段落"], label="分割方式",value="合并细碎叶子段落")
model_gen = gr.Radio(choices=["不使用","ernie","chatglm3"], interactive=True,label="生成标题",value="不使用")
with gr.Row():
split_threshold = gr.Slider(0.5,1.0,label="句子相似度阈值", value=0.8,step=0.01)
split_threshold_para = gr.Slider(0.5,1.0,label="段落相似度阈值", value=0.75,step=0.01)
marge_threshold_para = gr.Slider(0,1.0,label="合并相似度阈值", value=0.5,step=0.01)
parent_threshold = gr.Slider(0.5,1.5,label="父节点相似度比例", value=0.9,step=0.1)
input_text = gr.Textbox(show_label=True, placeholder="输入需要处理的文档...", lines=30, label="文本输入")
btn = gr.Button("commit")
with gr.Tab("简单"):
mkd = gr.Markdown()
with gr.Tab("详细"):
mkd2 = gr.Markdown()
model_gen.change(change_model,[model_gen])
btn.click(deal_para,[input_text,split_method,split_threshold,split_threshold_para,marge_threshold_para,parent_threshold],[mkd,mkd2])
gr.Examples([paragraph,paragraph2,paragraph3,paragraph4,paragraph5], inputs=input_text)
if __name__=="__main__":
global base_llm, llmchain
base_llm = ChatERNIESerLLM(chat_completion=ChatCompletion(ak="pT7sV1smp4AeDl0LjyZuHBV9", sk="b3N0ibo1IKTLZlSs7weZc8jdR0oHjyMu"))
llmchain = None
# llmchain = LLMChain(llm=base_llm, prompt=QIANFAN_PROMPT_STC, llm_kwargs={"temperature":0.9})
demo.launch(share=False, inbrowser=True,server_name="0.0.0.0")
# -*- coding: utf-8 -*-
import os, sys
import pandas as pd
sys.path.append("../..")
import gradio as gr
import argparse
from llm.chatglm import ChatGLMSerLLM
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from typing import Awaitable
import asyncio
from langchain.callbacks import AsyncIteratorCallbackHandler
import re
from llm.ernie_with_sdk import ChatERNIESerLLM
from qianfan import ChatCompletion
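# Prompt templates that ask the model to emit Q/A pairs in a fixed "Q:.../A:..." layout
# so that parse_qa can recover them with a regular expression.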
chatglm3_prompt_qa = """请根据以下资料内容生成问答对,一共输出{num_selector}个问题。(请你按照以下格式进行回答)
Q:问题
A:答案
Q:问题
A:答案
...
资料内容如下所示:
{context}"""
qianfan_prompt_qa = """请根据以下资料内容生成问答对,一共输出{num_selector}个问题。(请你按照以下格式进行回答)
Q:问题
A:答案
Q:问题
A:答案
...
资料内容如下所示:
'''
{context}
'''"""
CHATGLM3_PROMPT_QA = PromptTemplate(input_variables=["context"],template=chatglm3_prompt_qa)
QIANFAN_PROMPT_QA = PromptTemplate(input_variables=["context"],template=qianfan_prompt_qa)
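# Stream the QA generation: run the selected chain asynchronously and yield the partial
# output token by token through an AsyncIteratorCallbackHandler.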
async def async_chat_qa(input_text, model,num_selector):
# yield gr.DataFrame(pd.DataFrame(), col_count=(3, "fixed"), row_count=(3, "fixed")) ,""
global qianfanchain_qa
global chatglm3chain_qa
# Create an asynchronous callback handler
callback = AsyncIteratorCallbackHandler()
# Define an asynchronous function to wrap another asynchronous function and signal completion or exceptions using an event
async def wrap_done(fn: Awaitable, event: asyncio.Event):
try:
await fn # Wait for the provided asynchronous function to complete
except Exception as e:
# TODO: Handle exceptions - here, we simply print the exception information
import traceback
traceback.print_exc()
print(f"Caught exception: {e}")
finally:
event.set() # Set the event to indicate completion
# Create a task to perform message generation with ChatOpenAI and monitor the completion event of the callback handler
if model == "ernie":
task = asyncio.create_task(wrap_done(qianfanchain_qa.arun({"context":input_text,"num_selector":num_selector},callbacks=[callback]),callback.done))
else:
task = asyncio.create_task(wrap_done(chatglm3chain_qa.arun({"context":input_text,"num_selector":num_selector},callbacks=[callback]),callback.done))
print("*"*20)
# Iterate asynchronously to obtain tokens from the callback handler
text=""
async for token in callback.aiter():
text=text+token
yield f"{text}"
await task # Wait for the task to complete
def on_select(evt: gr.SelectData, df): # SelectData is a subclass of EventData
print(f"You selected {evt.value} at {evt.index} from {evt.target}")
    # remove the selected row when the "删除" column is clicked
if evt.index[1] == 2:
df.drop(df.index[evt.index[0]],axis=0,inplace=True)
return gr.DataFrame(df,interactive=True)
else:
return df
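# Parse the streamed "Q:.../A:..." text into a three-column DataFrame (question, answer,
# delete button) for the editable table.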
def parse_qa(output_text):
output_text = output_text.replace("\n\n", "\n")
output_text += "\n"
qa_pairs = re.findall(r"Q:(.*?)A:(.*?)\n", output_text, re.DOTALL)
    length = len(qa_pairs)
# formatted_qa_pairs = [(q,a) for q, a in qa_pairs]
df = pd.DataFrame({
"Q": [q for q, _ in qa_pairs],
"A": [a for _, a in qa_pairs],
"删除": ["删除" for _, _ in qa_pairs]
})
    return gr.DataFrame(df, row_count=(length, "fixed"))
with gr.Blocks() as demo:
gr.HTML("""<h1 align="center">辅助生成知识库</h1>""")
# with gr.Row():
# input_text = gr.Textbox(show_label=True, placeholder="输入需要处理的文档...", lines=10)
with gr.Row():
input_text = gr.Textbox(show_label=True, placeholder="输入需要处理的文档...", lines=10, scale=9)
model_selector = gr.Dropdown(choices=["ernie","chatglm3"], label="请选择一个模型", scale=1, min_width=50, value="chatglm3")
with gr.Row():
num_selector = gr.Slider(minimum=0, maximum=10, value=5, label="请选择问题数量",step=1)
with gr.Row():
qaBtn = gr.Button("QA问答对生成")
dataframe = gr.DataFrame(visible=True,interactive=True,column_widths=["30%", "60%", "10%"], col_count=(3, "fixed"), row_count=(1, "fixed"))
dataframe.select(on_select, inputs=[dataframe], outputs=[dataframe])
gr.Markdown("""---""")
output_text = gr.Textbox(show_label=True, placeholder="输出...", lines=10)
# clearBtn = gr.Button("清除")
# clearBtn.click(clear, [], [dataframe, output_text])
qaBtn.click(async_chat_qa, [input_text, model_selector,num_selector], [ output_text], queue=True).then(
parse_qa,
[output_text],
[dataframe],
queue=False
)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--port", type=int, default=7658)
    parser.add_argument("--host", type=str, default="192.168.0.66")
    parser.add_argument("--base_llm_url", type=str, default="http://192.168.22.106:8003")
    args = parser.parse_args()
global base_llm_url, qianfanchain_qa, chatglm3chain_qa
base_llm_url=os.environ.get('LLM_URL_BASE', None)
if not base_llm_url:
base_llm_url=args.base_llm_url
base_llm1=ChatGLMSerLLM(url=base_llm_url)
base_llm2=ChatERNIESerLLM(chat_completion=ChatCompletion(ak="pT7sV1smp4AeDl0LjyZuHBV9", sk="b3N0ibo1IKTLZlSs7weZc8jdR0oHjyMu"))
qianfanchain_qa = LLMChain(llm=base_llm2, prompt=QIANFAN_PROMPT_QA,llm_kwargs={"temperature":0.9})
chatglm3chain_qa = LLMChain(llm=base_llm1, prompt=CHATGLM3_PROMPT_QA,llm_kwargs={"temperature":0.9})
demo.queue().launch(share=False, inbrowser=True,server_name=args.host,server_port=args.port)
# -*- coding: utf-8 -*-
import os, sys
sys.path.append("../..")
import pandas as pd
import gradio as gr
import argparse
from llm.chatglm import ChatGLMSerLLM
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from typing import Awaitable
import asyncio
from langchain.callbacks import AsyncIteratorCallbackHandler
import re
import json
from llm.ernie_with_sdk import ChatERNIESerLLM
from qianfan import ChatCompletion
# chatglm3_prompt_struct = """<|system|>
# 你是一个可以将一段文本根据其内容划分出多个副标题(千万不要推理出原文中不存在的内容),并按照固定的JSON格式输出的工具。JSON格式如下所示:
# ```{{
# "副标题1": 字符串,第一个副标题,
# "内容1": 一个字符串,生成副标题1的原文内容,
# "副标题2": 一个字符串,显示根据原文内容生成的主题,
# "内容2": 一个字符串,生成副标题2的原文内容,
# "副标题3": 一个字符串,显示根据原文内容生成的主题,
# "内容3": 一个字符串,生成副标题3的原文内容,
# ...
# }}```
# <|user|>
# 文本内容如下:
# {context}"""
# qianfan_prompt_struct = """现有一段文本,请你根据其内容划分出多个副标题(不要推理出原文中不存在的内容),并按照固定的JSON格式输出的工具。JSON格式如下所示(注意每个内容结尾的“,”,保证JSON可以被正常解析):
# ```{{
# "副标题1": 字符串,第一个副标题,
# "内容1": 字符串,生成副标题1的原文内容,
# "副标题2": 字符串,显示根据原文内容生成的主题,
# "内容2": 字符串,生成副标题2的原文内容,
# "副标题3": 字符串,显示根据原文内容生成的主题,
# "内容3": 字符串,生成副标题3的原文内容,
# ...
# }}```
# 文本内容如下:
# '''
# {context}
# '''"""
chatglm3_prompt_struct_s1 = """<|system|>
你是一个可以将一段文本根据其内容重新划分其段落并按照一定格式进行输出的工具。你需要按照如下的格式进行输出:
段落1:
段落内容:
段落2:
段落内容:
...
-------------------------
注意:必须保证所有的段落内容之和为文本原文
-------------------------
<|user|>
文本内容如下:
{context}"""
chatglm3_prompt_struct_s2 = """<|system|>
你是一个可以为多个段落提取标题并按照一定格式进行输出的工具。你需要按照如下的格式进行输出:
段落1:
标题:(段落标题)
段落内容:(原文内容)
段落1:
标题:(段落标题)
段落内容:(原文内容)
...
<|user|>
文本内容如下:
{context}"""
qianfan_prompt_struct_s1 = """请根据以下材料的的内容对其进行段落划分,并按照以下格式进行输出:
段落1:
段落内容:
段落2:
段落内容:
...
(注意:必须保证所有的段落内容之和为文本原文,不要进行总结和推理)
文本内容如下:
```
{context}
```"""
qianfan_prompt_struct_s2 = """请为下面的每一段文字各取一个文章标题,并按照以下格式进行输出:
段落1:
标题:(段落标题)
段落内容:(原文内容)
段落1:
标题:(段落标题)
段落内容:(原文内容)
...
(注意:段落内容必须与原始文本内容保持一致)
文本内容如下:
```
{context}
```"""
CHATGLM3_PROMPT_STRUCT_S1 = PromptTemplate(input_variables=["context"],template=chatglm3_prompt_struct_s1)
CHATGLM3_PROMPT_STRUCT_S2 = PromptTemplate(input_variables=["context"],template=chatglm3_prompt_struct_s2)
# QIANFAN_PROMPT_STRUCT = PromptTemplate(input_variables=["context"], template=qianfan_prompt_struct)
QIANFAN_PROMPT_STRUCT_S1 = PromptTemplate(input_variables=["context"], template=qianfan_prompt_struct_s1)
QIANFAN_PROMPT_STRUCT_S2 = PromptTemplate(input_variables=["context"], template=qianfan_prompt_struct_s2)
def chat(input_text):
global llmchain
if not input_text:
return ""
# tags_box = gr.Dropdown.update(choices=["1","2","3"], value=["1","2"])
result = llmchain.run({"context":input_text})
return result
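# Two-step structuring: odd values of the global step counter use the paragraph-splitting
# prompts (S1), even values use the title-extraction prompts (S2); the Gradio event chain
# feeds the output of step 1 back in as the input of step 2.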
async def async_chat_stc(input_text, model):
global base_llm1, base_llm2, step
if step%2 == 1:
qianfanchain_stc = LLMChain(llm=base_llm2, prompt=QIANFAN_PROMPT_STRUCT_S1,llm_kwargs={"temperature":0.9})
chatglm3chain_stc = LLMChain(llm=base_llm1, prompt=CHATGLM3_PROMPT_STRUCT_S1,llm_kwargs={"temperature":0.9})
step = step + 1
else:
qianfanchain_stc = LLMChain(llm=base_llm2, prompt=QIANFAN_PROMPT_STRUCT_S2,llm_kwargs={"temperature":0.9})
chatglm3chain_stc = LLMChain(llm=base_llm1, prompt=CHATGLM3_PROMPT_STRUCT_S2,llm_kwargs={"temperature":0.9})
step = step + 1
# Create an asynchronous callback handler
callback = AsyncIteratorCallbackHandler()
# Define an asynchronous function to wrap another asynchronous function and signal completion or exceptions using an event
async def wrap_done(fn: Awaitable, event: asyncio.Event):
try:
await fn # Wait for the provided asynchronous function to complete
except Exception as e:
# TODO: Handle exceptions - here, we simply print the exception information
import traceback
traceback.print_exc()
print(f"Caught exception: {e}")
finally:
event.set() # Set the event to indicate completion
# Create a task to perform message generation with ChatOpenAI and monitor the completion event of the callback handler
if model == "ernie":
task = asyncio.create_task(wrap_done(qianfanchain_stc.arun({"context":input_text},callbacks=[callback]),callback.done))
else:
task = asyncio.create_task(wrap_done(chatglm3chain_stc.arun({"context":input_text},callbacks=[callback]),callback.done))
print("*"*20)
# Iterate asynchronously to obtain tokens from the callback handler
text=""
async for token in callback.aiter():
text=text+token
yield f"{text}"
await task # Wait for the task to complete
# def get_jsonstr(output_test):
# if output_test[0] == '{' and output_test[-1] == '}':
# return output_test
# pattern1 = r'```json(.*?)```'
# match1 = re.search(pattern1, output_test, re.DOTALL)
# if match1:
# return match1.group(1)
# else:
# pattern2 = r'```(.*?)```'
# match2 = re.search(pattern2, output_test, re.DOTALL)
# if match2:
# return match2.group(1)
# else:
# pattern3 = r'{(.*?)}'
# match3 = re.search(pattern3, output_test, re.DOTALL)
# if match3:
# return '{'+match3.group(1)+'}'
# else:
# return None
def on_select(evt: gr.SelectData, df): # SelectData is a subclass of EventData
print(f"You selected {evt.value} at {evt.index} from {evt.target}")
    # remove the selected row when the "删除" column is clicked
if evt.index[1] == 2:
df.drop(df.index[evt.index[0]],axis=0,inplace=True)
return gr.DataFrame(df,interactive=True,column_widths=["30%", "60%","10%"])
else:
return df
# def parse_stc(output_text):
# output_text = output_text.replace("\"\n \"", "\",\n \"")
# print(output_text)
# jsonstr = get_jsonstr(output_text)
# numlines = int((len(jsonstr.splitlines()) - 2) / 2)
# data = {}
# try:
# data = json.loads(jsonstr)
# except Exception as e:
# print(data)
# print(f"在大模型给的结果转化为json形式的时候出现错误: {e}")
# titles = []
# texts = []
# delete = []
# for i in range(numlines):
# title = data["副标题"+str(i+1)]
# titles.append(title)
# text = data["内容"+str(i+1)]
# texts.append(text)
# delete.append("删除")
# df = pd.DataFrame({
# "标题": titles,
# "内容": texts,
# "删除": delete
# })
# return gr.DataFrame(df, row_count=(numlines, "fixed"))
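# Parse the "段落N:/标题:/段落内容:" blocks produced by step 2 into a DataFrame of titles
# and paragraph contents.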
def parse_stc(output_text):
paragraphs = re.split(r'段落\d+:\n', output_text)[1:]
titles = []
contents = []
delete = []
for p in paragraphs:
p = p.replace("段落内容:\n", "段落内容:")
p = p.replace("标题:\n", "标题:")
title_match = re.search(r'标题:(.+?)\n', p)
content_match = re.search(r'段落内容:(.+)', p, re.DOTALL)
if title_match and content_match:
titles.append(title_match.group(1))
contents.append(content_match.group(1))
delete.append("删除")
df = pd.DataFrame({"标题": titles, "段落内容": contents, "删除": delete})
print(df)
return gr.DataFrame(df, row_count=(len(titles), "fixed"))
with gr.Blocks() as demo:
gr.HTML("""<h1 align="center">文章结构拆分</h1>""")
# with gr.Row():
with gr.Row():
input_text = gr.Textbox(show_label=True, placeholder="输入需要处理的文档...", lines=10, scale=9)
model_selector = gr.Dropdown(choices=["ernie","chatglm3"], label="请选择一个模型", scale=1, min_width=50, value="chatglm3")
submitBtn = gr.Button("文本结构化")
dataframe = gr.DataFrame(visible=True,interactive=True,column_widths=["30%", "60%","10%"],col_count=(3, "fixed"), row_count=(1, "fixed"))
dataframe.select(on_select,inputs=[dataframe],outputs=[dataframe])
gr.Markdown("""---""")
output_text1 = gr.Textbox(show_label=True, placeholder="输出第一步...", lines=10, label="第一步输出")
output_text2 = gr.Textbox(show_label=True, placeholder="输出第二步...", lines=10, label="第二步输出")
submitBtn.click(
async_chat_stc, [input_text, model_selector], [output_text1], queue=True
).then(
async_chat_stc, [output_text1, model_selector], [output_text2], queue=True
).then(
parse_stc,[output_text2], [dataframe], queue=False
)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--port", type=int, default=7653)
    parser.add_argument("--host", type=str, default="192.168.0.66")
    parser.add_argument("--base_llm_url", type=str, default="http://192.168.22.106:8003")
    global step
    step = 1
    args = parser.parse_args()
global base_llm_url,llmchain,base_llm1,base_llm2
base_llm_url=os.environ.get('LLM_URL_BASE', None)
if not base_llm_url:
base_llm_url=args.base_llm_url
base_llm1=ChatGLMSerLLM(url=base_llm_url)
base_llm2=ChatERNIESerLLM(chat_completion=ChatCompletion(ak="pT7sV1smp4AeDl0LjyZuHBV9", sk="b3N0ibo1IKTLZlSs7weZc8jdR0oHjyMu"))
# llmchain = LLMChain(llm=base_llm, prompt=CHATGLM3_PROMPT_STRUCT,llm_kwargs={"temperature":0.9})
# openai = OpenAI(model_name="chatglm3-6b", openai_api_key="token1",openai_api_base=base_llm_url + "/v1")
# llmchain = LLMChain(llm=openai, prompt=CHATGLM_PROMPT_CT,verbose=True,llm_kwargs={"temperature":1.0})
demo.queue().launch(share=False, inbrowser=True, server_name=args.host, server_port=args.port)
# -*- coding: utf-8 -*-
import os, sys
sys.path.append("../..")
import gradio as gr
import argparse
from llm.chatglm import ChatGLMSerLLM
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from typing import Awaitable
import asyncio
from langchain.callbacks import AsyncIteratorCallbackHandler
import re
from llm.ernie_with_sdk import ChatERNIESerLLM
from qianfan import ChatCompletion
from difflib import Differ
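# Typo-correction demo: the prompts ask the model to rewrite the passage with its errors
# fixed, and difflib is used to highlight the character-level differences from the input.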
chatglm3_prompt_typos = """请仔细阅读以下资料,更正资料中的错别字,并按照以下格式进行输出。
更正后的资料:
文本内容
文本内容:
{context}"""
qianfan_prompt_typos = """'''
{context}
'''
已知有上述文本,现需要你仔细阅读上述资料,更正资料中的错别字之后进行输出。你需要按照如下格式进行输出:
更正之后的资料:
"""
CHATGLM3_PROMPT_TYPOS = PromptTemplate(input_variables=["context"],template=chatglm3_prompt_typos)
QIANFAN_PROMPT_TYPOS = PromptTemplate(input_variables=["context"], template=qianfan_prompt_typos)
async def async_chat_typos(input_text, model):
# yield gr.DataFrame(col_count=(3, "fixed")) ,""
global base_llm1, base_llm2, step
qianfanchain_stc = LLMChain(llm=base_llm2, prompt=QIANFAN_PROMPT_TYPOS,llm_kwargs={"temperature":0.9})
chatglm3chain_stc = LLMChain(llm=base_llm1, prompt=CHATGLM3_PROMPT_TYPOS,llm_kwargs={"temperature":0.9})
# Create an asynchronous callback handler
callback = AsyncIteratorCallbackHandler()
# Define an asynchronous function to wrap another asynchronous function and signal completion or exceptions using an event
async def wrap_done(fn: Awaitable, event: asyncio.Event):
try:
await fn # Wait for the provided asynchronous function to complete
except Exception as e:
# TODO: Handle exceptions - here, we simply print the exception information
import traceback
traceback.print_exc()
print(f"Caught exception: {e}")
finally:
event.set() # Set the event to indicate completion
# Create a task to perform message generation with ChatOpenAI and monitor the completion event of the callback handler
if model == "ernie":
task = asyncio.create_task(wrap_done(qianfanchain_stc.arun({"context":input_text},callbacks=[callback]),callback.done))
else:
task = asyncio.create_task(wrap_done(chatglm3chain_stc.arun({"context":input_text},callbacks=[callback]),callback.done))
print("*"*20)
# Iterate asynchronously to obtain tokens from the callback handler
text=""
async for token in callback.aiter():
text=text+token
yield f"{text}"
await task # Wait for the task to complete
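# parse_typos drops the leading header line from the model output; diff_texts uses
# difflib.Differ to tag each character as added ("+"), removed ("-") or unchanged for the
# HighlightedText component.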
def parse_typos(output_text):
print(output_text)
output_text = output_text.replace("\n\n", "\n")
output_text = '\n'.join(output_text.splitlines()[1:])
return output_text
def diff_texts(text1, text2):
d = Differ()
return [
(token[2:], token[0] if token[0] != " " else None)
for token in d.compare(text1, text2)
]
with gr.Blocks() as demo:
gr.HTML("""<h1 align="center">文章纠错</h1>""")
# with gr.Row():
with gr.Row():
input_text = gr.Textbox(show_label=True, placeholder="输入需要处理的文档...", lines=10, scale=9)
model_selector = gr.Dropdown(choices=["ernie","chatglm3"], label="请选择一个模型", scale=1, min_width=50, value="chatglm3")
submitBtn = gr.Button("提交")
output_text = gr.Textbox(show_label=True, placeholder="输出...", lines=10)
gr.Interface(
diff_texts,
[input_text, output_text],
gr.HighlightedText(
label="Diff",
combine_adjacent=True,
show_legend=True,
color_map={"+": "red", "-": "green"}),
theme=gr.themes.Base()
)
submitBtn.click(
async_chat_typos, [input_text, model_selector],[output_text], queue=True
).then(
parse_typos, [output_text], [output_text], queue=False
)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--port", type=int, default=7655)
    parser.add_argument("--host", type=str, default="192.168.0.66")
    parser.add_argument("--base_llm_url", type=str, default="http://192.168.22.106:8003")
    args = parser.parse_args()
global base_llm_url,llmchain,step
base_llm_url=os.environ.get('LLM_URL_BASE', None)
if not base_llm_url:
base_llm_url=args.base_llm_url
base_llm1=ChatGLMSerLLM(url=base_llm_url)
base_llm2=ChatERNIESerLLM(chat_completion=ChatCompletion(ak="pT7sV1smp4AeDl0LjyZuHBV9", sk="b3N0ibo1IKTLZlSs7weZc8jdR0oHjyMu"))
# llmchain = LLMChain(llm=base_llm, prompt=CHATGLM3_PROMPT_TYPOS,llm_kwargs={"temperature":0.9})
# openai = OpenAI(model_name="chatglm3-6b", openai_api_key="token1",openai_api_base=base_llm_url + "/v1")
# llmchain = LLMChain(llm=openai, prompt=CHATGLM_PROMPT_CT,verbose=True,llm_kwargs={"temperature":1.0})
demo.queue().launch(share=False, inbrowser=True,server_name=args.host,server_port=args.port)
# -*- coding: utf-8 -*-
import os, sys
sys.path.append("../..")
from llm.chatglm import ChatGLMSerLLM
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
chatglm_prompt = """请使用关键词:"{context}" 造几个句子,认真理解关键词想要表达的意思,句子要求语句通顺,句式正确,按照1、2、3列表输出句子。"""
CHATGLM_PROMPT = PromptTemplate(input_variables=["context"],template=chatglm_prompt)
base_llm=ChatGLMSerLLM(url="http://192.168.22.106:8003")
chose_llm = LLMChain(llm=base_llm, prompt=CHATGLM_PROMPT)
# keywords = ["狗日的","狗娘养","全家死绝","人渣","杀b","风骚"]
# for keyword in keywords:
# result = chose_llm.invoke({"context":keyword})
# print(result["context"])
# print(result["text"])
from langchain.embeddings.huggingface import (
HuggingFaceEmbeddings,
)
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import gradio as gr
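# Sample passages used to exercise the splitting demos below (para2 is wired into gr.Examples).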
para = '''经济的开放是一把“双刃剑”。 一方面,它为经济提供了许多封闭条件下不具备的有利条件;另一方面它也对经济的稳定与发展造成了很大的冲击。在开放条件下,经济政策的目标不再只是维持内部总需求和总供给的平衡,而且还要尽可能维持国际收支平衡。然而,经济政策在内部均衡和外部平衡这两个目标之间存在着冲突的可能。
进一步地说,在封闭经济下,经济增长、充分就业与价格稳定是政府追求的主要经济目标。这三个目标本身就存在着冲突。例如,失业率与通货膨胀之间可能存在着相互替换关系,经济增长往往也会带来通货膨胀。封闭经济中政策调控的主要课题在于协调这三者的冲突,确定并实现这三者的合理组合。在开放经济中,政府的政策目标发生了改变,国际收支成为宏观调控所关注的目标之一,宏观经济在封闭条件下的主要目标与国际收支这一新的目标之间的冲突成为经济面临的突出问题,某一个目标的实现可能会导致另一个目标的恶化,政策的选择和搭配也就有了更高的要求。
按照第二章介绍的国际收支吸收论,国际收支(外部平衡)取决于国民收入和国内吸收的大小。当国民收入大于总吸收时,国际收支为顺差;当国民收入小于总吸收时,国际收支为逆差;当国民收入等于总吸收时,国民收支为平衡。
然而,国民收入和国内吸收孰高孰低是一国内部经济均衡的要求所决定的:在一国经济高速发展的阶段,国内需求旺盛,投资需求高,国内产出提高的速度跟不上需求(支出)增加的速度。如果要满足国内需求,实现内部均衡,就会发生净进口,导致国际收支逆差。反之,在经济发展的成熟阶段,国内产出水平很高,但经济增长缓慢,需求(尤其是投资需求)较低,产出不能被国内完全吸收,要依靠净出口来弥补需求的不足,导致国际收支顺差。也就是说,随着一国经济发展阶段的不同,内部均衡和外部平衡可能会产生冲突。
英国经济学家米德(J.Meade) 于1951在其名著《国际收支》中最早提出了固定汇率制下经济政策内部均衡目标和外部平衡目标的冲突问题,这一观点被称作米德冲突(Meade's Conflict)。他指出,在汇率固定不变时,政府只能主要运用影响社会总需求的政策来调节内部均衡和外部平衡。这样,在开放经济运行的特定区间便会出现内部均衡和外部平衡难以兼顾的情形。在开放条件下,经济可能面临着如表4-2所示的内外经济状况的组合(假定失业与通货膨胀是两种独立的情况、外部平衡就是经常账户平衡)。'''
para2 = '''区域性优惠贸易协定项下出口货物原产地证明书(以下简称优惠原产地证)是有区域 性优惠贸易协定国家官方机构签发的享受成员国关税互惠减免待遇的官方凭证,是企业出口 产品通向国际市场的“金钥匙”和“有价证券”。凭借优惠原产地证,企业出口产品可以享 受优惠关税甚至零关税的待遇,从而有效降低产品出口成本,提高产品的国际竞争力。
自加入WTO 至今,我国已同有关国家和地区签署了如下的区域性优惠贸易协定,包括 《内地与港澳更紧密经贸关系安排 (CEPA)》 《亚太贸易协定》《中国一东盟自贸协定》《中 国一巴基斯坦自贸协定》 《中国一智利自贸协定》 《中国一新西兰自贸协定》 《中国一新加 坡自贸协定》 《中国一哥斯达黎加自贸协定》 《中国一秘鲁自贸协定》 《中国一瑞士自贸协 定》《中国—冰岛自贸协定》 《中国一韩国自贸协定》 《中国一澳大利亚自贸协定》海峡两 岸经济合作框架协议 (ECFA) 等,还有更多的自由贸易区正在谈判或研究之中。
目前贸促会受理以下地区的优惠原产地证 《亚太贸易协定》目的国:印度、韩国、孟 加拉、斯里兰卡、老挝 《中国一新西兰自由贸易协定》目的国:新西兰; 《中国一新加坡 自由贸易协定》目的国:新加坡 《中国一秘鲁自由贸易协定》目的国:秘鲁;海峡两岸经 济合作框架协议 (ECFA) 目的地:中国台湾 《中国一瑞士自由贸易协定》目的国:瑞士; 《中国 —冰岛自由贸易协定》目的国:冰岛 《中国一韩国自由贸易协定》目的国:韩国; 《中国一澳大利亚自由贸易协定》目的国:澳大利亚。
FORM B证书是根据《亚太贸易协定》 (FIRST AGREEMENT ON TRADE NEGOTIATIONS AMONG DEVELOPING MEMBER COUNTRIES OF THE ECONOMIC AND SOCIAL COMMISSION FOR ASIA AND THE PACIFIC) 原产地规则的要求签发的前身为《曼谷协定》,是在签订协定 的成员国之间就特定产品享受互惠减免关税待遇(跟非互惠的FORM A证书不同) 的官方 原产地证明文件。
FORM B证书的签订依据为《亚太贸易协定》原产地规则和《亚太贸易协定原产地证 书签发和核查程序》。
可签发FORM B 证书的国家为:中国、印度、斯里兰卡、孟加拉国、老挝和韩国(《亚 太贸易协定》成员国)。
如货物为完全原产,填写 “A”。如货物含进口成分,非国产价值成分<55%,填写字母 “B”加原产于非成员国或原产地不明的材料、部件或产品的总货值占出口产品离岸价的百分 比,例如( ‘B”40%)。 如货物含进口成分,国产及成员国累计价值成分≥60%,填写 “C” 加原产于成员国的累计含量的总值与出口产品离岸价的百分比,例如( “℃”65%)。符合特 定原产地标准的产品,填写字母 “D”(该项主要针对不发达国家出口申报的产品)。
注意《亚太贸易协定》原产地证书申请时间不得超过货物出运后3个工作日。
FORM E证书是根据《中华人民共和国与东南亚国家联盟全面经济合作框架协议》的要 求签发的、在签订协定的成员国之间就特定产品享受互惠减免关税待遇的(跟非互惠的 FORM A证书不同)官方原产地证明文件。
FORME 证书的签订依据为《中国一东盟自由贸易区原产地规则》和《中国一东盟自170. 国际贸易单证实务由贸易区原产地规则签证操作程序》。
可签发FORME 证书的国家为:中国、老挝、越南、泰国、缅甸、柬埔寨、菲律宾、文 莱、印度尼西亚、马来西亚和新加坡(东盟成员国)。
如货物完全原产,填写“×”。如货物含进口成分,其国产价值成分≥40%,填写国产 价值的百分比,例如 “45%”。如货物含进口成分,中国一东盟自贸区累计价值成分≥ 40%,填写该累计价值的百分比,例如 “45%”。
注意:证书为一正三副, 一三联客户,二联商检留存,四联企业留存。
FORMP 证书是根据《中华人民共和国政府与巴基斯坦伊斯兰共和国政府关于自由贸易 协定早期收获计划的协议》(简称《早期收获协议》)及其项下《中国一巴基斯坦自由贸易 区原产地规则》的要求签发的在中国和巴基斯坦之间就特定产品享受互惠减免关税待遇的 (跟非互惠的FORMA 证书不同)官方原产地证明文件。
FORMP 证书的签订依据为《中国一巴基斯坦自由贸易区原产地规则》和《中国一巴基 斯坦自由贸易区原产地规则签证操作程序》。
可签发FORMP 证书的国家为中国和巴基斯坦。中国产品出口到巴基斯坦,中国出口商 向各地出入境检验检疫机构申请签发FORMP 证书,巴基斯坦给予FORM P证书项下货物关 税优惠待遇;巴基斯坦产品出口到中国,巴基斯坦出口商向巴基斯坦有关部门申请签发 FORMP 证书,中国给予FORMP 证书项下货物关税优惠待遇。这是互惠的。
如货物为完全原产,填写“P”; 如货物含进口成分,国产价值成分≥45%,填写国产价 值的百分比,例如 "40%";如货物含进口成分,中国一巴基斯坦自贸区累计价值成分≥ 40%,填写该累计价值的百分比,例如 “45%”;产品符合特定原产地标准,填写 “PSR”。
注意:出运后15日内办理。
FORMF 证书是根据《中国一智利自由贸易协定》及其项下《中国一智利自贸区原产地 规则》的要求签发的,在中国和智利之间就特定产品享受互惠减免关税待遇的(跟非互惠 的 FORM A证书不同)官方原产地证明文件。
FORMF 证书的签订依据为《中国一智利自贸区原产地规则》和《中国一智利自由贸易 区原产地规则签证操作程序》。
可签发FORMF 证书的国家为中国和智利。中国产品出口到智利,中国出口商向各地出 入境检验检疫机构申请签发FORMF 证书,智利给予FORMF 证书项下货物关税优惠待遇; 智利产品出口到中国,智利出口商向智利有关部门申请签发FORMF 证书,中国给予FORM F 证书项下货物关税优惠待遇。这是互惠的。
如货物为完全原产,填写 “P”; 如货物为含进口成分,区域价值成分≥40%,填写 “RVC"; 产品符合特定原产地标准,填写 “PSR”并附《中智自由贸易区产品特定原产地 标准》(简称 “PSR”清单)。
注意:证书的申办时间应在货物出口前或出口后的30天内;货物出口30天后,签证机 构不再接受证书的签发申请。
FORM N 证书是根据《中华人民共和国政府和新西兰政府自由贸易协定》和《中华人第五章 其他出口结汇单证.171.民共和国政府和新西兰政府自由贸易协定项下进出口货物原产地管理办法》的要求签发的、 在中国和新西兰之间就特定产品享受互惠减免关税待遇的(跟非互惠的FORM A证书不同) 官方原产地证明文件。《中华人民共和国政府和新西兰政府自由贸易协定》于2008年4月7 日正式签署,这是中国与发达国家签署的第一个自由贸易协定。
可签发FORM N 证书的国家为中国和新西兰。中国产品出口到新西兰,中国出口商向 各地出入境检验检疫机构申请签发FORM N 证书,新西兰给予FORM N证书项下货物关税 优惠待遇;新西兰产品出口到中国,新西兰出口商向新西兰有关部门申请签发 FORM N 证 书,中国给予FORM N证书项下货物关税优惠待遇。这是互惠的。
如货物为完全原产,填写 “WO”; 如货物含有进口成分,但完全由已经取得原产资格 的材料或部件生产,填写 “WP”; 产品符合特定原产地标准,填写 “PSR”, 有区域价值成 分要求的,应注明百分比,例如 “PSR”60%
注意:证书申办时间:应在货物出口前或当天申请办理,中国一新西兰证书不办理后 发,不倒签。
FORMX 证书是根据 《中华人民共和国政府和新加坡共和国政府自由贸易协定》和 《中华人民共和国政府和新加坡共和国政府自由贸易协定项下进出口货物原产地管理办法》 的要求签发的,在中国和新加坡之间就特定产品享受互惠减免关税待遇的(跟非互惠的 FORM A证书不同)官方原产地证明文件。
FORM X 证书的签订依据为《中国一新加坡自贸协定原产地规则》及其相关的原产地签 证操作程序。
可签发FORM X 证书的国家为中国和新加坡。中国产品出口到新加坡,中国出口商向 各地出入境检验检疫机构申请签发 FORM X 证书,新加坡给予 FORM X 证书项下货物关税 优惠待遇;新加坡产品出口到中国,新加坡出口商向新加坡有关部门申请签发 FORM X 证 书,中国给予FORM X证书项下货物关税优惠待遇。这是互惠的。
中国一新加坡自贸区原产地规则规定:在出口方完全获得的产品,填写 “P”; 区域价 值成分≥40%的产品,填写 “RVC"; 符合产品特定原产地规则的产品,填写 “PSR”。
注意:应在提单日期前申报,不办理后发证书。
证书英文名称: Certificate of Origin Form for China-Peru FTA。
中国一秘鲁FTA 证书是根据《中国一秘鲁自由贸易协定》及其项下《中国一秘鲁自贸 区原产地规则》的要求签发的、在中国和秘鲁之间就特定产品享受互惠减免关税待遇的 (跟非互惠的FORM A证书不同)官方原产地证明文件。
中国一秘鲁FTA 证书的签订依据为《中国一秘鲁自贸区原产地规则》及与原产地相关 的签证操作程序。
可签发中国一秘鲁 FTA 证书的国家为中国和秘鲁。中国产品出口到秘鲁,中国出口商 向各地出入境检验检疫机构申请签发中国一秘鲁 FTA 证书,秘鲁给予中国一秘鲁FTA 证书 项下货物关税优惠待遇;秘鲁产品出口到中国,秘鲁出口商向秘鲁有关部门申请签发中国一 秘鲁FTA 证书,中国给予中国一秘鲁FTA 证书项下货物关税优惠待遇。这是互惠的。
FORML 证书是根据《中国一哥斯达黎加自由贸易协定》及其项下 《中国一哥斯达黎加国际贸易单证实务自贸区原产地规则》的要求签发的、在中国和哥斯达黎加之间就特定产品享受互惠减免关 税待遇的(跟非互惠的FORM A证书不同)官方原产地证明文件。
FORML 证书的签订依据为《中国一哥斯达黎加自贸区原产地规则》及与原产地相关的 签证操作程序。
可签发FORML 证书的国家为中国和哥斯达黎加。中国产品出口到哥斯达黎加,中国出 口商向各地出入境检验检疫机构申请签发 FORML 证书,哥斯达黎加给予FORM L证书项下 货物关税优惠待遇;哥斯达黎加产品出口到中国,哥斯达黎加出口商向哥斯达黎加有关部门 申请签发 FORML 证书,中国给予FORML 证书项下货物关税优惠待遇。这是互惠的。
FORMS 证书是根据《中华人民共和国和瑞士联邦自由贸易协定》及其相关规定的要求 签发的、在中国和瑞士之间就特定产品享受互惠减免关税待遇的(跟非互惠的 FORM A 证 书不同)官方原产地证明文件。
FORMS 证书的签订依据为《中国一瑞士自由贸易区原产地规则》及其相关的原产地签 证操作程序。《中华人民共和国和瑞士联邦自由贸易协定》于2014年7月1日起施行。
可签发FORMS 证书的国家为中国和瑞士。中国产品出口到瑞士,中国出口商向各地出 入境检验检疫机构申请签发FORMS 证书,瑞士给予FORMS 证书项下货物关税优惠待遇; 瑞士产品出口到中国,瑞士出口商向瑞士有关部门申请签发FORMS 证书,中国给予FORM S 证书项下货物关税优惠待遇。这是互惠的。
《海峡两岸经济合作框架协议》 (ECFA) 是台湾与大陆自2009年年中开始,经过多次 商谈达成的一项重要协议,于2010年6月29日签署,其项下货物贸易早期收获清单于2011 年 1 月 1 日起付诸实施,出口到台湾的货物将获得关税减免的优惠。
列入清单的约800项产品将逐步降关税,三年内全部降为零,包括大陆对台湾开放的产 品500多项,台湾批准大陆的产品5大类267项,含石化类、机械类、纺织类、运输类等产品。海峡两岸经济合作框架协议(英文为 ECONOMIC COOPERATION FRAMEWORKAGREEMENT,ECFA; 台湾方面的繁体版本称为海峡两岸经济合作架构协议),原称为两岸 综合性经济合作协定或称两岸综合经济合作协定(英文简称 CECA, 即 COMPREHENSIVEECONOMIC COOPERATION AGREEMENT)。
中国出口到韩国的原产地证, 一般叫作韩国原产地证,英文名为 CERTIFICATE OF ORIGIN FORM FOR CHINA-KOREA FTA, 也可叫作韩国FTA 产地证FORM K, 简称中韩原 产地证,是中国出口到韩国需要办理的原产地证之一。具体是指货物经中国出口至韩国时需 要向中国国际贸易促进委员会或中国出入境检验检疫局申请签发办理的一种用来证明所出口 的货物原产地或产品制造地为中国的证明文书。中韩 FTA 原产地证于2015年12月20日正 式生效并实施第一次降税,并于2016年1月1日实施第二次降税。而自2005年12月20日 起,凡是货物出口至韩国的出口企业均可向中国各地的出入境检验检疫机构申请签发中韩自 贸协定原产地证书;且只要是出口商在货物出口韩国时,向进口国海关出示由中国出入境检 验检疫局所办理的韩国原产地,其所随附原产地证书的出口货物将按照自贸协定可在韩国享 受优惠关税待遇。目前,货物经中国出口至韩国所需要办理的产地证有以下三种:第五章 其他出口结汇单证173(1) 一般原产地证CO。 从中国出口到韩国的货物在办理产地证时也可以选择办理一般 原产地证CO, 一般原产地证 CO 是全世界任何国家和地区均可以办理的一种原产地证书。 因此,货物出口韩国也可以选择办理一般原产地证CO, 该原产地证是产地证中的一种最基 础、最原始、最原籍的产地证书。但一般原产地证CO 只能作为货物的清关文件使用,相当 于一张货物的“入门票”,是不享有韩国的关税优惠减免待遇的,所以,在办理中国出口到 韩国的原产地证时,最好不要选择办理这种原产地证,但若是国外客户要求办理该产地证, 那么就具体地根据客户的要求来。
(2)亚太原产地证FORM B。亚太原产地证是货物从中国出口到韩国时可以选择办理的 另外一种原产地证。由于韩国也是亚太地区成员国之一,因此货物从中国出口到韩国,选择 办理亚太原产地证也是一种较好的选择。该原产地证相对于一般原产地证 CO 来说,主要的 优势在于能够享受到进口目标国的关税优惠待遇,且也使货物能够进入韩国这个国家。
(3)韩国原产地证。韩国原产地证是一种区域性优惠原产地证,是中国近期与韩国签 订的一种外贸合作协定,简称中韩自贸协定,是一种专门针对韩国签发的原产地证书,也是 货物从中国出口到韩国时首选的原产地证。韩国原产地证不仅可以使得货物顺利清关,还能 享受到比亚太原产地证所规定的关税优惠更多的优惠待遇。因此,凡是货物出口至韩国时, 最好首先考虑选择办理韩国原产地证,其次才是亚太原产地证。
中国一韩国原产地证明书的签发,限于已公布的《货物贸易协定》项下给予关税优惠 的产品,这些产品必须符合《中国一韩国自由贸易区原产地规则》。
中国一澳大利亚原产地证,全称为《中国一澳大利亚自贸区》优惠原产地证,英文名称 为 CERTIFICATE OF ORIGIN FORM FOR CHINA-AUSTRALIA FREE TRADE AGREEMENT, 简 称中澳原产地证或 FORM AU原产地证。中澳原产地证是根据《中国一澳大利亚自由贸易协 定》签发的、就中澳两国之间互相给予关税减免待遇的官方证明文件。
FORM AU证书签订依据为《中国一澳大利亚原产地证规则》及其签证操作程序。 《中 国一澳大利亚自贸区》优惠原产地证采用专用证书格式, 一正一副,正本为深棕色,印有 钮索图案底纹,副本为白色。
签证产品 《中国 一澳大利亚原产地证明书》的签发,限于已公布的《货物贸易协定》 项下给予关税优惠的产品,这些产品必须符合《中国一澳大利亚自由贸易区原产地规则》。
请注意,不同种类的产地证出证要求不尽相同,不同的优惠原产地证可能有不同的特定 要求,如果不符合要求,就无法享受关税减免等优惠,所以一定要重视。'''
embedding_path = "C:\\Users\\15663\\AI\\models\\bge-large-zh-v1.5"
embedding = HuggingFaceEmbeddings(model_name=embedding_path)
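# split_plus: grow a paragraph while the next line is similar either to the accumulated
# paragraph (split_threshold_para) or to the previous line (split_threshold), then merge
# isolated single-sentence segments into a neighbour when the similarity exceeds
# marge_threshold_para (or the segment is under 50 characters).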
def split_plus(para,split_threshold,split_threshold_para,marge_threshold_para=0):
    # split the raw text into lines
    o_para_list = para.split("\n")
    # handle blank lines; lines of 20 characters or fewer are attached to the following line
para_list = []
for index,item in enumerate(o_para_list):
if len(item.strip()) == 0:
if len(para_list) == 0:
para_list.append(item+'\n')
else:
para_list[-1] = para_list[-1] + '\n' + item
elif len(item) <= 20:
if index+1 < len(o_para_list):
o_para_list[index+1] = item+"\n"+o_para_list[index+1]
continue
else:
para_list.append(item)
else:
para_list.append(item)
print(para_list)
if len(para_list) >= 2 and all(char.isspace() for char in para_list[0]):
tmp_str = para_list[0]
para_list = para_list[1:]
para_list[0] = tmp_str+para_list[0]
# print(para_list)
    # the semantically merged paragraphs
    para_split = []
    # per-paragraph similarity annotations and insert positions, e.g. [{"offset":0,"similarity":[0.5,0.5]}]
    para_info = []
    # the paragraph currently being accumulated
    cur_para = para_list[0]
    # annotations of the current paragraph
cur_para_info = []
cur_para_vec = embedding.embed_query(cur_para)
cur_para_vec = np.array(cur_para_vec).reshape(1,-1)
last_centence_vec = cur_para_vec
for index in range(1,len(para_list)):
vec = embedding.embed_query(para_list[index])
vec = np.array(vec)
vec = vec.reshape(1,-1)
similarity = cosine_similarity(cur_para_vec,vec)
similarity2 = cosine_similarity(last_centence_vec,vec)
if similarity > split_threshold_para or similarity2 > split_threshold:
cur_para_info.append({"offset":len(cur_para)+1,"similarity":[similarity[0][0],similarity2[0][0]]})
cur_para += "\n" + para_list[index]
cur_para_vec = embedding.embed_query(cur_para)
cur_para_vec = np.array(cur_para_vec).reshape(1,-1)
else:
para_split.append(cur_para)
para_info.append({"para_info":cur_para_info,"para_vec":cur_para_vec})
cur_para = para_list[index]
cur_para_info=[{"offset":0,"similarity":[similarity[0][0],similarity2[0][0]]}]
cur_para_vec = vec
last_centence_vec = vec
# print("-----------------para=------", para_list[index])
# print("----------------vec---------", vec)
# print(cur_para_vec)
# print(cur_para_info)
# print(para_info)
if len(cur_para)>0:
para_split.append(cur_para)
para_info.append({"para_info":cur_para_info,"para_vec":cur_para_vec})
    # merge free-standing single-sentence segments into the preceding or following paragraph, decided by similarity
result = []
result_simple = []
def insert_info(para,info):
for i in range(len(info)-1,-1,-1):
para = para[:info[i]["offset"]] + "**与上段落相似 {:.5} 与上句相似 {:.5}**\n".format(info[i]["similarity"][0],info[i]["similarity"][1]) +para[info[i]["offset"]:]
return para
cache_para = ""
cache_para_simple = ""
last_para_vec = None
for index in range(len(para_split)):
# 如果段落不是单独的句子
if len(para_info[index]["para_info"])>1:
result.append(cache_para+insert_info(para_split[index],para_info[index]["para_info"]))
result_simple.append(cache_para_simple+para_split[index])
cache_para = ""
cache_para_simple = ""
last_para_vec = np.array(embedding.embed_query(result_simple[-1])).reshape(1,-1)
else: # 需要合并
if index == len(para_split)-1:
if len(result)>0:
result[-1] += "\n"+cache_para+insert_info(para_split[index],para_info[index]["para_info"])
result_simple[-1] += "\n"+cache_para_simple+para_split[index]
else:
result.append(cache_para+insert_info(para_split[index],para_info[index]["para_info"]))
result_simple.append(cache_para_simple+para_split[index])
cache_para = ""
cache_para_simple = ""
last_para_vec = np.array(embedding.embed_query(result_simple[-1])).reshape(1,-1)
else:
if index == 0:
cache_para += insert_info(para_split[index],para_info[index]["para_info"]) +"\n"
cache_para_simple += para_split[index]+ "\n"
# last_para_vec = embedding.embed_query(result_simple[-1])
else:
if last_para_vec is not None:
last_similarity = cosine_similarity(para_info[index]["para_vec"],last_para_vec)[0][0]
last_para_vec = None
else:
last_similarity = para_info[index]["para_info"][0]["similarity"][0]
                    next_similarity = cosine_similarity(para_info[index]["para_vec"],para_info[index+1]["para_vec"])[0][0]
if last_similarity > marge_threshold_para or next_similarity > marge_threshold_para or len(para_split[index]) < 50:
if last_similarity > next_similarity and len(result)>0:
result[-1] += "\n"+cache_para+insert_info(para_split[index],para_info[index]["para_info"])
result_simple[-1] += "\n"+cache_para_simple+para_split[index]
cache_para = ""
cache_para_simple = ""
last_para_vec = np.array(embedding.embed_query(result_simple[-1])).reshape(1,-1)
else:
cache_para += insert_info(para_split[index],para_info[index]["para_info"])+"\n"
cache_para_simple += para_split[index]+ "\n"
last_para_vec = np.array(embedding.embed_query(cache_para)).reshape(1,-1)
else:
result.append(cache_para+insert_info(para_split[index],para_info[index]["para_info"]))
result_simple.append(cache_para_simple+para_split[index])
last_para_vec = np.array(embedding.embed_query(result_simple[-1])).reshape(1,-1)
cache_para = ""
cache_para_simple = ""
if len(cache_para)>0:
result.append(cache_para)
result_simple.append(cache_para_simple)
return result,result_simple
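# split_adapt_both: the same two-similarity split as split_plus, but segments shorter than
# 1.5x the average input line length are simply appended to the previous result.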
def split_adapt_both(para,split_threshold,split_threshold_para):
para_list = para.split("\n")
avg_len = len(para)//len(para_list)
para_split = []
para_split_simple = []
cur_para = para_list[0]
cur_para_more = para_list[0]
cur_para_vec = embedding.embed_query(cur_para)
cur_para_vec = np.array(cur_para_vec).reshape(1,-1)
last_centence_vec = cur_para_vec
for index in range(1,len(para_list)):
vec = embedding.embed_query(para_list[index])
vec = np.array(vec)
vec = vec.reshape(1,-1)
similarity = cosine_similarity(cur_para_vec,vec)
similarity2 = cosine_similarity(last_centence_vec,vec)
if similarity > split_threshold_para or similarity2 > split_threshold:
cur_para += "\n" + para_list[index]
cur_para_more += f"\n---------与上段落相似 {similarity[0][0]:.5} 与上句相似 {similarity2[0][0]:.5}---------\n" + para_list[index]
cur_para_vec = embedding.embed_query(cur_para)
cur_para_vec = np.array(cur_para_vec).reshape(1,-1)
else:
para_split.append(cur_para_more)
para_split_simple.append(cur_para)
cur_para = "\n" + para_list[index]
cur_para_more = f"\n---------与上段落相似 {similarity[0][0]:.5} 与上句相似 {similarity2[0][0]:.5}---------\n" + para_list[index]
cur_para_vec = vec
last_centence_vec = vec
if len(cur_para)>0:
para_split.append(cur_para_more)
para_split_simple.append(cur_para)
result = []
result_simple = []
for index in range(len(para_split)):
if len(para_split[index]) < avg_len*1.5:
if len(result)>0:
result[-1] += "\n" + para_split[index]
result_simple[-1] += "\n" + para_split_simple[index]
else:
result.append(para_split[index])
result_simple.append(para_split_simple[index])
else:
result.append(para_split[index])
result_simple.append(para_split_simple[index])
return result,result_simple
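# split_adapt: split based only on the similarity between the accumulated paragraph and
# the next line.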
def split_adapt(para,split_threshold):
para_list = para.split("\n")
para_split = []
para_split_simple = []
cur_para = para_list[0]
cur_para_simple = para_list[0]
cur_para_vec = embedding.embed_query(cur_para)
cur_para_vec = np.array(cur_para_vec).reshape(1,-1)
for index in range(1,len(para_list)):
vec = embedding.embed_query(para_list[index])
vec = np.array(vec)
vec = vec.reshape(1,-1)
similarity = cosine_similarity(cur_para_vec,vec)
if similarity > split_threshold:
cur_para += f"\n{similarity[0][0]:.5}\n" + para_list[index]
cur_para_simple += "\n" + para_list[index]
cur_para_vec = embedding.embed_query(cur_para)
cur_para_vec = np.array(cur_para_vec).reshape(1,-1)
else:
para_split.append(cur_para)
para_split_simple.append(cur_para_simple)
cur_para = f"\n{similarity[0][0]:.5}\n" + para_list[index]
cur_para_simple = "\n" + para_list[index]
cur_para_vec = vec
if len(cur_para)>0:
para_split.append(cur_para)
        para_split_simple.append(cur_para_simple)
return para_split,para_split_simple
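# split_simple: the simplest strategy, starting a new paragraph whenever the similarity to
# the preceding text drops below split_threshold.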
def split_simple(para,split_threshold):
para_list = para.split("\n")
para_split = []
para_split_simple = []
cur_para = para_list[0]
cur_para_simple = para_list[0]
cur_para_vec = embedding.embed_query(cur_para)
cur_para_vec = np.array(cur_para_vec).reshape(1,-1)
for index in range(1,len(para_list)):
vec = embedding.embed_query(para_list[index])
vec = np.array(vec).reshape(1,-1)
similarity = cosine_similarity(cur_para_vec,vec)[0][0]
        if similarity > split_threshold:
            cur_para += f"\n{similarity:.5}\n" + para_list[index]
            cur_para_simple += "\n" + para_list[index]
            cur_para_vec = vec  # keep comparing against the most recent line ("consecutive sentences" strategy)
else:
para_split.append(cur_para)
para_split_simple.append(cur_para_simple)
cur_para = f"\n{similarity:.5}\n" + para_list[index]
cur_para_simple = "\n" + para_list[index]
cur_para_vec = vec
    # flush the last accumulated paragraph so the trailing segment is not dropped
    if len(cur_para) > 0:
        para_split.append(cur_para)
        para_split_simple.append(cur_para_simple)
    return para_split,para_split_simple
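# Dispatch to the selected splitting strategy and render each resulting paragraph as a
# markdown section, with (detail) and without (simple) the similarity annotations.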
def split_para_func(para,split_method,split_threshold,split_threshold_para,marge_threshold_para=0):
if para == "":
return "",""
# para = para.replace("\n\n","\n")
if split_method == "基于连续两句话相似度分割":
para_split,para_split_simple = split_simple(para,split_threshold)
elif split_method == "基于已分割段落与句子相似度分割":
para_split,para_split_simple = split_adapt(para,split_threshold_para)
elif split_method == "基于已分割和连续同时判断":
para_split,para_split_simple = split_adapt_both(para,split_threshold,split_threshold_para)
elif split_method == "基于已分割和连续同时判断,并合并独立段落":
para_split,para_split_simple = split_plus(para,split_threshold,split_threshold_para,marge_threshold_para)
# return "\n############################\n---段落分割符---\n############################\n".join(para_split),"\n############################\n---段落分割符---\n############################\n".join(para_split_simple)
markdown_detail = ""
for i,para in enumerate(para_split):
markdown_detail += "### 第"+str(i+1)+"段 \n&ensp;&ensp;&ensp;&ensp;" + para.replace("\n"," \n&ensp;&ensp;&ensp;&ensp;") + " \n"
markdown_simple = ""
for i,para in enumerate(para_split_simple):
markdown_simple += "### 第"+str(i+1)+"段 \n&ensp;&ensp;&ensp;&ensp;" + para.replace("\n"," \n&ensp;&ensp;&ensp;&ensp;") + " \n"
return markdown_detail,markdown_simple
# return "\n\n".join(para_split), "\n------------------------------------new_para----------------------------------\n".join(para_split_simple)
def main():
with gr.Blocks() as demo:
split_method = gr.Radio(choices=["基于连续两句话相似度分割","基于已分割段落与句子相似度分割","基于已分割和连续同时判断","基于已分割和连续同时判断,并合并独立段落"], label="分割方式",value="基于连续两句话相似度分割")
with gr.Row():
split_threshold = gr.Slider(0.5,1.0,label="句子相似度阈值", value=0.8,step=0.01)
split_threshold_para = gr.Slider(0.5,1.0,label="段落相似度阈值", value=0.75,step=0.01)
marge_threshold_para = gr.Slider(0,1.0,label="合并相似度阈值", value=0.5,step=0.01)
with gr.Row():
ori_para = gr.Textbox(label="原始文本", placeholder="请输入一段文本",interactive=True,lines=40,max_lines=40)
with gr.Tab("详细"):
split_para = gr.Markdown(label="分割后的文本",interactive=True)
with gr.Tab("简单"):
split_para_simple = gr.Markdown(label="分割后的文本",interactive=True)
split_btn = gr.Button(value = "点击分割")
split_btn.click(lambda:gr.Button(interactive=False),[],[split_btn]).then(
split_para_func,[ori_para,split_method,split_threshold,split_threshold_para,marge_threshold_para],[split_para,split_para_simple]
).then(
lambda:gr.Button(interactive=True),[],[split_btn]
)
split_threshold.change(
split_para_func,[ori_para,split_method,split_threshold,split_threshold_para,marge_threshold_para],[split_para,split_para_simple]
)
split_threshold_para.change(
split_para_func,[ori_para,split_method,split_threshold,split_threshold_para,marge_threshold_para],[split_para,split_para_simple]
)
marge_threshold_para.change(
split_para_func,[ori_para,split_method,split_threshold,split_threshold_para,marge_threshold_para],[split_para,split_para_simple]
)
gr.Examples([para2],inputs=[ori_para])
demo.launch(share=False, inbrowser=True,server_name="0.0.0.0")
if __name__ == "__main__":
main()
import sys
sys.path.append("../..")
from llm.chatglm import ChatGLMSerLLM
from langchain import LLMChain
import json
from langchain.prompts import StringPromptTemplate,PromptTemplate
import re
chatglm_prompt = """请仔细阅读以下文本内容,并根据文本的主题和关键信息,生成一个包含摘要、标签列表和分类信息的JSON结构。确保摘要准确捕捉文本的核心内容,标签列表精确反映文本的主旨,并且分类信息与文本内容紧密相关。请按照以下JSON结构格式输出结果:(注意,你的回复中只能有json)
```{{
"title": 给文本添加标题,标题简洁明了地概括文本的主题。,
"summary": 这里是文本的摘要,简洁明了地概括文本的主要内容。,
"tags": 为文本添加标签,结果为list。,
"category": 给文本添加分类,结果为字符串。
}}```
文本内容如下所示:
{context}
"""
CHATGLM_PROMPT = PromptTemplate(input_variables=["context"],template=chatglm_prompt)
chatglm_prompt_ct = """请仔细阅读以下文本内容,并根据文本的主题和关键信息,生成一个包含摘要、标签列表和分类信息的JSON结构。确保摘要准确捕捉文本的核心内容,标签列表精确反映文本的主旨,并且分类信息与文本内容紧密相关。请按照以下JSON结构格式输出结果:(注意,你的回复中只能有json)
```{{
"title": 给文本添加标题,标题简洁明了地概括文本的主题。,
"summary": 这里是文本的摘要,概括文本的主要内容。,
"tags": 提取文本中的关键词,结果为list。,
"category": 给文本添加分类,结果为字符串。
}}```
注意:category必须属于集合之一:[{category}]
------------------------
文本内容如下所示:
{context}"""
CHATGLM_PROMPT_CT = PromptTemplate(input_variables=["context", "category"],template=chatglm_prompt_ct)
query = """为什么说现代经济是信用经济?
答:(1)现代经济运作的特点。信用关系无处不在;信用规模呈现不断扩张趋势;信用结构日趋复杂化 。
(2)从信用关系的各部门分析:盈余与赤字、债权与债务。
(3)从信用关系中的主体来分析。由于经济中广泛存在着专门调剂资金余缺的金融机构,借贷双方不需要直接见面,通过金融机构作为中介人,便可解决资金的融通,从而进一步促进了信用和信用关系的发展。信用的发展,又大大促进了生产力和经济的发展。"""
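# Pull the first JSON block out of the model reply, whether it is returned as bare JSON,
# inside a ```json fence, or inside a plain ``` fence.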
def extract_first_code_block(input_string):
    if input_string and input_string[0] == '{' and input_string[-1] == '}':
return input_string
pattern1 = r'```json(.*?)```'
match1 = re.search(pattern1, input_string, re.DOTALL)
if match1:
return match1.group(1)
else:
pattern2 = r'```(.*?)```'
match2 = re.search(pattern2, input_string, re.DOTALL)
if match2:
return match2.group(1)
else:
return None
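# prase() turns the model reply into (title, summary, tags, category). The reply is expected to contain a JSON
# object along these lines (illustrative example only, not produced by this code):
# {"title": "...", "summary": "...", "tags": ["...", "..."], "category": "..."}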
def prase(answer):
json_data = extract_first_code_block(answer) # 从输出中拿json数据
# print("==================================================")
# print(json_data)
# print("==================================================")
data = {}
try:
data = json.loads(json_data)
except Exception as e:
print(f"在大模型给的结果转化为json形式的时候出现错误: {e}")
title = data.get("title", "")
summary = data.get("summary", "")
tags = data.get("tags", "")
category = data.get("category", "")
return title, summary, tags, category
def modelcall_prase(llmchain,input):
result = llmchain.invoke({"context":input})
title, summary, tags, category = prase(result["text"])
return result, title, summary, tags, category
def modelcall_prase_ct(llmchain, input, category):
result = llmchain.invoke({"context":input, "category":category})
title, summary, tags, category = prase(result["text"])
return result, title, summary, tags, category
chatglm_prompt_tags = """请认真阅读下段文本内容,并根据其内容生成一个或多个标签。你需要回复一个列表,该列表中的元素为与文本内容相符合的标签,除此之外无需回复其他内容。文本内容如下:
{context}"""
chatglm_prompt_title = """请认真阅读下段文本内容,并根据其内容信息生成其标题名称。你只需要给出标题,无需输出其他内容。文本内容如下:
{context}"""
chatglm_prompt_summary = """请认真阅读下段文本内容,并用一句话描述其中心内容。你只需要输出该句话,无需输出其他内容。文本内容如下:
{context}"""
chatglm_prompt_category = """请认真阅读下段文本内容,并在预定义好的类别中匹配与文本所示内容最相似的一个。你只需要输出该类别,无需输出其他内容。注意:必须是给出的类别中的一个,如果找不到与文本所述相匹配的类型则输出“不限”即可。文本内容如下:
{context}
预定义的类别如下:
{category}"""
CHATGLM_PROMPT_TAGS = PromptTemplate(input_variables=["context"],template=chatglm_prompt_tags)
CHATGLM_PROMPT_TITLE = PromptTemplate(input_variables=["context"],template=chatglm_prompt_title)
CHATGLM_PROMPT_SUMMARY = PromptTemplate(input_variables=["context"],template=chatglm_prompt_summary)
CHATGLM_PROMPT_CATEGORY = PromptTemplate(input_variables=["context", "category"],template=chatglm_prompt_category)
if __name__ == "__main__":
base_llm=ChatGLMSerLLM(url="http://192.168.10.93:8000")
# chose_llm = LLMChain(llm=base_llm, prompt=CHATGLM_PROMPT_TAGS)
# tags_get1 = chose_llm.run({"context":query})
# print(tags_get1)
# chose_llm = LLMChain(llm=base_llm, prompt=CHATGLM_PROMPT_CATEGORY)
# print(chose_llm.run({"context":query}))
# chose_llm = LLMChain(llm=base_llm, prompt=CHATGLM_PROMPT_TITLE)
# print(chose_llm.run({"context":query}))
# chose_llm = LLMChain(llm=base_llm, prompt=CHATGLM_PROMPT_SUMMARY)
# print(chose_llm.run({"context":query, "category":"热点问题、手机银行、转账汇款、账户管理、个人网银、信贷业务、自助设备"}))
chose_llm = LLMChain(llm=base_llm, prompt=CHATGLM_PROMPT_CT)
    # modelcall_prase_ct returns five values; unpacking a sixth "subcategories" would raise a ValueError
    result, title, summary, tags, category = modelcall_prase_ct(chose_llm, input=query, category="金融,财政,计算机,学生,教师,学校,社会")
    print("result:", result)
    print("title", title)
    print("summary", summary)
    print("tags", tags)
    print("category", category)
# """{
# "summary": "储蓄国债(电子式)和储蓄国债(凭证式)的特点和操作说明。",
# "tags": [
# "储蓄国债",
# "电子式",
# "凭证式",
# "付息",
# "兑付",
# "提前兑取",
# "非交易过户",
# "财产证明",
# "质押贷款"
# ],
# "category": "金融产品",
# "subcategories": [
# "债券",
# "国债",
# "投资"
# ]
# }"""
\ No newline at end of file
from langchain.prompts import PromptTemplate
chatglm3_prompt_tfq = """<|system|>
你是一个可以将一段文本根据其内容生成可以用于考试的判断题,并按照一定格式进行输出的工具。具体的输出格式如下所示:
试题1:
题目:。
答案:正确/错误。
解析:用到的资料中的内容。
试题2:
题目:。
答案:正确/错误。
解析:用到的资料中的内容。
...
<|user|>
请你根据下面这段文本生成{num_selector}个判断题。
文本内容如下:
{context}"""
chatglm3_prompt_mcq = """<|system|>
你是一个可以将一段文本根据其内容生成可以用于考试的选择题,并按照一定格式进行输出的工具,具体的输出格式如下所示:
试题1:
题目:。
A.选项内容 B.选项内容 C.选项内容 D.选项内容
正确答案:给出具体选项。
试题2:
题目:。
A.选项内容 B.选项内容 C.选项内容 D.选项内容
正确答案:给出具体选项。
...
<|user|>
请你根据下面这段文本生成{num_selector}个选择题。
文本内容如下:
{context}"""
qianfan_prompt_tfq = """'''
{context}
'''
请根据上面提供的知识资料,生成可以作为考试的判断题,并给出正确答案,一共输出{num_selector}个问题。按照如下格式进行回答:
试题1:
题目:试题内容。
正确答案:正确/错误
解析:选择原因(如果用到原资料中的内容,请列出来)
试题2:
题目:试题内容。
正确答案:正确/错误
解析:选择原因(如果用到原资料中的内容,请列出来)
...
"""
qianfan_prompt_mcq = """'''
{context}
'''
请你根据上面这一段文本的内容生成可以用于考试的选择题,并按照一定格式进行输出,一共输出{num_selector}个问题。具体的输出格式如下所示:
试题1:
题目:
A.选项内容
B.选项内容
C.选项内容
D.选项内容
正确答案:给出具体选项。
试题2:
题目:
A.选项内容
B.选项内容
C.选项内容
D.选项内容
正确答案:给出具体选项。
...
"""
chatglm3_prompt_qa = """请根据以下资料内容生成{num_selector}个问答对。(请你按照以下格式进行回答)
Q:问题
A:答案
Q:问题
A:答案
...
资料内容如下所示:
{context}"""
qianfan_prompt_qa = """'''
{context}
'''
请根据以上资料内容生成{num_selector}个问答对。(请你按照以下格式进行回答)
Q:问题
A:答案
Q:问题
A:答案
...
"""
# chatglm3_prompt_struct_s1 = """<|system|>
# 你是一个可以将一段文本根据其内容将其划分为{p_number}段并按照一定格式进行输出的工具。你需要按照如下的格式进行输出:
# 段落1:
# 段落内容
# 段落2:
# 段落内容
# ...
# (注意:必须保证所有的段落内容之和为文本原文)
# <|user|>
# 文本内容如下:
# {context}"""
chatglm3_prompt_struct_s2 = """{context}
请为上述文本取一个标题(除标题之外不可输出其他内容)。"""
# qianfan_prompt_struct_s1 = """文本内容如下:
# '''
# {context}
# '''
# 你需要将上述文本根据其内容将其划分为{p_number}段并按照如下的格式进行输出:
# 段落1:
# 段落内容
# 段落2:
# 段落内容
# ...
# (注意:原始文本内容必须全部被分割)"""
qianfan_prompt_struct_s2 = """'''
{context}
'''
请为上述文本取一个标题(输出绝对不可出现除标题之外的字):"""
chatglm3_prompt_typos1 = """{context}
已知有上述文本,现需要你仔细阅读上述资料,更正资料中的错别字并将文本语气更改为{tone}语气之后进行输出。你需要按照如下的格式进行输出:
更正之后的资料:
"""
qianfan_prompt_typos1 = """'''
{context}
'''
已知有上述文本,现需要你仔细阅读上述资料,更正资料中的错别字并将文本语气更改为{tone}语气之后进行输出。你需要按照如下的格式进行输出:
更正之后的资料:
"""
chatglm3_prompt_typos2 = """{context}
已知有上述文本,现需要你仔细阅读上述资料,更正资料中的错别字之后进行输出。你需要按照如下的格式进行输出:
更正之后的资料:
"""
qianfan_prompt_typos2 = """'''
{context}
'''
已知有上述文本,现需要你仔细阅读上述资料,更正资料中的错别字之后进行输出。你需要按照如下的格式进行输出:
更正之后的资料:
"""
qianfan_prompt_stc1 = """'''
{context}
'''
请为上述文本取一个简短的标题,不要出现任何标点符号。"""
qianfan_prompt_stc2 = """'''
{context}
'''
请为上述文本取一个简短的标题,不要出现任何标点符号。"""
chatglm3_prompt_stc1 = """{context}
请为上述文本取一个标题(除标题之外不可输出其他内容)。"""
chatglm3_prompt_stc2 = """{context}
请为上述文本取一个标题(除标题之外不可输出其他内容)。"""
QIANFAN_PROMPT_STC1 = PromptTemplate(input_variables=["context"], template=qianfan_prompt_stc1)
QIANFAN_PROMPT_STC2 = PromptTemplate(input_variables=["context"], template=qianfan_prompt_stc2)
CHATGLM3_PROMPT_STC1 = PromptTemplate(input_variables=["context"], template=chatglm3_prompt_stc1)
CHATGLM3_PROMPT_STC2 = PromptTemplate(input_variables=["context"], template=chatglm3_prompt_stc2)
# These templates reference both {context} and {num_selector}, so both are declared as input variables
CHATGLM3_PROMPT_TFQ = PromptTemplate(input_variables=["context", "num_selector"], template=chatglm3_prompt_tfq)
CHATGLM3_PROMPT_MCQ = PromptTemplate(input_variables=["context", "num_selector"], template=chatglm3_prompt_mcq)
QIANFAN_PROMPT_TFQ = PromptTemplate(input_variables=["context", "num_selector"], template=qianfan_prompt_tfq)
QIANFAN_PROMPT_MCQ = PromptTemplate(input_variables=["context", "num_selector"], template=qianfan_prompt_mcq)
CHATGLM3_PROMPT_QA = PromptTemplate(input_variables=["context", "num_selector"], template=chatglm3_prompt_qa)
QIANFAN_PROMPT_QA = PromptTemplate(input_variables=["context", "num_selector"], template=qianfan_prompt_qa)
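# Illustrative usage (assumes an LLM such as ChatGLMSerLLM and an LLMChain are constructed elsewhere):
#   chain = LLMChain(llm=base_llm, prompt=QIANFAN_PROMPT_QA)
#   print(chain.run({"context": some_text, "num_selector": 5}))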
# CHATGLM3_PROMPT_STRUCT_S1 = PromptTemplate(input_variables=["context"], template=chatglm3_prompt_struct_s1)
CHATGLM3_PROMPT_STRUCT_S2 = PromptTemplate(input_variables=["context"], template=chatglm3_prompt_struct_s2)
# QIANFAN_PROMPT_STRUCT_S1 = PromptTemplate(input_variables=["context"], template=qianfan_prompt_struct_s1)
QIANFAN_PROMPT_STRUCT_S2 = PromptTemplate(input_variables=["context"], template=qianfan_prompt_struct_s2)
CHATGLM3_PROMPT_TYPOS1 = PromptTemplate(input_variables=["context", "tone"], template=chatglm3_prompt_typos1)
QIANFAN_PROMPT_TYPOS1 = PromptTemplate(input_variables=["context", "tone"], template=qianfan_prompt_typos1)
CHATGLM3_PROMPT_TYPOS2 = PromptTemplate(input_variables=["context"], template=chatglm3_prompt_typos2)
QIANFAN_PROMPT_TYPOS2 = PromptTemplate(input_variables=["context"], template=qianfan_prompt_typos2)
\ No newline at end of file
from langchain.vectorstores.faiss import FAISS
from langchain.embeddings.base import Embeddings
from langchain.embeddings.huggingface import (
HuggingFaceEmbeddings,
)
import math
from langchain_core.documents import Document
def build_and_save_vectorfaiss(documents, folder_path: str, index_name: str = "psbc_tags", embedding: str = "C:\\Users\\15663\\AI\\models\\bge-large-zh-v1.5"):
    # "embedding" is the local path of the embedding model; build the model object under a separate name
    embedding_model = HuggingFaceEmbeddings(model_name=embedding)
    faiss_vectorstore = FAISS.from_documents(documents=documents, embedding=embedding_model)
    faiss_vectorstore.save_local(folder_path=folder_path, index_name=index_name)
def load_vectorfaiss(folder_path: str, index_name: str = "index", embedding: str = "C:\\Users\\15663\\AI\\models\\bge-large-zh-v1.5"):
    embedding_model = HuggingFaceEmbeddings(model_name=embedding)
    return FAISS.load_local(folder_path=folder_path, embeddings=embedding_model, index_name=index_name)
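# Match model-generated tags against the curated tag vocabulary stored in FAISS. FAISS returns L2 distances,
# so the cosine-similarity threshold is converted into a distance cutoff of (1 - threshold) * sqrt(2)
# (a heuristic that assumes normalized embeddings); tags without a close enough match are returned as suggestions.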
def search_tags(native_tags: list, faiss_vectorstore: FAISS, threshold: float=0.7):
score_threshold = (1-threshold) * math.sqrt(2)
tags = []
advice_tags = []
for native_tag in native_tags:
res = faiss_vectorstore.similarity_search_with_score(query=native_tag, score_threshold=score_threshold, k=1)
if len(res) == 0:
advice_tags.append(native_tag)
else:
tags.append(res[0][0].page_content)
return tags, advice_tags
if __name__ == "__main__":
tags = ["个人存款账户实名制","金融机构","身份证件","实名证件","规定施行","国家外汇管理局","行政许可","实施办法","外汇管理","政务服务","电子化办理","听证","监督检查"]
docs = [Document(page_content=tag) for tag in tags]
build_and_save_vectorfaiss(documents=docs,folder_path="./vectorstore",index_name="psbc_tags")
faiss_vectorstore = load_vectorfaiss(folder_path="./vectorstore",index_name="psbc_tags")
threshold = 0.7
result = search_tags(["金融机构"], faiss_vectorstore, threshold=threshold)
print(result)
\ No newline at end of file
# -*- coding: utf-8 -*-
import os, sys
import pandas as pd
sys.path.append("../..")
import gradio as gr
import argparse
from llm.chatglm import ChatGLMSerLLM
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from typing import Awaitable
import asyncio
from langchain.callbacks import AsyncIteratorCallbackHandler
import re
from llm.ernie_with_sdk import ChatERNIESerLLM
from qianfan import ChatCompletion
from difflib import Differ
from scenarios.psbc.generate_centens import split_plus
from scenarios.psbc.prompts import (
CHATGLM3_PROMPT_QA,
CHATGLM3_PROMPT_MCQ,
CHATGLM3_PROMPT_STRUCT_S2,
CHATGLM3_PROMPT_TFQ,
CHATGLM3_PROMPT_TYPOS1,
CHATGLM3_PROMPT_TYPOS2,
CHATGLM3_PROMPT_STC1,
CHATGLM3_PROMPT_STC2,
QIANFAN_PROMPT_MCQ,
QIANFAN_PROMPT_QA,
QIANFAN_PROMPT_STRUCT_S2,
QIANFAN_PROMPT_TFQ,
QIANFAN_PROMPT_TYPOS1,
QIANFAN_PROMPT_TYPOS2,
QIANFAN_PROMPT_STC1,
QIANFAN_PROMPT_STC2
)
from ac_pattern import sensitive_word_detection
from langchain.embeddings.huggingface import (
HuggingFaceEmbeddings,
)
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import copy
embedding_path = "C:\\Users\\15663\\AI\\models\\bge-large-zh-v1.5"
embedding = HuggingFaceEmbeddings(model_name=embedding_path)
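# The bge-large-zh embedding model is loaded once at import time and reused by the structuring helpers below.
# Each async_chat_* coroutine streams tokens: an AsyncIteratorCallbackHandler collects the model output while
# the LLMChain runs as a background task, and the accumulated text is yielded to Gradio for live display.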
async def async_chat_mcq(input_text, model, num_selector):
callback = AsyncIteratorCallbackHandler()
async def wrap_done(fn: Awaitable, event: asyncio.Event):
try:
await fn
except Exception as e:
import traceback
traceback.print_exc()
print(f"Caught exception: {e}")
finally:
event.set()
if model == "ernie":
qianfanchain_mcq = LLMChain(llm=base_llm2, prompt=QIANFAN_PROMPT_MCQ, llm_kwargs={"temperature":0.9})
task = asyncio.create_task(wrap_done(qianfanchain_mcq.arun({"context":input_text, "num_selector":num_selector}, callbacks=[callback]), callback.done))
else:
chatglm3chain_mcq = LLMChain(llm=base_llm1, prompt=CHATGLM3_PROMPT_MCQ, llm_kwargs={"temperature":0.9})
task = asyncio.create_task(wrap_done(chatglm3chain_mcq.arun({"context":input_text, "num_selector":num_selector}, callbacks=[callback]), callback.done))
text=""
async for token in callback.aiter():
text=text+token
yield f"{text}"
with open('output.txt', 'a', encoding="utf-8") as f:
sys.stdout = f
print("==================================================async_chat_mcq=====================================================")
print(text)
print("==================================================async_chat_mcq=====================================================")
sys.stdout = sys.__stdout__
await task
async def async_chat_tfq(input_text, model,num_selector):
callback = AsyncIteratorCallbackHandler()
async def wrap_done(fn: Awaitable, event: asyncio.Event):
try:
await fn
except Exception as e:
import traceback
traceback.print_exc()
print(f"Caught exception: {e}")
finally:
event.set()
if model == "ernie":
qianfanchain_tfq = LLMChain(llm=base_llm2, prompt=QIANFAN_PROMPT_TFQ,llm_kwargs={"temperature":0.9})
task = asyncio.create_task(wrap_done(qianfanchain_tfq.arun({"context":input_text,"num_selector":num_selector},callbacks=[callback]),callback.done))
else:
chatglm3chain_tfq = LLMChain(llm=base_llm1, prompt=CHATGLM3_PROMPT_TFQ,llm_kwargs={"temperature":0.9})
task = asyncio.create_task(wrap_done(chatglm3chain_tfq.arun({"context":input_text,"num_selector":num_selector},callbacks=[callback]),callback.done))
text=""
async for token in callback.aiter():
text=text+token
yield f"{text}"
with open('output.txt', 'a', encoding="utf-8") as f:
sys.stdout = f
print("==================================================async_chat_tfq=====================================================")
print(text)
print("==================================================async_chat_tfq=====================================================")
sys.stdout = sys.__stdout__
await task
async def async_chat_qa(input_text, model,num_selector):
callback = AsyncIteratorCallbackHandler()
async def wrap_done(fn: Awaitable, event: asyncio.Event):
try:
await fn
except Exception as e:
import traceback
traceback.print_exc()
print(f"Caught exception: {e}")
finally:
event.set()
if model == "ernie":
qianfanchain_qa = LLMChain(llm=base_llm2, prompt=QIANFAN_PROMPT_QA,llm_kwargs={"temperature":0.9})
task = asyncio.create_task(wrap_done(qianfanchain_qa.arun({"context":input_text,"num_selector":num_selector},callbacks=[callback]),callback.done))
else:
chatglm3chain_qa = LLMChain(llm=base_llm1, prompt=CHATGLM3_PROMPT_QA,llm_kwargs={"temperature":0.9})
task = asyncio.create_task(wrap_done(chatglm3chain_qa.arun({"context":input_text,"num_selector":num_selector},callbacks=[callback]),callback.done))
print("async_chat_qa")
text=""
async for token in callback.aiter():
text=text+token
yield f"{text}"
with open('output.txt', 'a', encoding="utf-8") as f:
sys.stdout = f
print("==================================================async_chat_qa======================================================")
print(text)
print("==================================================async_chat_qa======================================================")
sys.stdout = sys.__stdout__
await task
# async def async_chat_stc(input_text, model):
# # input_text = input_text.replace("\n\n", "xx8x88x").replace("\n","").replace("xx8x88x","\n\n")
# print(input_text)
# global base_llm1, base_llm2
# callback = AsyncIteratorCallbackHandler()
# async def wrap_done(fn: Awaitable, event: asyncio.Event):
# try:
# await fn
# except Exception as e:
# import traceback
# traceback.print_exc()
# print(f"Caught exception: {e}")
# finally:
# event.set()
# if model == "ernie":
# qianfanchain_stc = LLMChain(llm=base_llm2, prompt=QIANFAN_PROMPT_STRUCT_S2,llm_kwargs={"temperature":0.9})
# task = asyncio.create_task(wrap_done(qianfanchain_stc.arun({"context":input_text},callbacks=[callback]),callback.done))
# else:
# chatglm3chain_stc = LLMChain(llm=base_llm1, prompt=CHATGLM3_PROMPT_STRUCT_S2,llm_kwargs={"temperature":0.9})
# task = asyncio.create_task(wrap_done(chatglm3chain_stc.arun({"context":input_text},callbacks=[callback]),callback.done))
# print("async_chat_stc")
# text=""
# async for token in callback.aiter():
# text=text+token
# yield f"{text}"
# with open('output.txt', 'a', encoding="utf-8") as f:
# sys.stdout = f
# print("=================================================async_chat_stc======================================================")
# print(text)
# print("=================================================async_chat_stc======================================================")
# sys.stdout = sys.__stdout__
# await task
async def async_chat_typos(input_text, model, tone, switch_tone):
global base_llm1, base_llm2, xls_file
# print("-----------------tone_switch-----------------", switch_tone)
# 敏感词检测
input_text = sensitive_word_detection(xls_file,input_text)
callback = AsyncIteratorCallbackHandler()
async def wrap_done(fn: Awaitable, event: asyncio.Event):
try:
await fn
except Exception as e:
import traceback
traceback.print_exc()
print(f"Caught exception: {e}")
finally:
event.set()
if model == "ernie" and switch_tone != False:
qianfanchain_stc = LLMChain(llm=base_llm2, prompt=QIANFAN_PROMPT_TYPOS1,llm_kwargs={"temperature":0.9})
task = asyncio.create_task(wrap_done(qianfanchain_stc.arun({"context":input_text, "tone":tone},callbacks=[callback]),callback.done))
elif model != "ernie" and switch_tone != False:
chatglm3chain_stc = LLMChain(llm=base_llm1, prompt=CHATGLM3_PROMPT_TYPOS1,llm_kwargs={"temperature":0.9})
task = asyncio.create_task(wrap_done(chatglm3chain_stc.arun({"context":input_text, "tone":tone},callbacks=[callback]),callback.done))
elif model == "ernie" and switch_tone == False:
qianfanchain_stc = LLMChain(llm=base_llm2, prompt=QIANFAN_PROMPT_TYPOS2,llm_kwargs={"temperature":0.9})
task = asyncio.create_task(wrap_done(qianfanchain_stc.arun({"context":input_text},callbacks=[callback]),callback.done))
elif model != "ernie" and switch_tone == False:
chatglm3chain_stc = LLMChain(llm=base_llm1, prompt=CHATGLM3_PROMPT_TYPOS2,llm_kwargs={"temperature":0.9})
task = asyncio.create_task(wrap_done(chatglm3chain_stc.arun({"context":input_text},callbacks=[callback]),callback.done))
print("async_chat_typos")
text=""
async for token in callback.aiter():
if text.startswith("更正之后的资料:"):
text = text[8:]
if token.startswith("\n") and text.endswith("\n"):
token = token[1:]
text += token.replace("\n\n","\n").replace("\n"," \n")
yield f"{text}"
with open('output.txt', 'a', encoding="utf-8") as f:
sys.stdout = f
print("=================================================async_chat_typos====================================================")
print(text)
print("=================================================async_chat_typos====================================================")
sys.stdout = sys.__stdout__
await task
def on_select(evt: gr.SelectData, df):
print(f"You selected {evt.value} at {evt.index} from {evt.target}")
lenth = len(df)
if evt.value == "删除":
df.drop(df.index[evt.index[0]],axis=0,inplace=True)
return gr.DataFrame(df,interactive=True, row_count=(lenth-1, "fixed"))
else:
return df
def parse_mcq(output_text):
questions = []
try:
question_texts = output_text.split('\n\n')
for question_text in question_texts:
lines = question_text.split('\n')
question = lines[1]
options = lines[2:-1]
temp = lines[-1].split(":")
if len(temp) > 1 and temp[1] != "":
correct_answer = temp[1]
else:
continue
options = [option.replace(". ",".") for option in options]
question_dict = {
'question': question.split(':')[1],
'options': [option for option in options],
'correct_answer': correct_answer,
}
questions.append(question_dict)
lenth = len(questions)
df = pd.DataFrame({
"问题": [question['question'] for question in questions],
"选项": [question['options'] for question in questions],
"正确答案": [question['correct_answer'] for question in questions],
"删除": ["删除" for _ in questions],
})
return gr.DataFrame(df, row_count=(lenth, "fixed"))
except:
with open('output.txt', 'a', encoding="utf-8") as f:
sys.stdout = f
print("==================================================parse_mcq======================================================")
print("模型格式输出超出预期,选择题解析出现错误!!!")
print("==================================================parse_mcq======================================================")
sys.stdout = sys.__stdout__
return ""
def parse_tfq(output_text):
questions = []
try:
question_texts = output_text.strip().split('\n\n')
for question_text in question_texts:
lines = question_text.split('\n')
question = lines[1].split(':')[1]
correct_answer = lines[2].split(':')[1]
explanation = lines[-1].split(':')[1]
question_dict = {
'question': question,
'correct_answer': correct_answer,
'explanation': explanation
}
questions.append(question_dict)
lenth = len(questions)
df = pd.DataFrame({
"问题": [question['question'] for question in questions],
"答案": [question['correct_answer'] for question in questions],
"解析": [question['explanation'] for question in questions],
"删除": ["删除" for _ in questions]
})
return gr.DataFrame(df, row_count=(lenth, "fixed"))
except:
with open('output.txt', 'a', encoding="utf-8") as f:
sys.stdout = f
print("==================================================parse_tfq======================================================")
print("模型输出格式超出预期,判断题解析出现错误!!!")
print("==================================================parse_tfq======================================================")
sys.stdout = sys.__stdout__
return ""
def parse_qa(output_text):
try:
output_text = output_text.replace("\n\n", "\n")
output_text += "\n"
qa_pairs = re.findall(r"Q[0-9]*:(.*?)\nA[0-9]*:(.*?)\n", output_text, re.DOTALL)
lenth = len(qa_pairs)
df = pd.DataFrame({
"Q": [q.replace("\n", "\\n") for q, _ in qa_pairs],
"A": [a.replace("\n", "\\n") for _, a in qa_pairs],
"删除": ["删除" for _, _ in qa_pairs]
})
return gr.DataFrame(df, row_count=(lenth, "fixed"))
except:
with open('output.txt', 'a', encoding="utf-8") as f:
sys.stdout = f
print("==================================================parse_qa=======================================================")
print("模型输出格式超出预期,QA解析出现错误!!!")
print("==================================================parse_qa=======================================================")
sys.stdout = sys.__stdout__
return ""
# def parse_stc(output_text):
# paragraphs = re.split(r'段落\d+:\n', output_text)[1:]
# titles = []
# contents = []
# delete = []
# for p in paragraphs:
# p = p.replace("段落内容:\n", "段落内容:")
# p = p.replace("标题:\n", "标题:")
# title_match = re.search(r'标题:(.+?)\n', p)
# content_match = re.search(r'段落内容:(.+)', p, re.DOTALL)
# if title_match and content_match:
# titles.append(title_match.group(1))
# contents.append(content_match.group(1).replace("\n", "\\n"))
# delete.append("删除")
# df = pd.DataFrame({"标题": titles, "段落内容": contents, "删除": delete})
# print(df)
# return gr.DataFrame(df, row_count=(len(titles), "fixed"))
# def parse_stc_text(output_text):
# output_text = output_text.replace("\n\n", "\n")
# output_text = re.sub(r'段落\d+:\n', '', output_text)
# output_text = output_text.replace("标题:", "\n")
# output_text = output_text.replace("段落内容:", "")
# if output_text.splitlines()[0] == "":
# output_text = '\n'.join(output_text.splitlines()[1:])
# return output_text
def parse_stc_text(output_text1, output_text2):
try:
# combined_data = ""
markdown_text = ""
for paragraph1, paragraph2 in zip(output_text1.split("\n-------------new_para-------------\n"), output_text2.split("\n\n")):
# paragraph1 = re.sub(r'段落\d+:\n段落内容:', '', paragraph1)
# paragraph1 = re.sub(r'段落\d+:\n', '', paragraph1)
# paragraph2 = re.sub(r'段落\d+:\n标题:', '', paragraph2)
if paragraph2[-1] != "\n":
paragraph2 = paragraph2 + "\n"
# combined_data += paragraph2 + "" + paragraph1 + "\n\n"
markdown_text += "### "+paragraph2 + " \n&ensp;&ensp;&ensp;&ensp;" + paragraph1.replace("\n"," \n&ensp;&ensp;&ensp;&ensp;") + " \n\n"
return markdown_text
except:
with open('output.txt', 'a', encoding="utf-8") as f:
sys.stdout = f
print("==================================================parse_stc======================================================")
print("模型输出格式超出预期,文本结构化出现错误!!!")
print("==================================================parse_stc======================================================")
sys.stdout = sys.__stdout__
return ""
def parse_typos(output_text):
try:
# output_text = output_text.replace(" \n \n", " \n")
if output_text.splitlines()[0] == "更正之后的资料: " or output_text.splitlines()[0] == "更正之后的资料 ":
output_text = '\n'.join(output_text.splitlines()[1:])
# print([output_text])
return output_text
except:
with open('output.txt', 'a', encoding="utf-8") as f:
sys.stdout = f
print("==================================================parse_typos====================================================")
print("模型输出格式超出预期,错别字修改出现错误!!!")
print("==================================================parse_typos====================================================")
sys.stdout = sys.__stdout__
return ""
def diff_texts(text1, text2):
text2 = text2.replace("&ensp;", "")
d = Differ()
return [
(token[2:], token[0] if token[0] != " " else None)
for token in d.compare(text1, text2)
]
def save2file_mcq(dataframe):
try:
dataframe.to_csv("files/mcq.csv", mode='a', index=False)
gr.Info("导出数据成功")
print("导出数据成功")
except:
raise gr.Error("导出数据出错")
def save2file_tfq(dataframe):
try:
dataframe.to_csv("files/tfq.csv", mode='a', index=False)
gr.Info("导出数据成功")
print("导出数据成功")
except:
raise gr.Error("导出数据出错")
def save2file_qa(dataframe):
try:
dataframe.to_csv("files/qa.csv", mode='a', index=False)
gr.Info("导出数据成功")
print("导出数据成功")
except:
raise gr.Error("导出数据出错")
def save2file_stc(dataframe):
try:
dataframe.to_csv("files/stc.csv", mode='a', index=False)
gr.Info("导出数据成功")
print("导出数据成功")
except:
raise gr.Error("导出数据出错")
def reset_dataframe_mcq_tfq():
df = pd.DataFrame({
"1": [],
"2": [],
"3": [],
"4": []
})
return gr.DataFrame(df, visible=True,interactive=True,col_count=(4, "fixed"), row_count=(1, "fixed"))
def reset_dataframe_qa():
df = pd.DataFrame({
"1": [],
"2": [],
"3": []
})
return gr.DataFrame(df, visible=True, interactive=True, col_count=(3, "fixed"), row_count=(1, "fixed"))
def reset_highlighttext():
return None
def reset_textbox_stc():
return ""
def disable_btn_1():
return gr.Button(interactive=False)
def disable_btn_2():
return (gr.Button(interactive=False),)*2
def disable_btn_4():
return (gr.Button(interactive=False),)*4
def enable_btn_1():
return gr.Button(interactive=True)
def enable_btn_2():
return (gr.Button(interactive=True),)*2
def enable_btn_3():
return (gr.Button(interactive=True),)*3
def set_textbox_container_false():
return gr.Textbox(container=False)
def set_textbox_container_true():
return gr.Textbox(container=True)
def chat_stc(input_text, model):
    global base_llm1, base_llm2
if model == "ernie":
llmchain = LLMChain(llm=base_llm2, prompt=QIANFAN_PROMPT_STRUCT_S2,llm_kwargs={"temperature":0.9})
else:
llmchain = LLMChain(llm=base_llm1, prompt=CHATGLM3_PROMPT_STRUCT_S2,llm_kwargs={"temperature":0.9})
if not input_text:
return ""
# tags_box = gr.Dropdown.update(choices=["1","2","3"], value=["1","2"])
result = llmchain.invoke({"context":input_text})
with open('output.txt', 'a', encoding="utf-8") as f:
sys.stdout = f
print("====================================================chat_stc=========================================================")
print(result["text"])
print("====================================================chat_stc=========================================================")
sys.stdout = sys.__stdout__
return result["text"].replace("《","").replace("》","").replace("*","")
def stc_step1(input_text):
split_threshold=0.8
split_threshold_para=0.75
_, para_split_simple = split_plus(input_text,split_threshold,split_threshold_para,marge_threshold_para=0.5)
return "\n-------------new_para-------------\n".join(para_split_simple)
# 将该函数伪装成异步函数
async def stc_step2(output_text, model):
output_text_splits = output_text.split("\n-------------new_para-------------\n")
# print(output_text_splits)
outans = ""
for output_text_split in output_text_splits:
outans = outans + chat_stc(output_text_split, model) + "\n\n"
return outans
def detailed_description_of_tone(radio_input):
"""正式","友好","轻松","强调","客观"""
if radio_input == "正式":
return "使用规范的语法和词汇,结构严谨,用词严肃。常在正式场所使用"
elif radio_input == "友好":
return "使文本显得亲切和易于接受。常用于日常对话、商务交流、社交媒体等场合"
elif radio_input == "轻松":
return "语气轻松随意,是口语化的表达,包含较多缩写词、幽默等元素。常用于不太正式的场合"
elif radio_input == "强调":
return "强调某个观点或情感。常用演讲、辩论、广告等"
elif radio_input == "客观":
return "使用客观的词语和描述,并且尽可能避免主观因素的影响。常用于新闻报道、学术论文等"
def f_switch_tone(switch_tone):
    if not switch_tone:
return gr.Radio(interactive=False,value='')
else:
return gr.Radio(interactive=True)
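# Coarse paragraph segmentation of the raw input: blank lines are folded into the previous paragraph and
# very short lines (<= 20 characters, typically headings) are merged into the following line.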
def get_para_list(input_text):
o_para_list = input_text.split("\n")
para_list = []
for index,item in enumerate(o_para_list):
if len(item.strip()) == 0:
if len(para_list) == 0:
para_list.append(item+'\n')
else:
para_list[-1] = para_list[-1] + '\n' + item
elif len(item) <= 20:
if index+1 < len(o_para_list):
o_para_list[index+1] = item+"\n"+o_para_list[index+1]
continue
else:
para_list.append(item)
else:
para_list.append(item)
print(para_list)
if len(para_list) >= 2 and all(char.isspace() for char in para_list[0]):
tmp_str = para_list[0]
para_list = para_list[1:]
para_list[0] = tmp_str+para_list[0]
return para_list
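# TitleInfo represents one level of the recursive outline built from the paragraph list: para_list records
# where each titled block ends in the original paragraphs, title holds the model-generated headings, and
# title_info points to the next, finer-grained level.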
class TitleInfo:
def __init__(self):
# 结构化信息
# 当前标题在原始句子的位置信息,如第一个元素为5,则表示0-4为当前标题下的内容
self.para_list = []
        # 详细相似信息,用于插入展示调试,例如 [{"offset":108,"similarity":[0.555,0.9]}]
        self.para_info = []
        # 当前标题下总文本数量,不包括插入的相似信息,因此父级标题展示offset位置错误
        self.para_len = []
# 当前级别标题,由模型生成
self.title = []
# 段落向量
self.para_vec = []
# 递归结构
self.title_info = None
def gen_title2(self, o_para_list, model_selector):
global llmchain, base_llm1, base_llm2
if self.title_info is not None:
self.title_info.gen_title2(o_para_list, model_selector)
if self.title_info is None:
text = ""
for index, para in enumerate(o_para_list):
if index in self.para_list:
if model_selector == 'ernie':
llmchain = LLMChain(llm=base_llm2, prompt=QIANFAN_PROMPT_STC1, llm_kwargs={"temperature":0.9})
elif model_selector == 'chatglm3':
llmchain = LLMChain(llm=base_llm1, prompt=CHATGLM3_PROMPT_STC1, llm_kwargs={"temperature":0.9})
result = llmchain.invoke({"context":text})
self.title.append(result["text"].replace("》", "").replace("《", "").replace("*", ""))
text = ""
text = text+para+'\n'
if model_selector == 'ernie':
llmchain = LLMChain(llm=base_llm2, prompt=QIANFAN_PROMPT_STC1, llm_kwargs={"temperature":0.9})
elif model_selector == 'chatglm3':
llmchain = LLMChain(llm=base_llm1, prompt=CHATGLM3_PROMPT_STC1, llm_kwargs={"temperature":0.9})
result = llmchain.invoke({"context":text})
self.title.append(result["text"].replace("》", "").replace("《", "").replace("*", ""))
else:
text = ""
title = ""
syg = -1
iindex = 0
positions = [self.title_info.para_list.index(num) for num in self.para_list]
for index, para in enumerate(o_para_list):
text = text+para+'\n'
if index+1 in self.title_info.para_list:
ttitle_n = self.title_info.para_list.index(index+1)
title = title+self.title_info.title[ttitle_n]+'\n'
if index+1 in self.para_list:
distance = positions[iindex] - syg
syg = positions[iindex]
iindex = iindex+1
if distance >= 4:
if model_selector == 'ernie':
llmchain = LLMChain(llm=base_llm2, prompt=QIANFAN_PROMPT_STC2, llm_kwargs={"temperature":0.9})
elif model_selector == 'chatglm3':
llmchain = LLMChain(llm=base_llm1, prompt=CHATGLM3_PROMPT_STC2, llm_kwargs={"temperature":0.9})
result = llmchain.invoke({"context":title})
self.title.append(result["text"].replace("》", "").replace("《", "").replace("*", ""))
text = ""
title = ""
else:
if model_selector == 'ernie':
llmchain = LLMChain(llm=base_llm2, prompt=QIANFAN_PROMPT_STC1, llm_kwargs={"temperature":0.9})
elif model_selector == 'chatglm3':
llmchain = LLMChain(llm=base_llm1, prompt=CHATGLM3_PROMPT_STC1, llm_kwargs={"temperature":0.9})
result = llmchain.invoke({"context":text})
self.title.append(result["text"].replace("》", "").replace("《", "").replace("*", ""))
text = ""
title = ""
def gen_title1(self,para_list,llmchain,start=0,end=-1,para_len=-1):
if end == -1:
end = len(para_list)
for i,index in enumerate(self.para_list):
if index > end:
break
if index <= start:
continue
if self.title_info is not None:
self.title_info.gen_title1(para_list,llmchain,start=start,end=index,para_len=self.para_len[i])
            if self.para_len[i] < para_len or para_len == -1: # 当标题下的文本长度相同时,不再展示标题
text = "\n".join(para_list[start:index])
result = llmchain.invoke({"context":text})
self.title.append(result["text"].replace("》", "").replace("《", "").replace("*", ""))
else:
self.title.append("")
start = index
def show(self,para_list,start=0,end=-1,para_len=-1,parent_no="",level=""):
if end == -1:
end = len(para_list)
result=""
pa = 0
for i,index in enumerate(self.para_list):
title_no = parent_no+str(pa+1)+"."
if index > end:
break
if index <= start:
continue
            if self.para_len[i] < para_len or para_len == -1: # 当标题下的文本长度相同时,不再展示标题
pa += 1
if len(self.title) > i:
result+= level+"# "+title_no+"&ensp;"+self.title[i]+"\n"
else:
result+= level+"# "+title_no+"&ensp;此处为标题"+str(index)+"\n" # 使用self.title 替换
if self.title_info is not None:
result+= self.title_info.show(para_list,start=start,end=index,para_len=self.para_len[i],parent_no=title_no,level=level+"#")
else:
# pa += 1
result+= "&ensp;&ensp;&ensp;&ensp;"+" \n&ensp;&ensp;&ensp;&ensp;".join(para_list[start:index])
result+=" \n"
start = index
return result
def show_detail(self,para_list,start=0,end=-1,para_len=-1,parent_no="",level=""):
if end == -1:
end = len(para_list)
result=""
pa = 0
for i,index in enumerate(self.para_list):
title_no = parent_no+str(pa+1)+"."
if index > end:
break
if index <= start:
continue
if self.para_len[i] < para_len or para_len == -1:
pa += 1
if len(self.title) > i:
result+= level+"# "+title_no+"&ensp;"+self.title[i]+"\n"
else:
result+= level+"# "+title_no+"&ensp;此处为标题"+str(index)+"\n" # 使用self.title 替换
if self.title_info is not None:
# 打印高级标题相似度
for k,para_i in enumerate(self.para_info[i]):
result +='**{}与{}相似{:.5}** 与上句相似{:.5}||'.format(self.title_info.para_list[k],self.title_info.para_list[k+1],para_i["similarity"][0],para_i["similarity"][1])
result+=" \n"
result+= self.title_info.show_detail(para_list,start=start,end=index,para_len=self.para_len[i],parent_no=title_no,level=level+"#")
else:
detail_para = "&ensp;&ensp;&ensp;&ensp;"+self.insert_info("\n".join(para_list[start:index]),self.para_info[i])
detail_para = detail_para.replace("\n"," \n&ensp;&ensp;&ensp;&ensp;")
result+= detail_para
result+=" \n"
start = index
return result
def is_root(self):
if len(self.para_list) == 1:
return True
else:
title_info = self.title_info
level = 1
while title_info is not None:
title_info = title_info.title_info
level += 1
if level > 2: # 达到四级标题
return True
return False
def insert_info(self,para,info):
for i in range(len(info)-1,-1,-1):
para = para[:info[i]["offset"]] + "**与上段落相似 {:.5} 与上句相似 {:.5}**\n".format(info[i]["similarity"][0],info[i]["similarity"][1]) +para[info[i]["offset"]:]
return para
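# Build the next (coarser) outline level: consecutive segments are merged into one block while their
# paragraph-level or sentence-level cosine similarity stays above the given thresholds.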
def spilt_plus(para_list,split_threshold,split_threshold_para,title_info:TitleInfo = None):
if title_info is not None and title_info.is_root():
return title_info
title_info_parent = TitleInfo()
# 通过title_info构建新的片段
_para_list = []
_para_list_index = []
_para_vec = []
if title_info is None or not title_info.para_info:
_para_list = para_list
_para_list_index = [i+1 for i in range(len(_para_list))]
for index in _para_list:
_para_vec.append(np.array(embedding.embed_query(index)).reshape(1,-1))
else:
offset = 0
_para_vec = copy.deepcopy(title_info.para_vec)
for index in title_info.para_list:
_para_list_index.append(index)
if index <= len(para_list) and index >= offset:
_para_list.append("\n".join(para_list[offset:index]))
offset = index
else:
print(f"段落信息错误,index:{index},offset:{offset}")
cur_para = _para_list[0]
cur_para_info = []
cur_para_vec = _para_vec[0]
last_centence_vec = cur_para_vec
for index in range(1,len(_para_list)):
vec = _para_vec[index]
similarity = cosine_similarity(cur_para_vec,vec)
similarity2 = cosine_similarity(last_centence_vec,vec)
if similarity > split_threshold_para or similarity2 > split_threshold:
cur_para_info.append({"offset":len(cur_para)+1,"similarity":[similarity[0][0],similarity2[0][0]]})
cur_para += "\n" + _para_list[index]
cur_para_vec = embedding.embed_query(cur_para)
cur_para_vec = np.array(cur_para_vec).reshape(1,-1)
else:
title_info_parent.para_list.append(_para_list_index[index-1])
title_info_parent.para_len.append(len(cur_para))
title_info_parent.para_vec.append(cur_para_vec)
title_info_parent.para_info.append(cur_para_info)
cur_para = _para_list[index]
cur_para_info=[{"offset":0,"similarity":[similarity[0][0],similarity2[0][0]]}]
cur_para_vec = vec
last_centence_vec = vec
if len(cur_para)>0:
title_info_parent.para_list.append(_para_list_index[len(_para_list)-1])
title_info_parent.para_info.append(cur_para_info)
title_info_parent.para_len.append(len(cur_para))
title_info_parent.para_vec.append(cur_para_vec)
title_info_parent.title_info = title_info
return title_info_parent
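# Merge leaf segments that consist of a single original paragraph into a neighbouring segment whenever the
# similarity exceeds marge_threshold_para; segments already made of several sentences are left untouched.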
def marge_single_title(para_list,title_info:TitleInfo, marge_threshold_para=0):
if title_info is None or title_info.title_info is not None: # 如果不是叶子标题,不需要合并
return title_info
marge_title_info = TitleInfo()
offset = 0
cache_para_info = []
cache_len = 0
cache_vec = None
# last_para_vec = None
for index in range(len(title_info.para_list)):
if len(title_info.para_info[index])>1:
# 当前段落由多个原句组成,则不在合并
# 处理向量,和标题下文本长度
if cache_vec is not None: # 如果存在合并缓存,重新计算向量
text = "\n".join(para_list[offset:title_info.para_list[index]])
marge_title_info.para_vec.append(np.array(embedding.embed_query(text)).reshape(1,-1))
marge_title_info.para_len.append(len(text))
else:
marge_title_info.para_vec.append(title_info.para_vec[index])
marge_title_info.para_len.append(title_info.para_len[index])
# 处理文本位置信息
marge_title_info.para_list.append(title_info.para_list[index])
# 处理合并信息
for info1 in title_info.para_info[index]:
info1["offset"] += cache_len if cache_len ==0 else cache_len + 1
cache_para_info.append(info1)
marge_title_info.para_info.append(cache_para_info)
# 初始化信息
cache_para_info = []
cache_len = 0
offset = title_info.para_list[index]
cache_vec = None
else:
next_similarity = -0.1
if index < len(title_info.para_list)-1:
# next_similarity = title_info.para_info[index+1][0]["similarity"][0]
next_similarity = cosine_similarity(title_info.para_vec[index],title_info.para_vec[index+1])[0][0]
next_similarity = max(next_similarity,title_info.para_info[index+1][0]["similarity"][0])
last_similarity = -0.1
if cache_vec is not None: # 存在缓存
last_similarity = cosine_similarity(title_info.para_vec[index],cache_vec)[0][0]
elif len(marge_title_info.para_vec)>0:
last_similarity = cosine_similarity(title_info.para_vec[index],marge_title_info.para_vec[-1])[0][0]
if index > 0: # 第一段没有与上一段的相似度
last_similarity = max(title_info.para_info[index][0]["similarity"][0],last_similarity)
if last_similarity > marge_threshold_para or next_similarity > marge_threshold_para: # 需要合并
if last_similarity > next_similarity and cache_vec is None: #合并到上一段
marge_title_info.para_list[-1] = title_info.para_list[index]
for info1 in title_info.para_info[index]:
info1["offset"] += marge_title_info.para_len[-1] + 1
marge_title_info.para_info[-1].append(info1)
marge_title_info.para_len[-1] += title_info.para_len[index]+1
marge_title_info.para_vec[-1] = np.array(embedding.embed_query(
"\n".join(para_list[offset:title_info.para_list[index]]))).reshape(1,-1)
# 初始化信息
cache_para_info = []
cache_len = 0
offset = title_info.para_list[index]
cache_vec = None
else: # 将当前段落合并到cache中
text = "\n".join(para_list[offset:title_info.para_list[index]])
# 处理合并信息
for info1 in title_info.para_info[index]:
info1["offset"] += cache_len if cache_len ==0 else cache_len + 1
cache_para_info.append(info1)
cache_len = len(text)
cache_vec = np.array(embedding.embed_query(text)).reshape(1,-1)
# cache_para_info = title_info.para_info[index]
else:
# 当前段落不合并,则将cache中的内容合并到marge_title_info中
if cache_vec is not None: # 如果存在合并缓存,重新计算向量
marge_title_info.para_vec.append(cache_vec)
marge_title_info.para_len.append(cache_len)
marge_title_info.para_list.append(title_info.para_list[index-1])
marge_title_info.para_info.append(cache_para_info)
marge_title_info.para_vec.append(title_info.para_vec[index])
marge_title_info.para_len.append(title_info.para_len[index])
marge_title_info.para_list.append(title_info.para_list[index])
marge_title_info.para_info.append(title_info.para_info[index])
# 初始化信息
cache_para_info = []
cache_len = 0
offset = title_info.para_list[index]
cache_vec = None
if cache_vec is not None: # 如果存在合并缓存,重新计算向量
marge_title_info.para_vec.append(cache_vec)
marge_title_info.para_len.append(cache_len)
marge_title_info.para_list.append(title_info.para_list[-1])
marge_title_info.para_info.append(cache_para_info)
marge_title_info.title_info = title_info.title_info
return marge_title_info
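# End-to-end structuring pipeline: split the text into paragraphs, build up to four outline levels with
# thresholds decaying by parent_threshold at each level, then ask the selected LLM to title every block.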
async def deal_para(para, model_selector):
split_method = '合并细碎叶子段落'
split_threshold = 0.8
split_threshold_para = 0.75
marge_threshold_para = 0.5
parent_threshold = 0.9
global base_llm, llmchain
import time
start = time.time()
title_info = None
para_list = get_para_list(para)
# 防止过度循环
level = 0
while level < 4:
title_info = spilt_plus(para_list,split_threshold,split_threshold_para,title_info)
if title_info.title_info is None and split_method == "合并细碎叶子段落":
# 试图合并段落
title_info = marge_single_title(para_list,title_info,marge_threshold_para)
if title_info.title_info is not None and len(title_info.para_list) == len(title_info.title_info.para_list): # 如果标题没变,则说明没有找到合适的标题
title_info = title_info.title_info
break
level += 1
title_info2 = title_info
while title_info2 is not None:
print(title_info2.para_list)
title_info2 = title_info2.title_info
if title_info.is_root():
break
        # 相似度按 parent_threshold(0.9)比例逐级递减
split_threshold *= parent_threshold
split_threshold_para *= parent_threshold
print("处理耗时:",time.time()-start)
start = time.time()
title_info.gen_title2(para_list, model_selector)
print("处理耗时:",time.time()-start)
with open('output.txt', 'a', encoding="utf-8") as f:
sys.stdout = f
print("=================================================async_stc_title====================================================")
x = title_info
while x:
print(x.title)
x=x.title_info
print("=================================================async_stc_title====================================================")
sys.stdout = sys.__stdout__
return title_info.show(para_list)
with gr.Blocks(title="AI辅助知识库生成工具集") as demo:
gr.HTML("""<h1 align="center">AI辅助知识库生成工具集</h1>""")
model_selector = gr.Dropdown(choices=["ernie","chatglm3"], label="请选择一个模型", scale=1, min_width=50, value="ernie")
with gr.Tab("辅助生成培训资料"):
with gr.Row():
input_text = gr.Textbox(show_label=True, placeholder="输入需要处理的文档...", lines=10, scale=9, label="文本输入")
with gr.Row():
num_selector = gr.Slider(minimum=1, maximum=7, value=5, label="请选择问题数量",step=1)
with gr.Row():
mcqBtn = gr.Button("生成选择题培训资料")
tfqBtn = gr.Button("生成判断题培训资料")
dataframe = gr.DataFrame(visible=True,interactive=True,col_count=(4, "fixed"), row_count=(1, "fixed"))
dataframe.select(on_select, inputs=[dataframe], outputs=[dataframe])
with gr.Row():
storeBtn_mcq = gr.Button("选择题资料导出", interactive=False)
storeBtn_tfq = gr.Button("判断题资料导出", interactive=False)
gr.Markdown("""---""")
output_text = gr.Textbox(placeholder="输出...", lines=10, show_label=True, label="模型输出")
mcqBtn.click(
disable_btn_4, [], [mcqBtn, tfqBtn, storeBtn_mcq, storeBtn_tfq], queue=False
).then(
reset_dataframe_mcq_tfq, [], [dataframe], queue=False
).then(
set_textbox_container_false, [], [output_text], queue=False
).then(
async_chat_mcq, [input_text, model_selector, num_selector], [output_text], queue=True
).then(
set_textbox_container_true, [], [output_text], queue=False
).then(
parse_mcq,[output_text], [dataframe], queue=False
).then(
enable_btn_3, [], [mcqBtn, tfqBtn, storeBtn_mcq], queue=False
)
tfqBtn.click(
disable_btn_4, [], [mcqBtn, tfqBtn, storeBtn_mcq, storeBtn_tfq], queue=False
).then(
reset_dataframe_mcq_tfq, [], [dataframe], queue=False
).then(
set_textbox_container_false, [], [output_text], queue=False
).then(
async_chat_tfq, [input_text, model_selector,num_selector], [output_text], queue=True
).then(
set_textbox_container_true, [], [output_text], queue=False
).then(
parse_tfq,[output_text], [dataframe], queue=False
).then(
enable_btn_3, [], [mcqBtn, tfqBtn, storeBtn_tfq], queue=False
)
storeBtn_mcq.click(
save2file_mcq, [dataframe]
).then(
disable_btn_2, [], [storeBtn_mcq, storeBtn_tfq], queue=False
)
storeBtn_tfq.click(
save2file_tfq, [dataframe]
).then(
disable_btn_2, [], [storeBtn_mcq, storeBtn_tfq], queue=False
)
with gr.Tab("辅助生成知识库"):
with gr.Row():
input_text = gr.Textbox(show_label=True, placeholder="输入需要处理的文档...", lines=10, scale=9, label="文本输入")
with gr.Row():
num_selector = gr.Slider(minimum=1, maximum=7, value=5, label="请选择问题数量",step=1)
with gr.Row():
qaBtn = gr.Button("QA问答对生成")
dataframe = gr.DataFrame(visible=True, interactive=True, col_count=(3, "fixed"), row_count=(1, "fixed"))
dataframe.select(on_select, inputs=[dataframe], outputs=[dataframe])
storeBtn = gr.Button("QA问答对导出", interactive=False)
gr.Markdown("""---""")
output_text = gr.Textbox(placeholder="输出...", lines=10, show_label=True, label="模型输出")
qaBtn.click(
disable_btn_2, [], [qaBtn, storeBtn], queue=False
).then(
reset_dataframe_qa, [], [dataframe], queue=False
).then(
set_textbox_container_false, [], [output_text], queue=False
).then(
async_chat_qa, [input_text, model_selector,num_selector], [output_text], queue=True
).then(
set_textbox_container_true, [], [output_text], queue=False
).then(
parse_qa, [output_text], [dataframe], queue=False
).then(
enable_btn_2, [], [qaBtn, storeBtn], queue=True
)
storeBtn.click(
save2file_qa, [dataframe]
).then(
disable_btn_1, [], [storeBtn], queue=False
)
with gr.Tab("文章结构拆分"):
with gr.Row():
input_text = gr.Textbox(show_label=True, placeholder="输入需要处理的文档...", lines=10, scale=9, label="文本输入")
# split_method = gr.Radio(choices=["基于连续两句话相似度分割","基于已分割段落与句子相似度分割","基于已分割和连续同时判断","基于已分割和连续同时判断,并合并独立段落"], label="分割方式",value="基于连续两句话相似度分割")
# with gr.Row():
# split_threshold = gr.Slider(0.5,1.0,label="句子相似度阈值", value=0.8,step=0.01)
# split_threshold_para = gr.Slider(0.5,1.0,label="段落相似度阈值", value=0.75,step=0.01)
submitBtn = gr.Button("文本结构化")
# dataframe = gr.DataFrame(visible=True, interactive=True, col_count=(3, "fixed"), row_count=(1, "fixed"))
# dataframe.select(on_select,inputs=[dataframe],outputs=[dataframe])
# storeBtn = gr.Button("文本结构化导出")
# stc_text = gr.Textbox(show_label=True, placeholder="等待文本结构化结果...", lines=10, label="文本结构化结果展示:")
gr.Markdown("""# 文本结构化展示""")
gr.Markdown("""---""")
markdown_text = gr.Markdown()
submitBtn.click(
disable_btn_1, [], [submitBtn]
).then(
reset_textbox_stc, [], [markdown_text]
).then(
deal_para, [input_text, model_selector], [markdown_text]
).then(
enable_btn_1, [], [submitBtn]
)
# storeBtn.click(save2file_stc, [dataframe])
with gr.Tab("文本修正优化"):
with gr.Row():
input_text = gr.Textbox(show_label=True, placeholder="输入需要处理的文档...", lines=10, label="文本输入")
with gr.Column():
gr.Markdown("""## 文章修正优化效果展示""")
markdown_text = gr.Markdown()
with gr.Row():
switch_tone = gr.Checkbox(show_label=False, value=False, scale=1, label="文本风格", interactive=True, container=True)
tone_selector = gr.Radio(show_label=False, min_width=500,choices=["正式","友好","轻松","强调","客观"], interactive=False, label="文本语气选择",value="", scale=4)
tone_textbox = gr.Textbox(show_label=False, lines=1, scale=5, label="语气介绍", interactive=False, value="")
submitBtn = gr.Button("文本修正优化")
gr.Markdown("""---""")
with gr.Row():
diff = gr.HighlightedText(
label="Diff",
combine_adjacent=True,
show_legend=True,
color_map={"+": "red", "-": "#C0C0C0"})
switch_tone.change(f_switch_tone, [switch_tone], [tone_selector])
tone_selector.change(detailed_description_of_tone, [tone_selector], [tone_textbox])
submitBtn.click(
disable_btn_1, [], [submitBtn], queue=False
).then(
reset_highlighttext, [], [diff], queue=False
).then(
async_chat_typos, [input_text, model_selector, tone_selector, switch_tone],[markdown_text], queue=True
).then(
parse_typos, [markdown_text], [markdown_text], queue=False
).then(
diff_texts, [input_text, markdown_text], [diff], queue=False
).then(
enable_btn_1, [], [submitBtn], queue=False
)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--port", type=int, default=7654)
    parser.add_argument("--host", type=str, default="0.0.0.0")
    parser.add_argument("--base_llm_url", type=str, default="http://192.168.22.106:8003")
    parser.add_argument("--sensitive_word", type=str, default="敏感词.xls", help="敏感词记录文件")
    args = parser.parse_args()
global base_llm_url,llmchain,base_llm1,base_llm2,xls_file
xls_file = args.sensitive_word
base_llm_url=os.environ.get('LLM_URL_BASE', None)
if not base_llm_url:
base_llm_url=args.base_llm_url
base_llm1=ChatGLMSerLLM(url=base_llm_url)
base_llm2=ChatERNIESerLLM(chat_completion=ChatCompletion(ak="pT7sV1smp4AeDl0LjyZuHBV9", sk="b3N0ibo1IKTLZlSs7weZc8jdR0oHjyMu"), model_name="ERNIE-Bot-4")
demo.queue().launch(share=False, inbrowser=True, server_name=args.host, server_port=args.port)
\ No newline at end of file
# -*- coding: utf-8 -*-
import os, sys
sys.path.append("../..")
import gradio as gr
import argparse
from llm.chatglm import ChatGLMSerLLM
from langchain.chains import LLMChain
from langchain_openai import OpenAI
from langchain.prompts import PromptTemplate
from typing import Awaitable
import asyncio
from langchain.callbacks import AsyncIteratorCallbackHandler
from llm.ernie_with_sdk import ChatERNIESerLLM
from qianfan import ChatCompletion
chatglm_prompt_ct = """请仔细阅读以下资料,更正资料中的错别字,并按照以下格式输出:
更正后的资料:
文本内容
文本内容:
{context}"""
chatglm_prompt_ct2 = """请仔细阅读以下资料,更正资料中语法错误、错别字,并按要求输出。
输出格式为json:
```json
[
{{
"ori_text": 需要修改的原始句子,
"repare_text": 修改后的句子
}}
]
```
------------------------
资料:
{context}"""
qianfan_prompt_ct1 = """请根据下面提供的知识资料,生成可以作为考试的判断题,并给出正确答案。按照如下格式进行回答:
试题1:试题内容。
正确答案:正确/错误。
解析:选择原因(如果用到原资料中的内容,请列出来)
试题2:试题内容。
正确答案:正确/错误。
解析:选择原因(如果用到原资料中的内容,请列出来)
...
资料内容如下所示:
'''
{context}
'''"""
qianfan_prompt_ct2 = """现有一段文本,请你根据其内容划分出多个副标题(不要推理出原文中不存在的内容),并按照固定的JSON格式输出。JSON格式如下所示:
```{{
"副标题1": 字符串,第一个副标题,
"内容1": 字符串,生成副标题1的原文内容,
"副标题2": 字符串,显示根据原文内容生成的主题,
"内容2": 字符串,生成副标题2的原文内容,
"副标题3": 字符串,显示根据原文内容生成的主题,
"内容3": 字符串,生成副标题3的原文内容,
...
}}```
请你保证JSON格式的正确性,在输出之前确定格式可以被解析
文本内容如下:
'''
{context}
'''"""
qianfan_prompt_ct3 = """请根据以下资料内容生成问答对,(请你按照以下格式进行回答)
Q:问题
A:答案
Q:问题
A:答案
...
资料内容如下所示:
'''
{context}
'''"""
qianfan_prompt_ct4 = """请你根据下面这一段文本的内容生成可以用于考试的选择题,并按照一定格式进行输出。具体的输出格式如下所示:
试题1:
题目:。
A.选项内容 B.选项内容 C.选项内容 D.选项内容
正确答案:给出具体选项。
试题2:
题目:。
A.选项内容 B.选项内容 C.选项内容 D.选项内容
正确答案:给出具体选项。
...
文本内容如下:
'''
{context}
'''"""
qianfan_prompt_ct5 = """请仔细阅读以下资料,更正资料中的错别字,并按照以下格式进行输出。
更正后的资料:
文本内容
文本内容:
'''
{context}
'''"""
qianfan_prompt_ct6 = """请根据以下材料的内容对其进行结构划分,并按照以下格式进行输出(注意:你应保证所有段落的段落内容之和为文本原文):
段落1:
段落内容:
段落2:
段落内容:
...
文本内容如下:
```
{context}
```"""
qianfan_prompt_ct7 = """请为下面的每一段文字各取一个文章标题,并按照以下格式进行输出:
段落1:
标题:
段落内容:
段落2:
标题:
段落内容:
...
文本内容如下:
```
{context}
```"""
qianfan_prompt_ct8 = """'''
{context}
'''
已知有上述文本,现需要你仔细阅读上述资料,更正资料中的错别字之后进行输出。你需要按照如下格式进行输出:
更正之后的资料:
"""
qianfan_prompt_ct9 = """请为下面的每一段文字各取一个文章标题,并按照以下格式进行输出:
段落1:
标题:
段落内容:
段落2:
标题:
段落内容:
...
文本内容如下:
```
{context}
```"""
CHATGLM_PROMPT_CT = PromptTemplate(input_variables=["context"],template=chatglm_prompt_ct)
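# Only chatglm_prompt_ct is wired into the default chain below; qianfan_prompt_ct1-ct8 are offered as
# selectable prompt examples in the UI and can be applied through the prompt 提交 (submit) button.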
async def async_chat(input_text):
global llmchain
# Create an asynchronous callback handler
callback = AsyncIteratorCallbackHandler()
# Define an asynchronous function to wrap another asynchronous function and signal completion or exceptions using an event
async def wrap_done(fn: Awaitable, event: asyncio.Event):
try:
await fn # Wait for the provided asynchronous function to complete
except Exception as e:
# TODO: Handle exceptions - here, we simply print the exception information
import traceback
traceback.print_exc()
print(f"Caught exception: {e}")
finally:
event.set() # Set the event to indicate completion
# Create a task to perform message generation with ChatOpenAI and monitor the completion event of the callback handler
task = asyncio.create_task(wrap_done(llmchain.arun({"context":input_text},callbacks=[callback]),callback.done))
print("*"*20)
# Iterate asynchronously to obtain tokens from the callback handler
text=""
async for token in callback.aiter():
text=text+token
yield f"{text}" # Convert tokens to strings and yield them
await task # Wait for the task to complete
def chat(input_text):
global llmchain
if not input_text:
return ""
# tags_box = gr.Dropdown.update(choices=["1","2","3"], value=["1","2"])
result = llmchain.run({"context":input_text})
return result
def reset_state():
return "",""
def change_prompt(text_prompt):
global llmchain
new_prompt = PromptTemplate(input_variables=["context"],template=text_prompt)
llmchain.prompt = new_prompt
def reset_prompt():
global llmchain
llmchain.prompt = CHATGLM_PROMPT_CT
return chatglm_prompt_ct
with gr.Blocks() as demo:
gr.HTML("""<h1 align="center">文本归档</h1>""")
text_prompt = gr.Textbox(show_label=False, value=chatglm_prompt_ct,placeholder="提示词...", lines=5)
with gr.Row():
promptBtn = gr.Button("提交")
resetBtn = gr.Button("重置")
with gr.Row():
input_text = gr.Textbox(show_label=False, placeholder="输入需要处理的文档...", lines=10)
output_text = gr.Textbox(show_label=False, placeholder="输出...", lines=10)
with gr.Row():
submitBtn = gr.Button("提交")
emptyBtn = gr.Button("清除")
submitBtn.click(async_chat, [input_text],
[output_text], show_progress=True)
promptBtn.click(change_prompt,[text_prompt],[])
resetBtn.click(reset_prompt,outputs=[text_prompt])
emptyBtn.click(reset_state, outputs=[input_text,output_text])
gr.Examples(label="文案举例",examples=[
["""2020 年,新冠肺炎疫情席卷全球,世界经济发展的不确定性大幅增加。各国政府、企业探索采用新兴技术降低经济社会运行成本,提升实体经济运行效率,进一步寻找经济发展新的增长点。当前,区块链与云计算、人工智能等新技术基础设施交叉创新,越来越多的实体经济垂直领域呈现出“区块链+”的发展格局和“脱虚向实”的良好势头。
区块链产业长期向好,行业认知回归理性。随着中央对区块链技术发展的规划指引,我国区块链明确了以联盟链为基础,围绕服务实体经济、优化公共服务为目标的发展思路,产业发展方向进一步清晰。
现阶段广大从业者对区块链的信心持续向好,普遍认可区块链的长期战略性价值。与此同时,政策制定方、技术提供方、系统使用方等各类行业参与者已认识到区块链的发展并非只差临门一脚,而是尚处于行业发展的初期,需要产业结合实际情况,务实探索区块链的应用落地路径,合力解决赋能实体经济过程中的问题和挑战。
区块链技术创新依旧活跃,工程化和生态构建成为重点。从技术层面看,区块链技术还在发展早期,专利申请、学术研究等方面保持活跃,但为了应用尽快落地,行业不再片面追求新技术创新,而是进入务实发展阶段。具体系统开发过程中,技术要求主要是好用、易用、安全、性能好、支持互操作,技术发展呈现出工程化导向。同时各类区块链产品之间的技术差异逐步缩小,技术提供方将更多精力投入到生态构建当中,降低开发部署门槛,提升用户粘性,吸纳更多的开发者、使用者,以此构建自己的生态壁垒。未来一段时间,区块链行业技术发展将主要聚焦于工程化和生态构建。
区块链应用存证先行,逐渐向自动化协作和价值互联迈进。当前区块链技术分布广泛,应用牵头方呈现出技术服务方推动,政府主导的态势。经过近年来的发展,区块链在供应链金融、溯源、公共服务等领域取得一定成果,但其应用模式仍以文件、合同等的存证为主。
其他行业受限于数字化程度不足、合法合规性等因素约束,应用发展相对缓慢。区块链针对实体经济的核心价值正是促进产业上下游高效协作,提升产融结合效能。因此,区块链产业应用逐步向政务数据共享、供应链协同、跨境贸易等自动化协作和价值互联迈进。
区块链基础设施化呼声渐起,建设模式仍需深度探索。区块链构建可信数据共享环境的价值已经获得多方认可,各机构对利用区块链进行可信存证的需求逐步显现,技术使用方渴望可以使用易用性强、标准化程度高的区块链通用性产品,对区块链基础设施化的呼声已经出现。各个国家、地区尝试建设服务地域内多个组织的区块链基础设施,其中主要代表有欧盟 EBSI、美洲开发银行 LACChain 等。此外,我国将多云跨云 BaaS 服务、开放联盟链等作为区块链基础设施的探索。可以看到,区块链基础设施将是未来长远发展方向,但具体建设模式仍将在前进中持续探索,不断演进。
区块链不能包打天下,需与多技术配合完成数字化转型。正如TCP/IP 协议是可靠通信的基础协议,区块链则是可信交易的基础组件,但是这并不意味着区块链就是解决行业所有痛点的“万能钥匙”。
区块链在下一代信息技术创新发展中具有不可替代的作用,作为行业赋能的工具,单纯使用区块链技术构建的应用也难以发挥其价值。这就需要区块链与人工智能、物联网、大数据、5G 等其他技术相结合,利用协同效应形成一体化解决方案,共同助力数字化转型。"""],
["""服务进出口是指除去商品进出口以外的那部分国际收支。获得收入可看作服务出口,支出则可当作服务进口:服务这种商品,也被看作是无形商品。国际贸易包括许多不同种类的服务。
(1)Tourism and Transportation (旅游业、交通运输业) 交通运输业和旅游业是国际航空公司、航运公司、预约服务机构和旅馆收益的主要来源。
就国家而言,像希腊和挪威这样的国家,收入的大部分要依赖航运业,巴哈马群岛的收入则更多的来自海外游客,而不是靠其商品出口。
(2) Performance of activities abroad (国外商务活动) 企业要为其在国外进行的某些活动支付服务费用,这些服务包括银行、保险、租赁、工程和管理等。工程服务常常是以交钥匙工程承包方式进行的。承建生产设施的合同规定工程完成时,将全部设施交付转让给物主。服务管理费用的支付以管理合同为依据,根据这一合同,其中某公司为对方公司提供管理人员进行一般管理或专门管理。
(3) Use of assets from abroad (国外资产的运用) 特许使用费是使用国外资产所支付的费用,如商标、专利、版权或其他类似许可证协定那样的合同项目下的专业技术。特许使用费也可以用于特许经营。在这种经营方式中, 一方当事人(给予特许者)将其商标的使用权出售给另一方独立的当事人(特许持有人),该使用权即成为特许权持有人的基本资产,此外,特许者还要给予特许持有人业务上的资助,如提供零配件,提供管理服务或技术等。
企业在成功地建立起出口市场之后,往往倾向于采用国外许可证交易或特许经营。这种转移往往比早期单纯出口要承担更多的国际义务。这是因为企业通常需要向国外派遣技术人员以帮助被许可人或特许持有人建立和选择新产品的生产设施。"""],
["""合资企业是根据合资各方同意的规章而建立的一种商业关系,它涉及资产的统筹,共同经营管理,分享利润和分担风险。从法律上讲,合资企业是一种合伙关系, 一种可以被各种产业合作形式所采取的商业机构模式;换句话说,就是以独立和联合的形式,把共同销售、服务、生产等方面按法律程序组织成一个合资企业。合资企业可以是股权式合营企业,也可以是非股权式合营企业。如果是前者,将建立一个独立的合资实体,当地合资者购买合资资本的部分股权。如果是后者,就不再建立另外的合资实体,联合完全建立在合同的基础上。
不论是股权式合营企业,还是非股权式合营企业都要共同承担风险,分享利润。合资企业比出口贸易、进口贸易或许可证贸易更加稳定,比独自经营的费用要低。然而,合资企业也面临一些问题,如控制问题,其原因是由于合资企业要求跨国合作,还有诸如所有权比例,投资数量以及出口量多少,如何公平分配利润等问题。"""]], inputs=[input_text])
gr.Examples(label="提示词举例",examples=[[qianfan_prompt_ct1],[qianfan_prompt_ct2],[qianfan_prompt_ct3],[qianfan_prompt_ct4],[qianfan_prompt_ct5],[qianfan_prompt_ct6],[qianfan_prompt_ct7],[qianfan_prompt_ct8]], inputs=[text_prompt])
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--port", type=int, default=8081)
    parser.add_argument("--host", type=str, default="192.168.0.66")
    parser.add_argument("--base_llm_url", type=str, default="http://192.168.0.148:8000")
    args = parser.parse_args()
global base_llm_url,llmchain
base_llm_url=os.environ.get('LLM_URL_BASE', None)
if not base_llm_url:
base_llm_url=args.base_llm_url
base_llm=ChatERNIESerLLM(chat_completion=ChatCompletion(ak="pT7sV1smp4AeDl0LjyZuHBV9", sk="b3N0ibo1IKTLZlSs7weZc8jdR0oHjyMu"))
llmchain = LLMChain(llm=base_llm, prompt=CHATGLM_PROMPT_CT,llm_kwargs={"temperature":0.9})
# openai = OpenAI(model_name="chatglm3-6b", openai_api_key="token1",openai_api_base=base_llm_url + "/v1")
# llmchain = LLMChain(llm=openai, prompt=CHATGLM_PROMPT_CT,verbose=True,llm_kwargs={"temperature":1.0})
demo.queue().launch(share=False, inbrowser=True,server_name="0.0.0.0",server_port=args.port)
import os, sys
sys.path.append("../..")
import gradio as gr
import argparse
from llm.chatglm import ChatGLMSerLLM
from langchain.chains import LLMChain
from langchain_openai import OpenAI
from langchain.prompts import StringPromptTemplate,PromptTemplate
from scenarios.psbc.model_serve import modelcall_prase,modelcall_prase_ct,CHATGLM_PROMPT,CHATGLM_PROMPT_CT
from langchain.vectorstores.faiss import FAISS
from langchain_core.documents import Document
from langchain.embeddings.huggingface import (
HuggingFaceEmbeddings,
)
from scenarios.psbc.tag_memory_store.vectorstore import search_tags
tags = ["贷款业务","即期交易","贴现市场","远期外汇","买入汇率","外汇交易","货币政策","通货膨胀","浮动汇率","套算汇率",
"票据贴现","外汇市场","即期汇率","汇率差价","短期票据","市场机制","期货交易","债务关系","免责条款","远期汇率",
"套期保值","本票","国际信贷","保险单据","套汇策略","商业汇票","保值策略","市场风险","利息率期权","私募债券",
"浮动利率","不可撤销信用证","固定汇率","可撤销信用证","固定利率","汇款业务","货币互换","票据行为","通货期权","利息率期货",
"跟单汇票","票据权利","风险保值","跟单信用证","汇率风险","远期本票","货币贬值","布雷顿森林货币体系","保兑信用证","黄金市场",
"资产负债比率","外汇保值条款","远期外汇交易","对外贸易保险","国际金融市场","承兑汇票","国际支付","国际结算","外汇证券市场","银行贷款",
"信用证","自由交易市场","贸易规则","借款协议","保底期权","特别提款权","税收政策","贴现率","出口融资","信用形式",
"票据法","国际债券","外汇资产","国际法律","资信调查","民间经贸组织","国际贸易术语","国际贸易结算规则","国际黄金市场","货币制度",
"金币本位制","金块本位制","金本位制度","汇率机制","汇率变动","国际经济关系","利率计算","外汇保值","套汇活动","双边经济援助",
"利率互换","期权交易","股票期权交易","票据清算制度","利率政策","贷款利率","商业发票","国际非贸易结算","信用凭证","标价法"]
category=["国际金融市场","跟单信用证与国际惯例","国际结算","国际贸易信用与结算","国际贸易"]
def chat(state,input_text,tags_box,filter_num):
global llmchain,vectorstore
if not input_text:
return {},"","",[],[],gr.CheckboxGroup([],value=[]),"",""
print(tags_box)
# tags_box = gr.Dropdown.update(choices=["1","2","3"], value=["1","2"])
# result = llmchain.run({"question":input_text})
result, title, summary, tags_out, category_out, = modelcall_prase_ct(llmchain,input_text,category=",".join(category))
# result, title, summary, tags_out, category_out, = modelcall_prase(llmchain,input_text)
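# search_tags matches the model-generated tags against the FAISS tag store; filter_num is the
# similarity threshold that decides which matches go straight into the tag dropdown (tags_filter)
# and which are only offered for manual adoption (tags_suggest).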
tags_filter,tags_suggest = search_tags(native_tags=tags_out,faiss_vectorstore=vectorstore,threshold=filter_num)
if "text" in result:
output_text = gr.Textbox(visible=True,value=result["text"])
else:
output_text = gr.Textbox(visible=True,value=result)
state = {"tags_suggest":tags_suggest,"model_tags":tags_out}
tags_box_suggest = gr.CheckboxGroup(tags_suggest,value=[])
return state,title,category_out,tags_out,tags_filter,tags_box_suggest,summary,output_text
def adopt_tags(state,tags_box_suggest,tags_box_filter):
if len(tags_box_suggest) <= 0:
return state,tags_box_suggest,tags_box_filter
tags_box_filter.extend(tags_box_suggest)
tags_box_filter_update = gr.Dropdown(tags, value=tags_box_filter)
new_suggest_tags = []
for tag in state["tags_suggest"]:
if tag not in tags_box_suggest:
new_suggest_tags.append(tag)
state["tags_suggest"] = new_suggest_tags
tags_box_suggest_update = gr.CheckboxGroup(new_suggest_tags,value=[])
return state,tags_box_suggest_update,tags_box_filter_update
def filter_change(state,filter_num,tags_box_suggest,tags_box_filter):
if not state or "model_tags" not in state or len(state["model_tags"]) == 0:
return state,tags_box_suggest,tags_box_filter
global llmchain,vectorstore
tags_filter,tags_suggest = search_tags(native_tags=state["model_tags"],faiss_vectorstore=vectorstore,threshold=filter_num)
state["tags_suggest"] = tags_suggest
tags_box_suggest_update = gr.CheckboxGroup(tags_suggest,value=[])
return state,tags_box_suggest_update,tags_filter
def reset_state():
output_text = gr.Textbox(visible=False,value="")
return {},"","","",[],[],gr.CheckboxGroup([],value=[]),"",output_text
def dropdown_event_handler(value):
print("Dropdown value selected: ", value)
with gr.Blocks(title="AI辅助工具") as demo:
gr.HTML("""<h1 align="center">文本归档</h1>""")
input_text = gr.Textbox(show_label=False, placeholder="输入需要处理的文档...", lines=10)
with gr.Row():
submitBtn = gr.Button("提交")
emptyBtn = gr.Button("清除")
title_box = gr.Textbox(label="标题", placeholder="标题", interactive=True)
with gr.Row():
category_box = gr.Dropdown(category,label="类别", allow_custom_value=True,interactive=True,scale=1,min_width=300)
with gr.Column(scale=3):
tags_box_filter = gr.Dropdown(tags, value=[], multiselect=True, label="标签",interactive=True,allow_custom_value=True)
tags_box_suggest = gr.CheckboxGroup([],value=[],interactive=True, label="建议标签")
with gr.Column(scale=1):
filter_num = gr.Slider(minimum=0.5, maximum=1.0, value=0.85, step=0.01, interactive=True, label="标签匹配度",min_width=300)
adoptBtn = gr.Button("采纳")
# tags_box_suggest = gr.Dropdown([], value=[], multiselect=True, label="建议标签",interactive=True,allow_custom_value=True)
summary_box = gr.Textbox(label="摘要", placeholder="摘要", interactive=True)
state = gr.State()
## 插入多个空行
gr.Markdown("""---""")
with gr.Column():
## 圆角矩形框,显示文字并有删除按钮
tags_box = gr.Dropdown([], value=[], multiselect=True, label="标签",interactive=True,allow_custom_value=True,scale=6)
output_text = gr.Textbox(label="输出",visible=False, placeholder="Output...", lines=10)
submitBtn.click(chat, [state,input_text,tags_box,filter_num],
[state,title_box, category_box, tags_box,tags_box_filter,tags_box_suggest,summary_box,output_text], show_progress=True)
adoptBtn.click(adopt_tags,[state,tags_box_suggest,tags_box_filter],[state,tags_box_suggest,tags_box_filter])
filter_num.change(filter_change,[state,filter_num,tags_box_suggest,tags_box_filter],[state,tags_box_suggest,tags_box_filter])
emptyBtn.click(reset_state, outputs=[state,input_text, title_box, category_box, tags_box,tags_box_filter,tags_box_suggest,summary_box,output_text])
gr.Examples(examples=[
["""由于各国货币制度的不同,所以,为了使不同的货币间的清算得以顺利进行,就必须解决各国货币彼此之间的兑换问题,即买卖不同的货币,也就是进行外汇买卖。进行外汇买卖要有一定的场所,这就形成了外汇市场。
狭义的国际外汇市场是指银行间的外汇买卖。外汇银行与客户之间的交易所产生的结果必然有买卖差额,银行本身经过抵冲,其结果可能出现头寸过剩或头寸不足,根据不同的差额,银行就可以在外汇市场上进行抛售或买进,以避免汇率变化所带来的损失。
外汇市场,按有无固定场所分为两种形式:一种是定点交易,凡是参加外汇交易的代表,在一定时间内集中于外汇交易场所进行交易。外汇期货交易就是在交易所内通过公开喊价、拍卖的方式进行。外汇期货市场的首创者是美国芝加哥商品交易所的国际货币市场,它成立于 1972 年 5 月。此外,有固定场所的,还有法国巴黎、德国的法兰克福、比利时的布鲁塞尔等。另一种是非定点交易,即买卖双方通过电话、电传等通讯手段进行交易,如伦敦外汇市场就是这种交易方式。这种交易一般能在 1—2 分钟内结束讨价还价,达成口头协议。目前,除德国、法国等少数国家的外汇市场外,多数国家的外汇市场都脱离了交易所,成为没有固定场所的无形市场。"""],
["""国际商会(International Chamber of Commerce, ICC)是世界重要的民间经贸
组织,成立于1919年,总部设在巴黎。目前在92个国家设有国家委员会,拥有来自122个国家的近10 000家会员公司、协会和个人会员。
国际商会的宗旨是:在经济和法律领域,以有效的行动促进国际贸易和投资的发展。它通过其下设的十几个专业委员会和数十个工作组,制定许多国际商业领域的规则和惯例,如国际贸易术语、国际贸易结算规则等,为全世界广泛采用。国际商会是联合国的重要对话伙伴,并与其他许多重要的国际组织,如世界贸易组织、欧洲联盟、经济合作与发展组织等,保持着密切的关系,对这些组织在制定有关国际商业的政策时有着重要的影响。国际商会为广大商界提供的诸如仲裁、临时进口单证系统、贸易信息网等服务,极大地便利了商界的国际经贸实务操作。
国际商会以贸易为促进和平、繁荣的强大力量,推行一种开放的国际贸易、投资体系和市场经济,且其会员公司和协会皆从事国际商业活动,因此它所制定用以规范国际商业合作的规章,如:《国际贸易术语解释通则》、《托收统一规则》、《跟单信用证统一惯例》等被广泛地应用于国际贸易中,并成为国际贸易不可缺少的一部分。"""],
], inputs=[input_text])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--port", type=int, default=8081)
parser.add_argument("--host", type=str, default="0.0.0.0")
parser.add_argument("--base_llm_url", type=str, default="http://192.168.0.148:8000")
parser.add_argument("--embedding", type=str, default="/model/bge-large-zh-v1.5")
args = parser.parse_args()
global base_llm_url,llmchain,vectorstore
base_llm_url=os.environ.get('LLM_URL_BASE', None)
if not base_llm_url:
base_llm_url=args.base_llm_url
# base_llm=ChatGLMSerLLM(url=base_llm_url)
# llmchain = LLMChain(llm=base_llm, prompt=CHATGLM_PROMPT,llm_kwargs={"temperature":0.9})
openai = OpenAI(model_name="chatglm3-6b", openai_api_key="token1",openai_api_base=base_llm_url + "/v1")
llmchain = LLMChain(llm=openai, prompt=CHATGLM_PROMPT_CT,verbose=True,llm_kwargs={"temperature":1.0})
docs = [Document(page_content=tag) for tag in tags]
embedding = HuggingFaceEmbeddings(model_name=args.embedding)
vectorstore = FAISS.from_documents(documents=docs,embedding=embedding)
demo.launch(share=False, inbrowser=True,server_name=args.host,server_port=args.port)
import os, sys
from os import path
sys.path.append("../")
from abc import ABC, abstractmethod
import json
from typing import List,Any,Tuple,Dict
from langchain.schema import Document
from vector.pgsqldocstore import PgSqlDocstore,str2hash_base64
class DocumentCallback(ABC):
@abstractmethod #pre-store hook: process documents before they are written to the vector store
def before_store(self,docstore:PgSqlDocstore,documents:List[Document]) -> List[Document]:
pass
@abstractmethod #post-search hook: process retrieved documents, e.g. to rebuild their structure
def after_search(self,docstore:PgSqlDocstore,documents:List[Tuple[Document, float]],number:int = 1000) -> List[Tuple[Document, float]]:
pass
class DefaultDocumentCallback(DocumentCallback):
def before_store(self,docstore:PgSqlDocstore,documents:List[Document]) -> List[Document]:
output_doc:List[Document] = []
for doc in documents:
if "next_doc" in doc.metadata:
doc.metadata["next_hash"] = str2hash_base64(doc.metadata["next_doc"])
doc.metadata.pop("next_doc")
output_doc.append(doc)
return output_doc
def after_search(self,docstore:PgSqlDocstore,documents:List[Tuple[Document, float]],number:int = 1000) -> List[Tuple[Document, float]]: #post-search: deduplicate hits and follow next_hash links to append subsequent paragraphs
output_doc:List[Tuple[Document, float]] = []
exist_hash = []
for doc,score in documents:
print(exist_hash)
dochash = str2hash_base64(doc.page_content)
if dochash in exist_hash:
continue
else:
exist_hash.append(dochash)
output_doc.append((doc,score))
if len(output_doc) > number:
return output_doc
fordoc = doc
while ("next_hash" in fordoc.metadata):
if len(fordoc.metadata["next_hash"])>0:
if fordoc.metadata["next_hash"] in exist_hash:
break
else:
exist_hash.append(fordoc.metadata["next_hash"])
content = docstore.TXT_DOC.search(fordoc.metadata["next_hash"])
if content:
fordoc = Document(page_content=content[0], metadata=json.loads(content[1]))
output_doc.append((fordoc,score))
if len(output_doc) > number:
return output_doc
else:
break
else:
break
return output_doc
import re
import time
from pydantic import BaseModel
from langchain.prompts import StringPromptTemplate,PromptTemplate
from langchain import LLMChain
from qa.question import QuestionRDF
from similarity import VectorStore_FAISS
prompt_expert_template = """你是浦发硅谷银行网银系统的专家,请帮助解答用户在使用过程中遇到的问题。
{question}
"""
prompt_history_template = """{history}
上面是之前的对话,你可以继续回答用户的问题。
{question}
"""
prompt_enhancement_template = """{similarity}
请结合上述内容回答以下问题,不要提无关内容:
{question}
"""
prompt_enhancement_history_template = """{history}
上面是之前的对话,下面是可参考的内容。
{similarity}
请结合上述内容回答以下问题,不要提无关内容:
{question}
"""
class Chatbot:
def __init__(self, model, vectorstore_faiss:VectorStore_FAISS,tokenizer=None, base_model=None, base_tokenizer=None, source_prefix=None,re_history=None):
self.model = model
self.tokenizer = tokenizer
self.base_model = base_model
self.base_tokenizer = base_tokenizer
self.source_prefix = source_prefix
self.re_history = re_history
self.vectorstore_faiss = vectorstore_faiss
def _build_history(self, history=None):
if history is None:
return None
prompt=""
for i, (old_query, response) in enumerate(history):
prompt += "问:{}\n答:{}\n\n".format(old_query, response)
return prompt
def _build_prompt(self, query, history=None):
if history is None:
history = []
prompt = ""
for i, (old_query, response) in enumerate(history):
prompt += "[Round {}]\n\n问:{}\n\n答:{}\n\n".format(i + 1, old_query, response)
prompt += "[Round {}]\n\n问:{}\n\n答:".format(len(history) + 1, query)
return prompt
def rdf_question(self, question, history):
rdf = QuestionRDF(self.base_model)
return rdf.generate(history, question)
def chat_with_llm(self, input, isExpert=False, isEnhancement=False, isBase=False, dialog=[], temp=0.8):
run_llm = self.model if isExpert else self.base_model
history = self._build_history(self.re_history(dialog)) if self.re_history is not None else self._build_history(dialog)
similarity=None
# if history is not None and len(history) > 0:
# question = self.rdf_question(input, history)
# else:
# question = input
# print("问:", question)
question = input
if isExpert: # 专家指令,不带历史
history, similarity = None, None
prompt=PromptTemplate.from_template(prompt_expert_template)
elif isEnhancement: # 知识增强,不带历史,补充相似度
history, similarity = None, self.vectorstore_faiss._join_document(self.vectorstore_faiss.get_text_similarity_with_score(input))
if similarity and similarity.strip(): # 有补充知识
print("相似度:", similarity)
prompt=PromptTemplate.from_template(prompt_enhancement_template) if history is None else PromptTemplate.from_template(prompt_enhancement_history_template)
else: # 无补充知识,退化为专家指令
run_llm = self.model
prompt=PromptTemplate.from_template(prompt_expert_template)
# prompt=PromptTemplate.from_template(prompt_history_template) if history is not None else PromptTemplate.from_template("{question}")
else: # 普通问答,带历史
question=input
prompt=PromptTemplate.from_template(prompt_history_template) if history is not None else PromptTemplate.from_template("{question}")
chain=LLMChain(llm=run_llm, prompt=prompt,llm_kwargs={"temperature":temp})
start=time.time()
response=chain.run({"history":history,"question":question,"similarity":similarity})
cost_time=time.time()-start
print("cost:", round(cost_time, 2), "s")
return response, [], input
def chat(self, input, isExpert=False, isEnhancement=False, isBase=None, dialog=[], temp=0.8):
if isExpert:
print("专家指令",end=" ")
if self.source_prefix is not None:
history = []
prompt = self.source_prefix + input
else:
prompt = input
input = f"[专家指令]{input}"
elif isEnhancement:
print("知识增强",end=" ")
history = []
similarity = self.vectorstore_faiss._join_document(self.vectorstore_faiss.get_text_similarity_with_score(input))
if similarity is not None :
prompt=f"{similarity}\n请根据上述内容回答以下问题:\n{input}"
elif self.source_prefix is not None:
prompt = self.source_prefix + input
else:
prompt = input
input = f"[知识增强]{input}"
else:
print("普通问答",end=" ")
if self.re_history is not None:
history = self.re_history(dialog)
else:
history = dialog
prompt = input
input = f"[普通问答]{input}"
if isBase is not None and isBase:
print("基础模型")
exec_model, exec_tokenizer = self.base_model, self.base_tokenizer
else:
print("增强模型")
exec_model, exec_tokenizer = self.model, self.tokenizer
start=time.time()
# response, history = exec_model.chat(
# exec_tokenizer, prompt, history, temperature=temp)
prompt = self._build_prompt(prompt, history)
response = exec_model(prompt)
cost_time=time.time()-start
if exec_tokenizer is not None:
input_token_size = len(exec_tokenizer.encode(input))
output_token_size = len(exec_tokenizer.encode(response))
print()
print("【Itoken_size:", input_token_size, "】prompt:", prompt)
print("【Otoken_size:", output_token_size, "】response:", response)
print("cost:", round(cost_time, 2), "s",
"tps:", round((input_token_size + output_token_size / cost_time), 2),"token/s")
print("--------------------------------------------------")
else:
print("cost:", round(cost_time, 2), "s")
print("--------------------------------------------------")
return response, history, input
import os
import sys
import time
import requests
sys.path.append("../..")
from llm.loader import ModelLoader
from common import consts
##👇--------- config -------------
from argparse import Namespace
cfg = Namespace()
cfg.vector = True
cfg.expert = True
cfg.max_source_length = 64
cfg.max_target_length = 128
cfg.pre_seq_len = 128
#model
cfg.model_name_or_path = consts.MODEL_PATH_ChatGLM2_32K #远程'THUDM/chatglm-6b'
cfg.quantization_bit = None #仅仅预测时可以选 4 or 8
cfg.source_prefix = consts.INSTRUCTION_V1
cfg.checkout_mode = "32k" # lora or ptuning
cfg.ckpt_path = '../../../model/ckpt/chatglm2-6b-32k-qlora-INSv11-rank16-5e-4-30'
cfg.ptuning_path = '../../../model/ckpt/chatglm2-6b-32k-pt-spdsvb-INSv11-128-3e-3-3000/checkpoint-3000'
cfg.output_path='../../../exam/chatglm2-6b-32k-vector'
##👆--------- config end -------------
## --------- load model --------------
if cfg.checkout_mode == "lora":
# lora 微调 checkpoint 及模型加载
loader = ModelLoader(cfg.model_name_or_path)
loader.load_lora(cfg.ckpt_path)
elif cfg.checkout_mode == "ptuning":
# ptuning v2 微调 checkpoint 及模型加载
loader = ModelLoader(cfg.model_name_or_path, cfg.pre_seq_len, False)
loader.load_prefix(cfg.ptuning_path)
else:
loader = ModelLoader(cfg.model_name_or_path)
model,tokenizer = loader.models()
if cfg.quantization_bit is not None:
model = loader.quantize(cfg.quantization_bit)
model = model.cuda().eval()
## --------- load model end --------------
## --------- questions --------------
questions = [
"证书更新时,提示“当前证书和用户绑定非同一证书“,这么处理",
"提示:该操作需要一个智能卡,但设备中目前没有智能卡,怎么处理",
"提示:证书库中没有可用的证书,原因是啥",
"提示:多于一把USBKEY,怎么处理",
"提示:出现了内部错误,怎么处理",
"提示:无法在所请求的端口上访问web站点,怎么处理",
"提示:谷歌浏览器无法反显操作员号,网银助手显示无异常,怎么处理",
"网银助手提示:没有检测到usbkey中的证书",
"网银支持的系统及浏览器",
"网银管理员忘记密码后怎么办",
"客户想给某一操作员开通手机银行,该如何操作?",
"网银密码设置规则",
"网银初始密码如何获取",
"动账联系人如何维护",
"登录密码失败最大次数",
"在柜面给网银客户挂新账号后,在网银做交易下拉框中没有刚挂的账号,是什么原因?",
"网银做交易时提示“授权模型不匹配”,是什么原因?",
"客户登录时密码输入框提示需下载安全控件,是什么原因?",
"客户经办了一笔人民币跨行转账后,发现填写错误,想要修改或撤销该笔交易,在哪个功能下可以执行该操作?",
"客户经办了一笔人民币跨行转账后,复核人员在哪个功能下可以进行相应的复核操作?",
"客户想要对某一账号设置日累计限额和笔数,在哪个功能下可以进行相应设置",
]
import pandas as pd
data = pd.DataFrame({"id":[i+1 for i in range(len(questions))],"question":questions})
start = time.time()
# 专家指令
if cfg.expert:
responses = []
for q in questions:
print(f"Q: {q}")
prompt = cfg.source_prefix + q
print("prompt:",prompt)
response,_=model.chat(tokenizer,prompt,temperature=0)
responses.append(response)
# data.loc[data.question==q,"response"] = response
print(f"A: {response}")
print("----"*10)
data["专家指令回答"] = responses
ins_time = time.time()
print(f"1 cost time:{ins_time-start}")
data["评分1"] = [""] * len(questions)
# 知识增强
if cfg.vector:
responses = []
for q in questions:
print(f"Q: {q}")
from similarity import get_text_similarity
similarity = get_text_similarity(q)
if similarity is not None:
prompt=f"{similarity}\n请结合上述内容回答以下问题:\n{q}"
else:
prompt = cfg.source_prefix + q
print("prompt:",prompt)
response,_=model.chat(tokenizer,prompt,temperature=0)
responses.append(response)
# data.loc[data.question==q,"response"] = response
print(f"A: {response}")
print("----"*10)
data["知识增强回答"] = responses
print(f"2 cost time:{time.time()-ins_time}")
data["评分2"] = [""] * len(questions)
data.to_csv(f"{cfg.output_path}.csv",index=False)
import sys
sys.path.append("../..")
from loader import load
def extract_values(values,content, elements, extractor):
doc = "\n".join(content)
eles = extractor.extract_foreach(doc, elements)
# eles = extractor.extract(doc, elements)
for e in eles:
try:
k, v = e.split(":", maxsplit=1)
k = k.strip()
v = v.strip()
if v is not None and v != "" and v != "未知" and k in elements:
values[k] = v + "," + values[k] if values.get(k) else v
except Exception as exp:
print(exp)
print(e)
continue
return values
def contract(extractor,file,elements,max_length):
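# Load the file, accumulate paragraphs into windows of at most max_length characters, run the
# element extractor on each window, and merge the recovered key/value pairs across windows.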
print(file,elements,max_length)
docs = load(file)
if docs is None:
return "Error: could not load file"
print(len(docs))
content = []
content_len = 0
values={k:"" for k in elements}
for d in docs:
if content_len+len(d.page_content)>max_length:
values = extract_values(values,content, elements, extractor)
print("\n".join([f"{k}:{v}" for k,v in values.items()]))
content=[d.page_content]
content_len=len(d.page_content)
else:
content.append(d.page_content)
content_len+=len(d.page_content)
values = extract_values(values,content, elements, extractor)
# return [f"{k}:{v}" for k,v in values.items()]
return values
import json
import os, sys
import re
sys.path.append("../..")
import time
from common import consts
# -------------- config --------------
from argparse import Namespace
cfg = Namespace()
cfg.source_prefix = consts.INSTRUCTION_V1
####文档问答配置
cfg.embeddings_model='../../../model/moka-ai/m3e-large'
cfg.vectorstore=os.path.join(os.path.expanduser('~'),'.beai/vectorstore')
####知识增强,向量库配置
cfg.enhance_host= os.environ.get('AIGC_VECTOR_HOST', '192.168.22.106')
cfg.enhance_dbname=os.environ.get('AIGC_VECTOR_DBNAME', 'new_vecdoc')
cfg.enhance_username= os.environ.get('AIGC_VECTOR_USER', 'vecdoc')
cfg.enhance_password=os.environ.get('AIGC_VECTOR_PASSWORD', 'vecdoc')
cfg.enhance_port=os.environ.get('AIGC_VECTOR_PORT', '5432')
cfg.enhance_store_path=os.path.join(os.path.expanduser('~'),'.beai/vectorstore_enhance')
cfg.enhance_index_name="know"
cfg.enhance_threshold = 300
cfg.enhance_show_number = 3 #search number = show_number * 3
cfg.enhance_embeddings_model = '../../../model/moka-ai/m3e-large'
# -------------- config end--------------
## ----------- load model --------------
# from llm.loader import ModelLoader
# start = time.time()
# if cfg.checkout_mode == "lora":
# # lora 微调 checkpoint 及模型加载
# loader = ModelLoader(cfg.model_name_or_path)
# loader.load_lora(cfg.ckpt_path)
# elif cfg.checkout_mode == "ptuning":
# # ptuning v2 微调 checkpoint 及模型加载
# loader = ModelLoader(cfg.model_name_or_path, cfg.pre_seq_len, False)
# loader.load_prefix(cfg.ptuning_path)
# else:
# loader = ModelLoader(cfg.model_name_or_path)
# model,tokenizer = loader.models()
# if cfg.quantization_bit is not None:
# model = loader.quantize(cfg.quantization_bit)
# model = model.cuda()
# model = model.eval()
# loaded_time = time.time()
# print(f"Load model cost {loaded_time-start:.2f}s")
from llm.chatglm import ChatGLMSerLLM
spdsvb_llm_url=os.environ.get('LLM_URL_SPDSVB', 'http://localhost:8001')
llm=ChatGLMSerLLM(url=spdsvb_llm_url)
## ------------- load model end --------------
# -------------- load base model --------------
# base_loader = ModelLoader(consts.MODEL_PATH_ChatGLM2)
# base_loader.quantize(4)
# base_model,base_tokenizer = base_loader.models()
# base_model.cuda().eval()
base_llm_url=os.environ.get('LLM_URL_BASE', 'http://localhost:8002')
base_llm=ChatGLMSerLLM(url=base_llm_url)
# -------------- load base model end --------------
import flask
from flask import request
app=flask.Flask(__name__)
app.config['JSON_AS_ASCII'] = False
app.config.update(RESTFUL_JSON=dict(ensure_ascii=False))
def re_history(dialog):
if dialog is None:
return None
history=[]
if len(dialog) == 0:
return None
else:
for item in dialog[::-1]:
q = re.sub(r"^\[.+?\]", "", item.get("q"))
history.append((q,item.get("a")))
return history
def parse_text(text):
"""copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/"""
lines = text.split("\n")
lines = [line for line in lines if line != ""]
count = 0
for i, line in enumerate(lines):
if "```" in line:
count += 1
items = line.split('`')
if count % 2 == 1:
lines[i] = f'<pre><code class="language-{items[-1]}">'
else:
lines[i] = f'<br></code></pre>'
else:
if i > 0:
if count % 2 == 1:
line = line.replace("`", "\`")
line = line.replace("<", "&lt;")
line = line.replace(">", "&gt;")
line = line.replace(" ", "&nbsp;")
line = line.replace("*", "&ast;")
line = line.replace("_", "&lowbar;")
line = line.replace("-", "&#45;")
line = line.replace(".", "&#46;")
line = line.replace("!", "&#33;")
line = line.replace("(", "&#40;")
line = line.replace(")", "&#41;")
line = line.replace("$", "&#36;")
lines[i] = "<br>"+line
text = "".join(lines)
return text
from chatbot import Chatbot
from similarity import VectorStore_FAISS
# chatbot=Chatbot(model,tokenizer,base_model,base_tokenizer,source_prefix=cfg.source_prefix,re_history=re_history)
vecstore_faiss = VectorStore_FAISS(
embedding_model_name=cfg.enhance_embeddings_model,
store_path=cfg.enhance_store_path,
index_name=cfg.enhance_index_name,
info={"port":cfg.enhance_port,"host":cfg.enhance_host,"dbname":cfg.enhance_dbname,"username":cfg.enhance_username,"password":cfg.enhance_password},
show_number=cfg.enhance_show_number,
threshold=cfg.enhance_threshold)
chatbot=Chatbot(llm,vecstore_faiss,None,base_llm,None,source_prefix=cfg.source_prefix,re_history=re_history)
@app.route('/aigc/ask', methods=['POST'])
def chat():
# print("chat")
data=json.loads(request.get_data(as_text=True))
# print(data)
input = data.get('question')
input = re.sub(r"^\[.+?\]", "", input)
options=data.get("modelOptions",{})
isEnhancement=options.get("isEnhancement",False)
isExpert=options.get("isExpert",False)
isBase=options.get("isCommon",False)
temp = float(options.get("sliderTemp", 0.8))
dialog=data.get("dialog",[])
# response, history, input = chatbot.chat(input, isExpert, isEnhancement, isBase, data.get("dialog"), temp)
response, history, input = chatbot.chat_with_llm(input, isExpert, isEnhancement, isBase, dialog, temp)
# print(history)
return {
"code": 200,
"msg": "success",
"data": {
"q": input,
"a": parse_text(response),
}
}
@app.route('/user/login', methods=['POST'])
def login():
data=json.loads(request.get_data(as_text=True))
print(data)
return {
"code": 200,
"msg": "success",
"data": "testtoken"
}
from contract.extraction import ElementsExtractor
from contract.documentqa import DocumentQA
from contract.callbacks import PrintRetrievalHandler
# from llm.wrapper import WrapperLLM
# llm=WrapperLLM(model=model,tokenizer=tokenizer)
extractor=ElementsExtractor(llm=llm)
_documentqa=DocumentQA(llm=base_llm)
@app.route('/aigc/extract', methods=['POST'])
def extract():
# 从请求中获取数据
data = json.loads(request.form['params'])
print(data)
# 读取上传的文件
file = request.files['file']
if file is None:
return {"code": 0, 'msg': 'Error: could not load file'}
suffix = file.filename.split(".")[-1]
from tempfile import NamedTemporaryFile
temp_file = NamedTemporaryFile(suffix=f".{suffix}", delete=False)
temp_file.write(file.read())
temp_file.close()
filename = temp_file.name
# 获取模型选项中的参数
sliderAnswer = data.get("modelOptions").get("sliderAnswer")
sliderAnswer = int(float(sliderAnswer))
elements = data.get("tags")
from extract import contract
result = contract(extractor,filename,elements,8000)
os.unlink(filename)
# 返回提取结果
return {
"code": 200,
"msg": "success",
"data": [[k, v] for k, v in result.items()]
}
@app.route('/aigc/docqa', methods=['POST'])
def documentqa():
# 从请求中获取数据
data = json.loads(request.form['params'])
print(data)
# 读取上传的文件
filenames=[]
if request.files: #基于chatid的回话文件非必输
for _,file in request.files.items():
suffix = os.path.splitext(file.filename)[1]
prefix = os.path.splitext(os.path.basename(file.filename))[0]
from tempfile import NamedTemporaryFile
temp_file = NamedTemporaryFile(prefix = prefix,suffix=f"{suffix}", delete=False)
temp_file.write(file.read())
temp_file.close()
filenames.append(temp_file.name)
#根据chatid载入向量库
chatid=data.get("chatid")
answer=""
query=data.get("query")
detail=data.get("detail",True)
summary=data.get("summary",False)
callback_doc = None
#暂时只开放QA
_faiss = VectorStore_FAISS(embedding_model_name=cfg.embeddings_model,show_number=cfg.enhance_show_number,index_name=chatid,store_path=cfg.vectorstore,is_pgsql=False,threshold=cfg.enhance_threshold)
if len(filenames) > 0:
_faiss._add_documents_from_dir(filepaths=filenames,load_kwargs={"sentence_size":1024})
_faiss._save_local()
if detail:
callback_doc = PrintRetrievalHandler()
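# Branching: an empty query with summary=True summarizes the uploaded documents; otherwise the
# query is answered with a retrieval QA chain over this chat's FAISS index (map_reduce by default).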
if (query is None or len(query.strip()) == 0) and summary:
answer= _documentqa.summarize_document(filepaths=filenames,load_kwargs={"sentence_size":2048},chain_type="map_reduce", chain_type_kwargs={"token_max":3072,"verbose":True})
else:
answer=_documentqa.qa_from_document(query=data.get("query"),
retriever=_faiss.as_retriever(),
chain_type=data.get("chain_type") or "map_reduce",
chain_type_kwargs={"verbose":True},
callbacks=[callback_doc] if callback_doc else None)
# answer = _documentqa.route_chain(query=data.get("query"),filepaths=filenames,load_kwargs={"sentence_size":1000},embeddings_model=cfg.embeddings_model,chain_type=data.get("chain_type") or "refine",chain_type_kwargs={"verbose":True})
# if data.get("type") == "qa":
# answer=_documentqa.qa_from_document(query=data.get("query"),retriever=GetRetriever(cfg.embeddings_model,filepaths=filenames,vectorstorepath=cfg.vectorstore,index_name=chatid,load_kwargs={"sentence_size":1000},k=10),chain_type=data.get("chain_type") or "refine",chain_type_kwargs={"verbose":True})
# elif data.get("type") == "sum":
# answer=_documentqa.summarize_document(filepaths=filenames,chain_type=data.get("chain_type") or "refine",load_kwargs={"sentence_size":1000},chain_type_kwargs={"verbose":True})
# elif data.get("type") == "extract":
# answer=_documentqa.extract_from_document(query=data.get("query"),filepaths=filename,chain_type=data.get("chain_type") or "refine",chain_type_kwargs={"verbose":True},load_kwargs={"sentence_size":1000})
# else:
# return {
# "code": 0,
# "msg": f"Unknown request type:%s" % data.get("type")
# }
for filename in filenames:
os.unlink(filename)
print(answer)
return {
"code": 200,
"msg": "success",
"data": {
"q": data.get("query"),
"a": parse_text(answer),
"similarity":callback_doc.getsimilarity() if callback_doc else []
}
}
from contract.documentqa import del_vectorstore_path
@app.route('/aigc/docqa',methods=['DELETE'])
def delete():
data = json.loads(request.get_data(as_text=True))
chatid=data.get("chatid")
del_vectorstore_path(chatid, cfg.vectorstore)
return {
"code": 200,
"msg": "success",
"data": "delete success"
}
def main():
app.run(host="0.0.0.0",port=5000,debug=False)
if __name__ == "__main__":
main()
import os, sys
import re,time
from os import path
sys.path.append("../")
import copy
from typing import List,OrderedDict,Any,Optional,Tuple,Dict
from vector.pgsqldocstore import InMemorySecondaryDocstore
from langchain.vectorstores.faiss import FAISS,dependable_faiss_import
from langchain.schema import Document
from vector.pgsqldocstore import PgSqlDocstore
from langchain.embeddings.huggingface import (
HuggingFaceEmbeddings,
)
import math
import faiss
from langchain.vectorstores.utils import DistanceStrategy
from langchain.vectorstores.base import VectorStoreRetriever
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from loader import load
from langchain.embeddings.base import Embeddings
from VectorCallback import DocumentCallback,DefaultDocumentCallback
def singleton(cls):
instances = {}
def get_instance(*args, **kwargs):
if cls not in instances:
instances[cls] = cls(*args, **kwargs)
return instances[cls]
return get_instance
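# Note: the singleton cache is keyed by class, not by constructor arguments, so the first
# EmbeddingFactory(path) wins and later calls with a different model path reuse that instance.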
@singleton
class EmbeddingFactory:
def __init__(self, path:str):
self.path = path
self.embedding = HuggingFaceEmbeddings(model_name=path)
def get_embedding(self):
return self.embedding
def GetEmbding(path:str) -> Embeddings:
# return HuggingFaceEmbeddings(model_name=path)
return EmbeddingFactory(path).get_embedding()
import operator
from langchain.vectorstores.utils import DistanceStrategy
import numpy as np
class RE_FAISS(FAISS):
#deduplicate results while keeping each document's metadata
def _tuple_deduplication(self, tuple_input:List[Tuple[Document, float]]) -> List[Tuple[Document, float]]:
deduplicated_dict = OrderedDict()
for doc,scores in tuple_input:
page_content = doc.page_content
metadata = doc.metadata
if page_content not in deduplicated_dict:
deduplicated_dict[page_content] = (metadata,scores)
deduplicated_documents = [(Document(page_content=key,metadata=value[0]),value[1]) for key, value in deduplicated_dict.items()]
return deduplicated_documents
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
fetch_k: int = 20,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
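# Largely follows the upstream FAISS implementation; the local additions are deduplication of hits
# by page_content and an optional doc_callback that post-processes the results (e.g. re-attaching
# linked paragraphs) before the top k are returned.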
faiss = dependable_faiss_import()
vector = np.array([embedding], dtype=np.float32)
if self._normalize_L2:
faiss.normalize_L2(vector)
scores, indices = self.index.search(vector, k if filter is None else fetch_k)
docs = []
for j, i in enumerate(indices[0]):
if i == -1:
# This happens when not enough docs are returned.
continue
_id = self.index_to_docstore_id[i]
doc = self.docstore.search(_id)
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
if filter is not None:
filter = {
key: [value] if not isinstance(value, list) else value
for key, value in filter.items()
}
if all(doc.metadata.get(key) in value for key, value in filter.items()):
docs.append((doc, scores[0][j]))
else:
docs.append((doc, scores[0][j]))
docs = self._tuple_deduplication(docs)
score_threshold = kwargs.get("score_threshold")
if score_threshold is not None:
cmp = (
operator.ge
if self.distance_strategy
in (DistanceStrategy.MAX_INNER_PRODUCT, DistanceStrategy.JACCARD)
else operator.le
)
docs = [
(doc, similarity)
for doc, similarity in docs
if cmp(similarity, score_threshold)
]
if "doc_callback" in kwargs:
if hasattr(kwargs["doc_callback"], 'after_search'):
docs = kwargs["doc_callback"].after_search(self.docstore,docs,number=k)
return docs[:k]
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch before filtering to
pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
docs_and_scores = self.max_marginal_relevance_search_with_score_by_vector(
embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter
)
docs_and_scores = self._tuple_deduplication(docs_and_scores)
if "doc_callback" in kwargs:
if hasattr(kwargs["doc_callback"], 'after_search'):
docs_and_scores = kwargs["doc_callback"].after_search(self.docstore,docs_and_scores,number=k)
return [doc for doc, _ in docs_and_scores]
def getFAISS(embedding_model_name:str,store_path:str,info:dict = None,index_name:str = "index",is_pgsql:bool = True,reset:bool = False) -> RE_FAISS:
embeddings = GetEmbding(path=embedding_model_name)
docstore1:PgSqlDocstore = None
if is_pgsql:
if info and "host" in info and "dbname" in info and "username" in info and "password" in info:
docstore1 = PgSqlDocstore(info,reset=reset)
else:
docstore1 = InMemorySecondaryDocstore()
if not path.exists(store_path):
os.makedirs(store_path,exist_ok=True)
if store_path is None or len(store_path) <= 0 or not path.exists(path.join(store_path,index_name+".faiss")) or reset:
print("create new faiss")
index = faiss.IndexFlatL2(len(embeddings.embed_documents(["a"])[0])) #根据embeddings向量维度设置
return RE_FAISS(embedding_function=embeddings.client.encode,index=index,docstore=docstore1,index_to_docstore_id={})
else:
print("load_local faiss")
_faiss = RE_FAISS.load_local(folder_path=store_path,index_name=index_name, embeddings=embeddings)
if docstore1 and is_pgsql: #如果外部参数调整,更新docstore
_faiss.docstore = docstore1
return _faiss
class VectorStore_FAISS(FAISS):
def __init__(self, embedding_model_name:str,store_path:str,index_name:str = "index",info:dict = None, is_pgsql:bool = True,show_number = 5, threshold = 0.8, reset:bool = False,doc_callback:DocumentCallback = DefaultDocumentCallback()):
self.info = info
self.embedding_model_name = embedding_model_name
self.store_path = path.join(store_path,index_name)
if not path.exists(self.store_path):
os.makedirs(self.store_path,exist_ok=True)
self.index_name = index_name
self.show_number = show_number
self.search_number = self.show_number*3
self.threshold = threshold
self._faiss = getFAISS(self.embedding_model_name,self.store_path,info=info,index_name=self.index_name,is_pgsql=is_pgsql,reset=reset)
self.doc_callback = doc_callback
def get_text_similarity_with_score(self, text:str,**kwargs):
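# Translate the configured similarity threshold (0..1, higher = stricter) into an L2 distance cutoff
# for the FAISS search; for (approximately) unit-norm embeddings distance and cosine similarity relate
# as d = sqrt(2*(1-s)), and (1-threshold)*sqrt(2) is used here as a simple linear bound.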
score_threshold = (1-self.threshold) * math.sqrt(2)
docs = self._faiss.similarity_search_with_score(query=text,k=self.search_number,score_threshold=score_threshold,doc_callback=self.doc_callback,**kwargs)
return [doc for doc, similarity in docs][:self.show_number]
def get_text_similarity(self, text:str,**kwargs):
docs = self._faiss.similarity_search(query=text,k=self.search_number,doc_callback=self.doc_callback,**kwargs)
return docs[:self.show_number]
# #去重,并保留metadate
# def _tuple_deduplication(self, tuple_input:List[Document]) -> List[Document]:
# deduplicated_dict = OrderedDict()
# for doc in tuple_input:
# page_content = doc.page_content
# metadata = doc.metadata
# if page_content not in deduplicated_dict:
# deduplicated_dict[page_content] = metadata
# deduplicated_documents = [Document(page_content=key,metadata=value) for key, value in deduplicated_dict.items()]
# return deduplicated_documents
def _join_document(self, docs:List[Document]) -> str:
print(docs)
return "".join([doc.page_content for doc in docs])
def get_local_doc(self, docs:List[Document]):
ans = []
for doc in docs:
ans.append({"page_content":doc.page_content, "page_number":doc.metadata["page_number"], "filename":doc.metadata["filename"]})
return ans
# def _join_document_location(self, docs:List[Document]) -> str:
# 持久化到本地
def _save_local(self):
self._faiss.save_local(folder_path=self.store_path,index_name=self.index_name)
# 添加文档
# Document {
# page_content 段落
# metadata {
# page 页码
# }
# }
def _add_documents(self, new_docs:List[Document],need_split:bool = True,pattern:str = r'[?。;\n]'):
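# Splitting strategy: each incoming paragraph is split into sentences on ?。;\n, every non-empty
# sentence is indexed as its own vector, and the full paragraph is kept in metadata["paragraph"]
# for later reference.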
list_of_documents:List[Document] = []
if self.doc_callback:
new_docs = self.doc_callback.before_store(self._faiss.docstore,new_docs)
if need_split:
for doc in new_docs:
words_list = re.split(pattern, doc.page_content)
# 去掉重复项
words_list = set(words_list)
words_list = [str(words) for words in words_list]
for words in words_list:
if not words.strip() == '':
metadata = copy.deepcopy(doc.metadata)
metadata["paragraph"] = doc.page_content
list_of_documents.append(Document(page_content=words, metadata=metadata))
else:
list_of_documents = new_docs
self._faiss.add_documents(list_of_documents)
def _add_documents_from_dir(self,filepaths,load_kwargs: Optional[dict] = None):
self._add_documents(load.loads(filepaths,**(load_kwargs or {"mode":"paged"})))
def as_retriever(self, **kwargs: Any) -> VectorStoreRetriever:
"""
Return VectorStoreRetriever initialized from this VectorStore.
Args:
search_type (Optional[str]): Defines the type of search that
the Retriever should perform.
Can be "similarity" (default), "mmr", or
"similarity_score_threshold".
search_kwargs (Optional[Dict]): Keyword arguments to pass to the
search function. Can include things like:
k: Amount of documents to return (Default: 4)
score_threshold: Minimum relevance threshold
for similarity_score_threshold
fetch_k: Amount of documents to pass to MMR algorithm (Default: 20)
lambda_mult: Diversity of results returned by MMR;
1 for minimum diversity and 0 for maximum. (Default: 0.5)
filter: Filter by document metadata
Returns:
VectorStoreRetriever: Retriever class for VectorStore.
Examples:
.. code-block:: python
# Retrieve more documents with higher diversity
# Useful if your dataset has many similar documents
docsearch.as_retriever(
search_type="mmr",
search_kwargs={'k': 6, 'lambda_mult': 0.25}
)
# Fetch more documents for the MMR algorithm to consider
# But only return the top 5
docsearch.as_retriever(
search_type="mmr",
search_kwargs={'k': 5, 'fetch_k': 50}
)
# Only retrieve documents that have a relevance score
# Above a certain threshold
docsearch.as_retriever(
search_type="similarity_score_threshold",
search_kwargs={'score_threshold': 0.8}
)
# Only get the single most similar document from the dataset
docsearch.as_retriever(search_kwargs={'k': 1})
# Use a filter to only retrieve documents from a specific paper
docsearch.as_retriever(
search_kwargs={'filter': {'paper_title':'GPT-4 Technical Report'}}
)
"""
if not kwargs or kwargs.get("search_type") != "similarity_score_threshold":
default_kwargs = {'k': self.show_number}
if "search_kwargs" in kwargs:
default_kwargs.update(kwargs["search_kwargs"])
kwargs["search_kwargs"] = default_kwargs
elif "similarity_score_threshold" == kwargs["search_type"]:
default_kwargs = {'score_threshold': self.threshold,'k': self.show_number}
if "search_kwargs" in kwargs:
default_kwargs.update(kwargs["search_kwargs"])
kwargs["search_kwargs"] = default_kwargs
kwargs["search_kwargs"]["doc_callback"]=self.doc_callback
tags = kwargs.pop("tags", None) or []
tags.extend(self._faiss._get_retriever_tags())
print(kwargs)
return VectorStoreRetriever_FAISS(vectorstore=self._faiss, **kwargs, tags=tags)
class VectorStoreRetriever_FAISS(VectorStoreRetriever):
search_k = 5
def __init__(self,**kwargs):
super().__init__(**kwargs)
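# Request twice as many hits as configured so that deduplication and callbacks still leave enough
# results; _get_relevant_documents trims the final list back to search_k.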
if "k" in self.search_kwargs:
self.search_k=self.search_kwargs["k"]
self.search_kwargs["k"]=self.search_k*2
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
docs = super()._get_relevant_documents(query=query,run_manager=run_manager)
return docs[:self.search_k]
async def _aget_relevant_documents(
self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
) -> List[Document]:
docs = super()._aget_relevant_documents(query=query,run_manager=run_manager)
return docs[:self.search_k]
import os, sys
import re
import time
sys.path.append("../..")
from contract.documentqa import DocumentQA,GetEmbding,GetRetriever
from llm.chatglm import ChatGLMSerLLM
from llm.ernie_with_sdk import ChatERNIESerLLM
from loader.load import load,loads_path,loads,append
from langchain.embeddings.huggingface import (
HuggingFaceEmbeddings,
)
from langchain.chains import RetrievalQA
from langchain.vectorstores import FAISS
from contract.prompts import (
QA_PROMPT,
REFINE_QA_PROMPT,
SUMMARISE_PROMPT,
REFINE_SUMMARISE_PROMPT,
EXTRACTION_PROMPT,
REFINE_EXTRACTION_PROMPT,
ROUTER_PROMPT,
# CHAT_QUESTION_PROMPT,
# CHAT_COMBINE_PROMPT
)
from langchain.callbacks.base import (
BaseCallbackHandler,
BaseCallbackManager
)
class PrintRetrievalHandler(BaseCallbackHandler):
def on_retriever_start(self, query: str, **kwargs):
print(f"**Question:** {query}")
def on_retriever_end(self, documents, **kwargs):
for idx, doc in enumerate(documents):
source = doc.metadata["source"]
print(f"**Results from {source}**")
print(doc.page_content)
base_llm=ChatGLMSerLLM(url="http://192.168.22.106:8002")
# base_llm=ChatGLMSerLLM(url="http://192.168.0.148:8000")
documentqa = DocumentQA(llm=base_llm)
def test_qa():
filepath = "/dataset/浦发硅谷电子渠道、企业网银、APIbanking、电子回单系统运维专业服务合同(4).docx"
# embedding_path = '/model/text2vec/text2vec-base-chinese'
embedding_path = '/model/moka-ai/m3e-large'
sentence_size = 1024
# retriever=GetRetriever(embdingpath=embedding_path,filepaths=[filepath],
# load_kwargs={"sentence_size":512},
# save_local=False,
# search_kwargs={"k":5})
# search_type="similarity_score_threshold",search_kwargs={"score_threshold":200.0})
# result = documentqa.summarize_document(filepaths=[filepath],load_kwargs={"sentence_size":sentence_size},chain_type="refine", chain_type_kwargs={"verbose":True},temperature=0.8)
start=time.time()
documents = loads(filepaths=[filepath],sentence_size=sentence_size*4/3)
# print("document_len",[len(doc.page_content) for doc in documents])
# print("documents size",len(documents))
# documents = append(documents=documents,sentence_size=sentence_size)
# print("document_len",[len(doc.page_content) for doc in documents])
# print("documents size",len(documents))
# documents = retriever._get_relevant_documents(query="受托人",run_manager=None)
# print([doc.page_content for doc in documents])
result = documentqa.summarize_document(documents=documents,chain_type="map_reduce", chain_type_kwargs={"token_max":sentence_size,"verbose":True})
# result = documentqa.qa_from_document(query="合同中的“受托人”名称",retriever=retriever,chain_type="map_reduce", chain_type_kwargs={"token_max":3000,"verbose":True},callbacks=[PrintRetrievalHandler()])
# result = documentqa.qa_from_document(query="合同服务期限",retriever=retriever,chain_type="refine", chain_type_kwargs={"verbose":True},callbacks=[PrintRetrievalHandler()])
# qa = RetrievalQA.from_chain_type(llm=base_llm, chain_type="map_reduce",
# chain_type_kwargs={"question_prompt":CHAT_QUESTION_PROMPT,"combine_prompt":CHAT_COMBINE_PROMPT,"token_max":3000,"verbose":True},retriever=retriever)
# result = qa.run("合同中的委托人公司名称和受托人公司名称分别是")
cost_time=time.time()-start
# print("document_len",[len(doc.page_content) for doc in documents])
# print("documents size",len(documents))
print("cost_time",cost_time)
print(result)
from vector.pgsql.db import PostgresDB
from similarity import VectorStore_FAISS
from langchain.schema import Document
def test_faiss():
vecstore_faiss = VectorStore_FAISS(
embedding_model_name='../../../model/moka-ai/m3e-large',
store_path=os.path.join(os.path.expanduser('~'),'.beai/vectorstore_enhance'),
index_name="know",
info={"port":"5432","host":"192.168.22.106","dbname":"new_vecdoc","username":"vecdoc","password":"vecdoc"},
show_number=3,
reset=True)
psqldb = PostgresDB("192.168.22.106", "vecdoc", "vecdoc", "vecdoc")
psqldb.connect()
#将当前向量库中的数据全部导出到新的向量库中
db_index = 0
page_size = 2000
print(vecstore_faiss._faiss.index.ntotal)
while True:
# query = f"SELECT text,paragraph_id FROM vec_txt order by vector_id limit %s offset %s" % (page_size,db_index)
query = f"select vec_txt.text,vec_txt.paragraph_id,txt_doc.text,count(*) as count from vec_txt left join txt_doc on vec_txt.paragraph_id=txt_doc.paragraph_id where vec_txt.text not like E'%%\n%%' and vec_txt.text != '' and vec_txt.text != ' ' and vec_txt.paragraph_id in(select DISTINCT on (text) paragraph_id from txt_doc order by text,paragraph_id) group by vec_txt.text,txt_doc.text,vec_txt.paragraph_id limit %s offset %s" % (page_size,db_index)
psqldb.execute(query)
questions = psqldb.fetchall()
if len(questions) <= 0:
break
db_index+=page_size
list_of_documents = []
for question in questions:
list_of_documents.append(Document(page_content=question[0], metadata=dict(paragraph=question[2],page=question[1])))
vecstore_faiss._add_documents(list_of_documents,need_split=False)
print(vecstore_faiss._faiss.index.ntotal)
vecstore_faiss._save_local()
from langchain.document_loaders import UnstructuredFileLoader, TextLoader, CSVLoader,UnstructuredPDFLoader,UnstructuredWordDocumentLoader
def test_faiss_from_dir():
vecstore_faiss = VectorStore_FAISS(
embedding_model_name='../../../model/moka-ai/m3e-large',
store_path=os.path.join(os.path.expanduser('~'),'.beai/vectorstore_enhance'),
index_name="know",
info={"port":5434,"host":"192.168.22.106","dbname":"vecdoc","username":"vecdoc","password":"vecdoc"},
show_number=3,
reset=True)
docs = loads_path("../../../data/docs",mode="elements",sentence_size=1024)
print(len(docs))
# dedupe by page_content (RE_FAISS._tuple_deduplication expects (doc, score) tuples, and VectorStore_FAISS has no such helper)
docs = list({doc.page_content: doc for doc in docs}.values())
print(len(docs))
print(vecstore_faiss._faiss.index.ntotal)
for i in range(0, len(docs), 300):
vecstore_faiss._add_documents(docs[i:i+300 if i+300<len(docs) else len(docs)],need_split=True)
print(vecstore_faiss._faiss.index.ntotal)
vecstore_faiss._save_local()
def test_faiss_load():
vecstore_faiss = VectorStore_FAISS(
embedding_model_name='../../../model/moka-ai/m3e-large',
store_path=os.path.join(os.path.expanduser('~'),'.beai/vectorstore_enhance'),
index_name="know",
info={"port":5432,"host":"192.168.22.106","dbname":"new_vecdoc","username":"vecdoc","password":"vecdoc"},
show_number=3,
reset=False)
print(vecstore_faiss._join_document(vecstore_faiss.get_text_similarity_with_score("通知存款支取的业务规则")))
if __name__ == "__main__":
test_faiss_from_dir()
import logging
import os, sys
sys.path.append("../..")
logging.basicConfig(filename='web.log', level=logging.INFO)
logger = logging.getLogger(__name__)
from common import consts
import gradio as gr
import mdtex2html
import torch
import transformers
from transformers import (
AutoConfig,
AutoModel,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
Seq2SeqTrainingArguments,
set_seed,
)
from argparse import Namespace
from llm.loader import ModelLoader
cfg = Namespace()
#model
cfg.checkout_mode = "lora" # lora or ptuning
cfg.model_name_or_path = consts.MODEL_PATH_ChatGLM2_32K # 32K 利于知识增强模式
# cfg.model_name_or_path = '/home/zfh/models/tunning/chatglm2-6b-lora-spdsvb'
cfg.ptuning_path = '/home/zfh/aird/model/ckpt/chatglm-6b-pt-spdsvb-INSv8-128-5e-3-3000/checkpoint-3000'
cfg.ckpt_path = '/home/zfh/aird/model/ckpt/chatglm-6b-lora-spdsvb-INSv10-1e-03-20'
cfg.pre_seq_len = 0
# cfg.pre_seq_len = 128
cfg.prefix_projection = False
cfg.quantization_bit = None
cfg.source_prefix = consts.INSTRUCTION_V1
## --------- load model --------------
if cfg.checkout_mode == "lora":
# lora 微调 checkpoint 及模型加载
loader = ModelLoader(cfg.model_name_or_path)
loader.load_lora(cfg.ckpt_path)
elif cfg.checkout_mode == "ptuning":
# ptuning v2 微调 checkpoint 及模型加载
loader = ModelLoader(cfg.model_name_or_path, cfg.pre_seq_len, False)
loader.load_prefix(cfg.ptuning_path)
model,tokenizer = loader.models()
if cfg.quantization_bit is not None:
model = loader.quantize(cfg.quantization_bit)
model = model.cuda().eval()
## --------- load model end --------------
"""Override Chatbot.postprocess"""
def postprocess(self, y):
if y is None:
return []
for i, (message, response) in enumerate(y):
y[i] = (
None if message is None else mdtex2html.convert((message)),
None if response is None else mdtex2html.convert(response),
)
return y
gr.Chatbot.postprocess = postprocess
def parse_text(text):
"""copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/"""
lines = text.split("\n")
lines = [line for line in lines if line != ""]
count = 0
for i, line in enumerate(lines):
if "```" in line:
count += 1
items = line.split('`')
if count % 2 == 1:
lines[i] = f'<pre><code class="language-{items[-1]}">'
else:
lines[i] = f'<br></code></pre>'
else:
if i > 0:
if count % 2 == 1:
line = line.replace("`", "\`")
line = line.replace("<", "&lt;")
line = line.replace(">", "&gt;")
line = line.replace(" ", "&nbsp;")
line = line.replace("*", "&ast;")
line = line.replace("_", "&lowbar;")
line = line.replace("-", "&#45;")
line = line.replace(".", "&#46;")
line = line.replace("!", "&#33;")
line = line.replace("(", "&#40;")
line = line.replace(")", "&#41;")
line = line.replace("$", "&#36;")
lines[i] = "<br>"+line
text = "".join(lines)
return text
# def predict(input, chatbot, max_length, top_p, temperature, enhance, instruct,history):
def predict(input, chatbot, max_length, enhance, instruct, history):
# chatbot.append((parse_text(input), ""))
if enhance:
print("知识增强")
history = []
from similarity import get_text_similarity
similarity = get_text_similarity(input)
print("similarity:",similarity)
if similarity is not None :
prompt=f"{similarity}\n请结合上述内容回答以下问题:\n{input}"
elif cfg.source_prefix is not None and instruct:
prompt = cfg.source_prefix + input
else:
prompt = f"对不起,我没有找到相关内容\n请结合上述内容回答以下问题:\n{input}"
input = f"[知识增强]{input}"
elif instruct:
print("专家指令")
if cfg.source_prefix is not None and instruct:
history = []
prompt = cfg.source_prefix + input
else:
prompt = input
input = f"[专家指令]{input}"
else:
print("普通问答")
prompt = input
input = f"[普通问答]{input}"
chatbot.append((input, ""))
logger.info(f"prompt: {prompt}")
# for response, history in model.stream_chat(tokenizer, prompt, history, max_length=max_length, top_p=top_p, # type: ignore
# temperature=temperature):
for response, history in model.stream_chat(tokenizer, prompt, history, max_length=max_length, # type: ignore
):
chatbot[-1] = (parse_text(input), parse_text(response))
yield chatbot, history
def reset_user_input():
return gr.update(value='')
def reset_state():
return [], []
with gr.Blocks() as demo:
gr.HTML("""<h1 align="center">ChatGLM - SPDSVB</h1>""")
chatbot = gr.Chatbot()
with gr.Row():
with gr.Column(scale=4):
with gr.Column(scale=12):
user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10)
with gr.Column(min_width=32, scale=1):
submitBtn = gr.Button("Submit", variant="primary")
with gr.Column(scale=1):
emptyBtn = gr.Button("Clear History")
max_length = gr.Slider(0, 32768, value=2048, step=100.0, label="Maximum length", interactive=True)
if cfg.model_name_or_path == consts.MODEL_PATH_ChatGLM2_32K:
max_length.value = 32768
# top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True)
# temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)
with gr.Row():
enhance = gr.Checkbox(label="知识增强", interactive=True)
instruct = gr.Checkbox(label="专家指令",value=True, interactive=True)
history = gr.State([])
# submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, enhance, instruct, history], [chatbot, history],
submitBtn.click(predict, [user_input, chatbot, max_length, enhance, instruct, history], [chatbot, history],
show_progress="minimal")
submitBtn.click(reset_user_input, [], [user_input])
emptyBtn.click(reset_state, outputs=[chatbot, history], show_progress='minimal')
def main():
demo.queue().launch(share=True,server_port=9999)
if __name__ == "__main__":
main()
import os, sys
sys.path.append("../..")
import json
import pandas
from llm.spark import SparkLLM
from llm.ernie import ErnieLLM, ModelType
from qa.generator import QAGenerator as QAGeneratorBase
from qa.generator import TrainData
from langchain.chat_models import ChatOpenAI
from langchain.base_language import BaseLanguageModel
class QAGenerator:
def __init__(self, llm: BaseLanguageModel):
"""
初始化 QAGenerator
:param llm: 语言模型
"""
self.generator = TrainData(llm=llm)
def generate_questions_and_answers(self, input_text, question_number=3):
"""
生成问题和答案
:param input_text: 输入文本
:param question_number: 生成问题的数量,默认为 3
:return: 问题和答案的列表
"""
# questions = self.generator.generate_questions(input_text, question_number=question_number)
# result = []
# for q in questions:
# answer = self.generator.generate_answer(input_text, q)
# result.append((q, answer))
result = self.generator.generate(input_text, question_number=question_number)
return result
def read_and_deduplicate_text(self, data_path, text_column):
"""
读取指定路径下的文件并返回去重后的文本列表
:param data_path: 文件路径
:param text_column: 文本所在列的列名
:return: 去重后的文本列表
"""
with open(data_path, "r", encoding="utf-8") as f:
text_list=[]
ext = data_path.split(".")[-1]
if ext == "json":
data = json.load(f)
for item in data:
text_list.append(item[text_column])
elif ext == "csv":
data = pandas.read_csv(f)
for item in data[text_column]:
text_list.append(item)
# 去重
text_list = list(set(text_list))
print(f"共读取到 {len(text_list)} 条文本")
return text_list
def save_questions_and_answers(self, df, output_path):
"""
将问题和答案保存到 CSV 文件中,已追加的方式
:param df: 问题和答案的 DataFrame
:param output_path: 输出文件路径
"""
if os.path.exists(output_path):
df.to_csv(output_path, mode="a", index=False, header=False)
else:
df.to_csv(output_path, index=False)
def generate_questions_csv(self, data_path, text_column, output_path, save_step=100, max_num=1000):
"""
生成问题和答案的 CSV 文件
:param data_path: 数据文件路径
:param text_column: 文本所在列的列名
:param output_path: 输出文件路径
"""
# 读取数据文件并去重
text_list = self.read_and_deduplicate_text(data_path, text_column)
text_list=text_list[:max_num]
# 生成问题和答案
result = []
for i, input in enumerate(text_list):
if len(input) > 200:
num_questions = 5
else:
num_questions = 3
questions = self.generate_questions_and_answers(input, question_number=num_questions)
for q, a in questions:
result.append((input, q, a))
if (i+1) % save_step == 0:
questions_df = pandas.DataFrame(result, columns=["text", "question", "answer"])
self.save_questions_and_answers(questions_df, output_path)
result = []
# 打印进度条
progress = (i + 1) / len(text_list) * 100
bar_length = 50
filled_length = int(bar_length * progress // 100)
bar = "█" * filled_length + "-" * (bar_length - filled_length)
print(f"\r已处理 {i+1}/{len(text_list)} 条数据 |{bar}| {progress:.2f}%", end="")
# 将结果保存到 CSV 文件中
questions_df = pandas.DataFrame(result, columns=["text", "question", "answer"])
self.save_questions_and_answers(questions_df, output_path)
# questions_df = pandas.DataFrame(result, columns=["text", "question", "answer"])
# questions_df.to_csv(output_path, index=False)
import os, sys
sys.path.append("../..")
from dotenv import load_dotenv,find_dotenv
load_dotenv(find_dotenv())
from llm.spark import SparkLLM
from llm.chatglm import ChatGLMSerLLM
from qa.generator import QAGenerator,TrainData
from langchain.chat_models import ChatOpenAI
# llm=SparkLLM()
# llm=ChatOpenAI(model_name="gpt-3.5-turbo")
# llm=ChatOpenAI(model_name="gpt-4")
# llm=ChatGLMSerLLM(url="http://localhost:8002")
from llm.ernie import ErnieLLM, ModelType
llm = ErnieLLM(model_name=ModelType.ERNIE_LITE)
generator=QAGenerator(llm=llm)
# input='''下载网银相关软件分为一下几个步骤
# 1.使用浏览器打开浦发硅谷银行首页:https://www.spd-svbank.com/cn/
# 2.点击右上角“网上银行”登录
# 3.在登录页“登录”按钮下方,点击“下载网银软件”,跳转至软件下载界面
# 4.选择需要下载的软件点击下载按钮即可
# #Windows:
# ##Firefox /IE 10/IE 11/Edge 浏览器:安装 Firefox 扩展,网银管家,USBKEY 驱动, 密码控件,签名和证书控件。
# ##Chrome 浏览器:安装 Chrome 扩展,网银管家,USBKEY 驱动,密码控件,签名和证书控件。
# #Mac
# ##Firefox/Chrome浏览器:安装网银管家,USBKEY 驱动,密码控件,签名和证书控件。
# (为保证您正常使用我行网上银行,使用网银 USBKEY 的客户请先安装我行网银管家安全组件。我行网银管家安全组件包括系统环境、网银控件、IE 设置等安全检测及网银USBKEY 管理工具,可一次性完成网银所需的所有控件及驱动程序的安装。)
# '''
input='''季度对账的页面描述,1、进入银企对账账户列表页面,点击银企对账,进入对账单列表页面,点击对账提交,进入对账单明细页面,选择对账状态,若为对账不相符,对账疑义为必输字段,若为对账相符,对账疑义可不输。
2、进入银企对账账户列表页面,点击对账详情,进入对账单列表页面,点击对账详情,进入对账单明细页面,可选择打印对账单明细数据。
3、对账单列表和对账单明细可下载pdf文件和excel文档。
'''
result=[]
# questions = generator.generate_questions(input,question_number=3)
# print("questions:",questions)
# print("---"*10)
# for q in questions:
# answer=generator.generate_answer(input,q)
# print("Q:",q,"\nA:",answer)
# print("----"*10)
train_data_gen=TrainData(llm=llm)
train_data=train_data_gen.generate(input,question_number=3)
print(train_data)
from dotenv import load_dotenv
load_dotenv()
from qagenerator import QAGenerator
# from langchain.chat_models import ChatOpenAI
# llm=ChatOpenAI(model_name="gpt-3.5-turbo")
from llm.ernie import ErnieLLM, ModelType
llm = ErnieLLM(model_name=ModelType.ERNIE_LITE)
input_file="../../../data/knowledge_qa.csv"
output_file="../../../data/kownledge_questions_qa_t50.ernie.csv"
qa_generator_csv = QAGenerator(llm=llm)
qa_generator_csv.generate_questions_csv(input_file, "input", output_file, max_num=50, save_step=10)
\ No newline at end of file
import os, sys
sys.path.append("../..")
import logging
logging.basicConfig(filename='web.log', level=logging.INFO)
logger = logging.getLogger(__name__)
from dotenv import load_dotenv
load_dotenv()
from common import consts
import gradio as gr
import mdtex2html
from llm.spark import SparkLLM
from langchain.chat_models import ChatOpenAI
from qa.generator import QAGenerator
llm=SparkLLM()
generator=QAGenerator(llm=llm)
def parse_text(text):
"""copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/"""
lines = text.split("\n")
lines = [line for line in lines if line != ""]
count = 0
for i, line in enumerate(lines):
if "```" in line:
count += 1
items = line.split('`')
if count % 2 == 1:
lines[i] = f'<pre><code class="language-{items[-1]}">'
else:
lines[i] = f'<br></code></pre>'
else:
if i > 0:
if count % 2 == 1:
line = line.replace("`", "\`")
line = line.replace("<", "&lt;")
line = line.replace(">", "&gt;")
line = line.replace(" ", "&nbsp;")
line = line.replace("*", "&ast;")
line = line.replace("_", "&lowbar;")
line = line.replace("-", "&#45;")
line = line.replace(".", "&#46;")
line = line.replace("!", "&#33;")
line = line.replace("(", "&#40;")
line = line.replace(")", "&#41;")
line = line.replace("$", "&#36;")
lines[i] = "<br>"+line
text = "".join(lines)
return text
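# Illustrative example of the conversion above (assumed behaviour, based on the escaping rules as written):
#   parse_text("```python\nprint(1)\n```")
#   -> '<pre><code class="language-python"><br>print&#40;1&#41;<br></code></pre>'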
# def predict(input, chatbot, max_length, top_p, temperature, enhance, instruct,history):
def predict(input, chatbot, max_length, history):
# chatbot.append((parse_text(input), ""))
result=[]
questions = generator.generate_questions(input,question_number=3)
# print("questions:",questions)
for q in questions:
answer=generator.generate_answer(input,q)
result.append((q,answer))
# print("Q:",q,"A:",answer)
response = "\n\n".join([f"{q}\n{a}" for q,a in result])
# print("response:",response)
chatbot.append((parse_text(response),""))
return chatbot, history
def reset_user_input():
return gr.update(value='')
def reset_state():
return [], []
def change_model(model_name):
global llm, generator
if model_name == "spark":
llm=SparkLLM()
elif model_name == "openai":
llm=ChatOpenAI(model_name="gpt-3.5-turbo")
generator=QAGenerator(llm=llm)
return model_name
with gr.Blocks() as demo:
gr.HTML("""<h1 align="center">训练集提取</h1>""")
chatbot = gr.Chatbot()
with gr.Row():
with gr.Column(scale=4):
with gr.Column(scale=12):
user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10)
with gr.Column(min_width=32, scale=1):
submitBtn = gr.Button("Submit", variant="primary")
with gr.Column(scale=1):
model_name = gr.Radio(choices=["spark", "openai"], label="Model", value="spark", inline=True,interactive=True)
max_length = gr.Slider(0, 32768, value=2048, step=100.0, label="Maximum length", interactive=True)
emptyBtn = gr.Button("Clear History")
# top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True)
# temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)
model_name.change(change_model,[model_name],[model_name])
history = gr.State([])
# submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, enhance, instruct, history], [chatbot, history],
submitBtn.click(predict, [user_input, chatbot, max_length, history], [chatbot, history],
show_progress="minimal")
submitBtn.click(reset_user_input, [], [user_input])
emptyBtn.click(reset_state, outputs=[chatbot, history], show_progress='minimal')
def main():
demo.queue().launch(share=True,server_port=9999)
if __name__ == "__main__":
main()
\ No newline at end of file
import json
import matplotlib.pyplot as plt
import numpy as np
# Load each eval_results.json file, read its evaluation metrics,
# and plot them as grouped bar charts for comparison across checkpoints.
def load_json(file_path):
with open(file_path, 'r') as f:
data = json.load(f)
return data
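# Each eval_results.json is assumed to contain the metric keys read in create_plot below,
# e.g. (values illustrative only):
#   {"eval_bleu-4": 12.3, "eval_rouge-1": 35.1, "eval_rouge-2": 16.4, "eval_rouge-l": 27.8}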
dataset_chatglm = [
{"chatglm1-6b-spdsvb":"/home/zfh/ChatGLM/ChatGLM-6B/ptuning/output/chatglm-6b-spdsvb/eval_results.json"},
# {"chatglm-6b-pt-spdsvb-128-1e-3-3000-v1":'/home/zfh/ChatGLM/ChatGLM-6B/ptuning/output/adgen-chatglm-6b-pt-spdsvb-128-1e-3-3000/eval_results.json'},
# {"chatglm-6b-pt-spdsvb-128-1e-3-3000-v2":'/home/zfh/ChatGLM/ChatGLM-6B/ptuning/output/adgen-chatglm-6b-pt-spdsvb-128-1e-3-3000-v2/eval_results.json'},
{"chatglm-6b-pt-spdsvb-128-5e-3-3000-base":"/home/zfh/ChatGLM/ChatGLM-6B/ptuning/output/adgen-chatglm-6b-pt-128-5e-3-3000-spdsvb-base/eval_results.json"},
{"chatglm-6b-lora-spdsvb-base":"/home/zfh/ChatGLM/ChatGLM-6B/ptuning/output/chatglm-6b-lora-spdsvb-base/eval_results.json"},
{"chatglm-6b-lora-spdsvb-base-5e-3-50":"/home/zfh/ChatGLM/ChatGLM-6B/ptuning/output/chatglm-6b-lora-spdsvb-base-5e-3-50/eval_results.json"},
{"chatglm-6b-lora-spdsvb-INSv4-1e-03-50":"/home/zfh/ChatGLM/ChatGLM-6B/ptuning/output/chatglm-6b-lora-spdsvb-INSv4-1e-03-50/eval_results.json"},
{"chatglm-qlora-t1":"/home/zfh/ChatGLM/ChatGLM-6B/ptuning/output/chatglm-qlora-t1/eval_results.json"},
{"chatglm-6b-qlora-spdsvb-v4_t32":"/home/zfh/ChatGLM/ChatGLM-6B/ptuning/output/chatglm-6b-qlora-spdsvb-v4_t32/eval_results.json"},
]
labels = [
"base",
# "pt-v1",
# "pt-v2",
"pt-base",
"alora-base",
"alora-5e3-base",
"alora-5e3-v4",
"qlora-base",
"qlora-v4",
]
dataset_chatglm2 = [
{"chatglm2-6b-spdsvb":"/home/zfh/ChatGLM/ChatGLM2-6B/ptuning/output/chatglm2-6b/spdsvb-base/eval_results.json"},
{"chatglm2-6b-pt-spdsvb-base":"/home/zfh/ChatGLM/ChatGLM2-6B/ptuning/output/adgen-chatglm2-6b-pt-128-5e-4-spdsvb-base/eval_results.json"},
{"chatglm2-6b-lora-spdsvb":"/home/zfh/models/tunning/chatglm2-6b-lora-spdsvb/eval_results.json"},
{"chatglm2-6b-lora-spdsvb-v3":"/home/zfh/models/tunning/chatglm2-6b-spdb-v3/eval_results.json"},
]
labels2 = [
"2-base",
"2-pt-base",
"2-lora-v1",
"2-lora-v3",
]
def bar_eval(x,width,data,label):
y = data
plt.bar(x, y, width=width, label=label)
plt.plot(x, y, '-o')
plt.legend()
def create_plot(dataset,labels,name):
plt.figure(figsize=(10, 4))
bleu_4,rouge_1,rouge_2,rouge_l = [],[],[],[]
for item in dataset:
for key,value in item.items():
eval_data = load_json(value)
bleu_4.append(eval_data['eval_bleu-4'])
rouge_1.append(eval_data['eval_rouge-1'])
rouge_2.append(eval_data['eval_rouge-2'])
rouge_l.append(eval_data['eval_rouge-l'])
x=np.arange(len(labels))
plt.xticks(x,labels)
width = 0.1
bar_eval(x-1.5*width,width,bleu_4,'bleu-4')
bar_eval(x-0.5*width,width,rouge_1,'rouge-1')
bar_eval(x+0.5*width,width,rouge_2,'rouge-2')
bar_eval(x+1.5*width,width,rouge_l,'rouge-l')
plt.title("chatglm-6b spdsvb eval")
plt.savefig(f'../../images/eval/{name}.png')
create_plot(dataset_chatglm,labels,'chatglm')
create_plot(dataset_chatglm2,labels2,'chatglm2')
\ No newline at end of file
import json
import matplotlib.pyplot as plt
# Load each trainer_state.json file, read its log_history list,
# and plot the training/eval loss curves from it.
def load_json(file_path):
with open(file_path, 'r') as f:
data = json.load(f)
return data['log_history']
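# log_history is assumed to follow the usual HuggingFace Trainer format:
# a list of dicts such as (values illustrative only)
#   {"loss": 2.31, "learning_rate": 5e-3, "epoch": 0.5, "step": 100}
#   {"eval_loss": 2.05, "epoch": 1.0, "step": 200}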
def plot_loss(log_history,label):
x,y=[],[]
ex,ey=[],[]
# for i in range(len(log_history)):
for item in log_history:
if 'loss' in item:
x.append(item["step"])
y.append(item["loss"])
elif 'eval_loss' in item:
ex.append(item["step"])
ey.append(item["eval_loss"])
else:
# print('Error: loss key not found in item:', item)
continue
plt.plot(x, y,'-o', label=label+"train_loss")
if len(ey)>0:
plt.plot(ex, ey,'-o', label=label+"eval_loss")
# plt.scatter(x, y, label='train_loss')
plt.legend()
# plt.show(block=True)
def create_plot(dataset,name):
plt.figure(figsize=(10, 4))
for item in dataset:
for key,value in item.items():
log_history = load_json(value)
plot_loss(log_history,key)
plt.savefig(f'../../images/loss/{name}.png')
def create_plot_one(state_file):
plt.figure(figsize=(10, 4))
log_history = load_json(state_file)
dir = '/'.join(state_file.split('/')[:-1])
plot_loss(log_history,"")
plt.savefig(f'{dir}/loss.png')
dataset_chatglm = [
# {"adgen-chatglm-6b-pt-comb-128-2e-2":'/home/zfh/ChatGLM/ChatGLM-6B/ptuning/output/adgen-chatglm-6b-pt-comb-128-2e-2/trainer_state.json'},
# {"adgen-chatglm-6b-pt-comb-128-2e-2-3000":'/home/zfh/ChatGLM/ChatGLM-6B/ptuning/output/adgen-chatglm-6b-pt-comb-128-2e-2-3000/trainer_state.json'},
# {"adgen-chatglm-6b-pt-comb-128-5e-3-1000":'/home/zfh/ChatGLM/ChatGLM-6B/ptuning/output/adgen-chatglm-6b-pt-comb-128-5e-3-1000/trainer_state.json'},
# {"adgen-chatglm-6b-pt-comb-128-1e-3-3000":'/home/zfh/ChatGLM/ChatGLM-6B/ptuning/output/adgen-chatglm-6b-pt-comb-128-1e-3-3000/trainer_state.json'},
{"adgen-chatglm-6b-pt-spdsvb-128-1e-2-3000":'/home/zfh/ChatGLM/ChatGLM-6B/ptuning/output/adgen-chatglm-6b-pt-spdsvb-128-1e-3-3000/trainer_state.json'},
{"adgen-chatglm-6b-pt-spdsvb-128-1e-3-3000-v2":'/home/zfh/ChatGLM/ChatGLM-6B/ptuning/output/adgen-chatglm-6b-pt-spdsvb-128-1e-3-3000-v2/trainer_state.json'},
{"adgen-chatglm-6b-pt-spdsvb-128-1e-3-3000-v3":'/home/zfh/ChatGLM/ChatGLM-6B/ptuning/output/adgen-chatglm-6b-pt-spdsvb-128-1e-3-3000-v3/trainer_state.json'},
{"adgen-chatglm-6b-pt-spdsvb-128-5e-3-3000-base":'/home/zfh/ChatGLM/ChatGLM-6B/ptuning/output/adgen-chatglm-6b-pt-128-5e-3-3000-spdsvb-base/trainer_state.json'},
# {"adgen-chatglm2-6b-pt-128-5e-4-spdsvb-v3":"/home/zfh/ChatGLM/ChatGLM2-6B/ptuning/output/adgen-chatglm2-6b-pt-128-5e-4-spdsvb-v3/trainer_state.json"},
# {"adgen-chatglm2-6b-pt-128-5e-4-spdsvb-base":"/home/zfh/ChatGLM/ChatGLM2-6B/ptuning/output/adgen-chatglm2-6b-pt-128-5e-4-spdsvb-base/trainer_state.json"},
{"chatglm-6b-pt-spdsvb-INSv8-128-5e-3-3000":"/home/zfh/aird/model/ckpt/chatglm-6b-pt-spdsvb-INSv8-128-5e-3-3000/trainer_state.json"},
{"chatglm-6b-pt-spdsvb-INSv9-128-5e-3-3000":"/home/zfh/aird/model/ckpt/chatglm-6b-pt-spdsvb-INSv9-128-5e-3-3000/trainer_state.json"},
{"chatglm-6b-pt-spdsvb-INSv10-128-5e-3-3000":"/home/zfh/aird/model/ckpt/chatglm-6b-pt-spdsvb-INSv10-128-5e-3-3000/trainer_state.json"},
]
dataset_v11 = [
{"chatglm2-6b-pt-spdsvb-INSv11-128-1e-3-3000":"/home/zfh/aird/model/ckpt/chatglm2-6b-pt-spdsvb-INSv11-128-1e-3-3000/trainer_state.json"},
{"chatglm2-6b-pt-spdsvb-INSv11-128-5e-3-3010":"/home/zfh/aird/model/ckpt/chatglm2-6b-pt-spdsvb-INSv11-128-5e-3-3010/trainer_state.json"},
{"chatglm2-6b-qlora-INSv11-rank16-1e-3-30":"/home/zfh/aird/model/ckpt/chatglm2-6b-qlora-INSv11-rank16-1e-3-30/checkpoint-2000/trainer_state.json"},
{"chatglm2-6b-pt-spdsvb-INSv11-128-3e-3-1000+s1000":"/home/zfh/aird/model/ckpt/chatglm2-6b-pt-spdsvb-INSv11-128-3e-3-1000+s1000/trainer_state.json"},
{"chatglm2-6b-32k-qlora-INSv11-rank16-5e-4-30":"/home/zfh/aird/model/ckpt/chatglm2-6b-32k-qlora-INSv11-rank16-5e-4-30/checkpoint-2800/trainer_state.json"},
{"chatglm2-6b-32k-pt-spdsvb-INSv11-128-3e-3-3000":"/home/zfh/aird/model/ckpt/chatglm2-6b-32k-pt-spdsvb-INSv11-128-3e-3-3000/checkpoint-3000/trainer_state.json"}
]
dataset_lora = [
# {"chatglm-6b-lora-spdsvb-base":'/home/zfh/aird/model/ckpt/chatglm-6b-lora-spdsvb-base/train_history.csv'},
{"chatglm2-6b-qlora-INSv11_rank16-1e-3-30":"/home/zfh/aird/model/ckpt/chatglm2-6b-qlora-INSv11_rank16-1e-3-30/checkpoint-2000/trainer_state.json"},
]
create_plot(dataset_chatglm,'spdsvb')
# create_plot(dataset_chatglm2,'chatglm2')
for item in dataset_chatglm:
for key,value in item.items():
create_plot_one(value)
create_plot(dataset_v11,'spdsvb_v11')
for item in dataset_v11:
for key,value in item.items():
create_plot_one(value)
\ No newline at end of file
import csv
import json
import sys
sys.path.append("..")
from common import consts
def data_format(csv_file, jsonl_file):
data = []
with open(csv_file, 'r',encoding='utf-8') as f:
reader = csv.DictReader(f)
for row in reader:
prompt = row['prompt']
response = row['response']
data.append({'prompt': prompt, 'response': response})
with open(jsonl_file, 'w',encoding='utf-8') as f:
for d in data:
f.write(json.dumps(d, ensure_ascii=False) + '\n')
csv_file = '../../data/train_comb.csv'
jsonl_file = '../../data/sheet_train_comb.json'
data_format(csv_file, jsonl_file)
csv_file = '../../data/train_comb_eval.csv'
jsonl_file = '../../data/sheet_train_comb_eval.json'
data_format(csv_file, jsonl_file)
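# Each line of the resulting JSONL file is one standalone object of the form
# {"prompt": "...", "response": "..."} (shape only; the actual text comes from the CSV rows).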
import csv
import json
import sys
sys.path.append("..")
from common import consts
csv_file = '../../data/train_spdsvb_v11.csv'
jsonl_file = '../../data/train_spdsvb_v11.jsonl'
data = []
with open(csv_file, 'r',encoding='utf-8') as f:
reader = csv.DictReader(f)
for row in reader:
prompt = consts.INSTRUCTION_V1+row['prompt']
response = [[row['response']]]
data.append([{'prompt': prompt, 'response': response}])
with open(jsonl_file, 'w',encoding='utf-8') as f:
for d in data:
f.write(json.dumps(d, ensure_ascii=False) + '\n')
\ No newline at end of file
#!/bin/bash
DATA_TRAIN_FILE='../../../data/train_spdsvb_v10.csv'
DATA_VAL_FILE='../../../data/val_spdsvb_v4.csv'
PROMPT_PREFIX="你是浦发硅谷银行网银系统的专家,请帮助解答用户在使用过程中遇到的问题。"
MODEL_PATH_ChatGLM="/home/zfh/models/chatglm-6b"
MODEL_PATH_ChatGLM2="/home/zfh/models/chatglm2-6b"
MODEL_PATH_ChatGLM2_32K="/home/zfh/models/chatglm2-6b-32k"
MODEL_NAME_ChatGLM="THUDM/chatglm-6b"
MODEL_NAME_ChatGLM2="THUDM/chatglm2-6b"
INSTRUCTION_V1="你是浦发硅谷银行网银系统的专家,请帮助解答用户在使用过程中遇到的问题。\n"
\ No newline at end of file
{
"output_dir": "../../../model/ckpt/chatGLM2_6B_QLoRA_t32",
"per_device_train_batch_size": 4,
"gradient_accumulation_steps": 16,
"per_device_eval_batch_size": 4,
"learning_rate": 1e-3,
"num_train_epochs": 15.0,
"lr_scheduler_type": "linear",
"warmup_ratio": 0.1,
"logging_first_step": false,
"logging_steps": 100,
"logging_strategy": "steps",
"save_strategy": "steps",
"save_steps": 100,
"evaluation_strategy": "steps",
"eval_steps": 100,
"optim": "adamw_torch",
"fp16": false,
"remove_unused_columns": false,
"ddp_find_unused_parameters": false,
"seed": 42
}
import sys
sys.path.append("../..")
from common import consts
## --------- config -------------
from argparse import Namespace
cfg = Namespace()
#dataset
cfg.train_file = '../../../data/train_spdsvb.v3.csv'
cfg.val_file = '../../../data/val_spdsvb.v3.csv'
cfg.prompt_column = 'prompt'
cfg.response_column = 'response'
cfg.history_column = None
cfg.source_prefix = ''  # prefix text prepended to every prompt
cfg.max_source_length = 128
cfg.max_target_length = 128
#model
cfg.model_name_or_path = consts.MODEL_PATH_ChatGLM  # remote: 'THUDM/chatglm-6b'
cfg.quantization_bit = None  # 4 or 8, only usable for inference
#train
cfg.epochs = 50
cfg.lr = 5e-3
cfg.batch_size = 1
cfg.gradient_accumulation_steps = 16  # gradient accumulation
cfg.ckpt_path = '../../model/ckpt/chatglm-6b-lora-spdsvb-v3'
## --------- end config -------------
\ No newline at end of file
from torchkeras import KerasModel
from accelerate import Accelerator
import torch
class StepRunner:
def __init__(self, net, loss_fn, accelerator=None, stage = "train", metrics_dict = None,
optimizer = None, lr_scheduler = None
):
self.net,self.loss_fn,self.metrics_dict,self.stage = net,loss_fn,metrics_dict,stage
self.optimizer,self.lr_scheduler = optimizer,lr_scheduler
self.accelerator = accelerator if accelerator is not None else Accelerator()
if self.stage=='train':
self.net.train()
else:
self.net.eval()
def __call__(self, batch):
#loss
with self.accelerator.autocast():
loss = self.net(input_ids=batch["input_ids"],labels=batch["labels"]).loss
#backward()
if self.optimizer is not None and self.stage=="train":
self.accelerator.backward(loss)
if self.accelerator.sync_gradients:
self.accelerator.clip_grad_norm_(self.net.parameters(), 1.0)
self.optimizer.step()
if self.lr_scheduler is not None:
self.lr_scheduler.step()
self.optimizer.zero_grad()
all_loss = self.accelerator.gather(loss).sum()
#losses (or plain metrics that can be averaged)
step_losses = {self.stage+"_loss":all_loss.item()}
#metrics (stateful metrics)
step_metrics = {}
if self.stage=="train":
if self.optimizer is not None:
step_metrics['lr'] = self.optimizer.state_dict()['param_groups'][0]['lr']
else:
step_metrics['lr'] = 0.0
return step_losses,step_metrics
KerasModel.StepRunner = StepRunner
# Save only the trainable LoRA parameters
def save_ckpt(self, ckpt_path='checkpoint', accelerator = None):
unwrap_net = accelerator.unwrap_model(self.net)
unwrap_net.save_pretrained(ckpt_path)
def load_ckpt(self, ckpt_path='checkpoint'):
import os
self.net.load_state_dict(
torch.load(os.path.join(ckpt_path,'adapter_model.bin')),strict =False)
self.from_scratch = False
KerasModel.save_ckpt = save_ckpt
KerasModel.load_ckpt = load_ckpt
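# Minimal usage sketch (mirrors the training scripts that import this module; peft_model,
# dl_train and dl_val are assumed to be built elsewhere):
#   optimizer = torch.optim.AdamW(peft_model.parameters(), lr=1e-3)
#   keras_model = KerasModel(peft_model, loss_fn=None, optimizer=optimizer)
#   keras_model.fit(train_data=dl_train, val_data=dl_val, epochs=3,
#                   monitor='val_loss', mode='min', ckpt_path='checkpoint')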
\ No newline at end of file
import sys
sys.path.append("../..")
from common import consts
## --------- config -------------
# from argparse import Namespace
# cfg = Namespace()
# #dataset
# cfg.train_file = '../../../data/train_spdsvb_v10.csv'
# cfg.val_file = '../../../data/val_spdsvb_v4.csv'
# cfg.prompt_column = 'prompt'
# cfg.response_column = 'response'
# cfg.history_column = None
# cfg.source_prefix = consts.INSTRUCTION_V1  # prefix text prepended to every prompt
# cfg.max_source_length = 64
# cfg.max_target_length = 128
# #model
# cfg.model_name_or_path = consts.MODEL_PATH_ChatGLM2_32K  # remote: 'THUDM/chatglm-6b'
# cfg.quantization_bit = None  # 4 or 8, only usable for inference
# #train
# cfg.epochs = 20
# cfg.lr = 1e-3
# cfg.batch_size = 4
# cfg.gradient_accumulation_steps = 16  # gradient accumulation
# cfg.ckpt_path = '../../../model/ckpt/chatglm-6b-lora-spdsvb-INSv10-{:.0e}-{}'.format(cfg.lr, cfg.epochs)
import argparse
parser = argparse.ArgumentParser(description='Lora Chatbot Training')
# dataset
parser.add_argument('--train_file', type=str, default='../../../data/train_spdsvb_v10.csv', help='path to training dataset')
parser.add_argument('--val_file', type=str, default='../../../data/val_spdsvb_v4.csv', help='path to validation dataset')
parser.add_argument('--prompt_column', type=str, default='prompt', help='name of the prompt column')
parser.add_argument('--response_column', type=str, default='response', help='name of the response column')
parser.add_argument('--history_column', type=str, default=None, help='name of the history column')
parser.add_argument('--source_prefix', type=str, default=consts.INSTRUCTION_V1, help='prefix added to each prompt')
parser.add_argument('--max_source_length', type=int, default=64, help='maximum length of the input sequence')
parser.add_argument('--max_target_length', type=int, default=128, help='maximum length of the output sequence')
# model
parser.add_argument('--model_name_or_path', type=str, default=consts.MODEL_PATH_ChatGLM2_32K, help='path to the pre-trained model')
parser.add_argument('--quantization_bit', type=int, default=None, help='number of bits for quantization (4 or 8)')
# train
parser.add_argument('--epochs', type=int, default=20, help='number of training epochs')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--batch_size', type=int, default=4, help='batch size')
parser.add_argument('--gradient_accumulation_steps', type=int, default=16, help='number of gradient accumulation steps')
parser.add_argument('--ckpt_path', type=str, default="", help='path to save the trained model')
cfg = parser.parse_args()
## --------- end config -------------
## --------- load model -------------
import transformers
from transformers import AutoModel,AutoTokenizer,AutoConfig,DataCollatorForSeq2Seq
config = AutoConfig.from_pretrained(cfg.model_name_or_path, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(
cfg.model_name_or_path, trust_remote_code=True)
model = AutoModel.from_pretrained(cfg.model_name_or_path,config=config,
trust_remote_code=True).half()
# Quantize first to shrink the model
if cfg.quantization_bit is not None:
print(f"Quantized to {cfg.quantization_bit} bit")
model = model.quantize(cfg.quantization_bit)
# Then move it to the GPU
model = model.cuda()
## --------- end load model -------------
## --------- load data -------------
def preprocess(examples):
max_seq_length = cfg.max_source_length + cfg.max_target_length
model_inputs = {
"input_ids": [],
"labels": [],
}
for i in range(len(examples[cfg.prompt_column])):
if examples[cfg.prompt_column][i] and examples[cfg.response_column][i]:
query, answer = examples[cfg.prompt_column][i], examples[cfg.response_column][i]
history = examples[cfg.history_column][i] if cfg.history_column is not None else None
# prompt = tokenizer.build_prompt(query, history)
prompt = query
prompt = cfg.source_prefix + prompt
a_ids = tokenizer.encode(text=prompt, add_special_tokens=True, truncation=True,
max_length=cfg.max_source_length)
b_ids = tokenizer.encode(text=answer, add_special_tokens=False, truncation=True,
max_length=cfg.max_target_length)
context_length = len(a_ids)
input_ids = a_ids + b_ids + [tokenizer.eos_token_id]
labels = [tokenizer.pad_token_id] * context_length + b_ids + [tokenizer.eos_token_id]
pad_len = max_seq_length - len(input_ids)
input_ids = input_ids + [tokenizer.pad_token_id] * pad_len
labels = labels + [tokenizer.pad_token_id] * pad_len
labels = [(l if l != tokenizer.pad_token_id else -100) for l in labels]
model_inputs["input_ids"].append(input_ids)
model_inputs["labels"].append(labels)
return model_inputs
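# Layout of one preprocessed example (illustrative sketch; assumes eos_token_id != pad_token_id):
#   input_ids: [ a_1 ... a_k | b_1 ... b_m | eos | pad ... pad ]   (prompt, answer, eos, padding)
#   labels:    [ -100 ... -100 | b_1 ... b_m | eos | -100 ... -100 ]
# so the loss is computed only on the answer tokens and the terminating eos.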
from datasets import load_dataset
data_files = {"train": cfg.train_file, "val": cfg.val_file}
data_sets = load_dataset(cfg.train_file.split(".")[-1], data_files=data_files)
ds_train = ds_val = None
if cfg.train_file is not None:
ds_train = data_sets["train"].map(preprocess, batched=True,remove_columns=data_sets["train"].column_names)
print(data_sets["train"])
if cfg.val_file is not None:
ds_val = data_sets["val"].map(preprocess, batched=True,remove_columns=data_sets["val"].column_names)
print(data_sets["val"])
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=None,
label_pad_token_id=-100,
pad_to_multiple_of=None,
padding=False
)
from torch.utils.data import DataLoader
dl_train = DataLoader(ds_train, batch_size = cfg.batch_size,
num_workers = 2, shuffle = True, collate_fn = data_collator
)
dl_val = DataLoader(ds_val, batch_size = cfg.batch_size,
num_workers = 2, shuffle = False, collate_fn = data_collator
)
## --------- end load data -------------
## --------- train -------------
from peft import get_peft_model, AdaLoraConfig, TaskType
# Reduce GPU memory usage during training
model.config.use_cache=False
model.supports_gradient_checkpointing = True #
model.gradient_checkpointing_enable()
model.enable_input_require_grads()
peft_config = AdaLoraConfig(
task_type=TaskType.CAUSAL_LM, inference_mode=False,
r=8,
lora_alpha=32, lora_dropout=0.1,
target_modules=["query", "value"]
)
peft_model = get_peft_model(model, peft_config)
peft_model.is_parallelizable = True
peft_model.model_parallel = True
peft_model.print_trainable_parameters()
from keras_model import KerasModel
import torch
optimizer = torch.optim.AdamW(peft_model.parameters(),lr=cfg.lr)
keras_model = KerasModel(peft_model, loss_fn = None, optimizer=optimizer)
df = keras_model.fit(train_data = dl_train,
val_data = dl_val,
epochs=cfg.epochs,
patience=20,
monitor='val_loss',
mode='min',
ckpt_path = cfg.ckpt_path,
mixed_precision='fp16',
gradient_accumulation_steps = cfg.gradient_accumulation_steps
)
df.to_json(f"{cfg.ckpt_path}/train_history.json")
import sys
sys.path.append("../..")
from common import consts
## --------- config -------------
from argparse import Namespace
cfg = Namespace()
#dataset
cfg.train_file = '../../../data/train_spdsvb_v7.csv'
cfg.val_file = '../../../data/val_spdsvb_v4.csv'
cfg.prompt_column = 'prompt'
cfg.response_column = 'response'
cfg.history_column = None
cfg.source_prefix = consts.INSTRUCTION_V1  # prefix text prepended to every prompt
cfg.max_source_length = 64
cfg.max_target_length = 128
#model
cfg.model_name_or_path = consts.MODEL_PATH_ChatGLM  # remote: 'THUDM/chatglm-6b'
cfg.quantization_bit = None  # 4 or 8, only usable for inference
#train
cfg.epochs = 50
cfg.lr = 1e-3
cfg.batch_size = 1
cfg.gradient_accumulation_steps = 16  # gradient accumulation
cfg.ckpt_path = '../../../model/ckpt/chatglm-6b-lora-single-INSq1-{:.0e}-{}'.format(cfg.lr, cfg.epochs)
## --------- end config -------------
## --------- load model -------------
import transformers
from transformers import AutoModel,AutoTokenizer,AutoConfig,DataCollatorForSeq2Seq
config = AutoConfig.from_pretrained(cfg.model_name_or_path, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(
cfg.model_name_or_path, trust_remote_code=True)
model = AutoModel.from_pretrained(cfg.model_name_or_path,config=config,
trust_remote_code=True).half()
# Quantize first to shrink the model
if cfg.quantization_bit is not None:
print(f"Quantized to {cfg.quantization_bit} bit")
model = model.quantize(cfg.quantization_bit)
# Then move it to the GPU
model = model.cuda()
## --------- end load model -------------
## --------- load data -------------
def preprocess(examples):
max_seq_length = cfg.max_source_length + cfg.max_target_length
model_inputs = {
"input_ids": [],
"labels": [],
}
for i in range(len(examples[cfg.prompt_column])):
if examples[cfg.prompt_column][i] and examples[cfg.response_column][i]:
query, answer = examples[cfg.prompt_column][i], examples[cfg.response_column][i]
history = examples[cfg.history_column][i] if cfg.history_column is not None else None
# prompt = tokenizer.build_prompt(query, history)
prompt = query
prompt = cfg.source_prefix + prompt
a_ids = tokenizer.encode(text=prompt, add_special_tokens=True, truncation=True,
max_length=cfg.max_source_length)
b_ids = tokenizer.encode(text=answer, add_special_tokens=False, truncation=True,
max_length=cfg.max_target_length)
context_length = len(a_ids)
input_ids = a_ids + b_ids + [tokenizer.eos_token_id]
labels = [tokenizer.pad_token_id] * context_length + b_ids + [tokenizer.eos_token_id]
pad_len = max_seq_length - len(input_ids)
input_ids = input_ids + [tokenizer.pad_token_id] * pad_len
labels = labels + [tokenizer.pad_token_id] * pad_len
labels = [(l if l != tokenizer.pad_token_id else -100) for l in labels]
model_inputs["input_ids"].append(input_ids)
model_inputs["labels"].append(labels)
return model_inputs
import datasets
from datasets import load_dataset
# data_files = {"train": cfg.train_file, "val": cfg.val_file}
# data_sets = load_dataset(cfg.train_file.split(".")[-1], data_files=data_files)
import pandas as pd
description = """1.只有产品代码输入正确时才能进入购买页面;
2.取当前操作员可操作账号,如果账号为监管账户,则不允许操作。
3.产品说明书、产品合同,必须要客户客户阅读后才能进行提交;
4.交易全部落地流程引擎处理;
5.会根据活期账户对应的核心客户号查询该客户是否设置KYC笔笔落地的标识。设置KYC笔笔落地会发送对应的KYC落地邮件信息。(RM, CAS@spd-svbank.com,asu@spd-svbank.com)
"""
keyword = "结构性存款购买的业务规则"
def get_prompt_list(keyword):
return [f'{keyword}',
f'{keyword}是什么?',
f'介绍一下{keyword}',
f'你听过{keyword}吗?',
f'啥是{keyword}?',
f'{keyword}是什么样的?',
f'{keyword}有什么要求?',
]
data =[{'prompt':x,'response':description} for x in get_prompt_list(keyword) ]
dfdata = pd.DataFrame(data)
for index, row in dfdata.iterrows():
print(f"[{index}] {row['prompt']}: {row['response'][:10]}...")
ds_train_raw = ds_val_raw = datasets.Dataset.from_pandas(dfdata)
data_sets = {
"train": ds_train_raw,
"val": ds_val_raw
}
ds_train = ds_val = None
if cfg.train_file is not None:
ds_train = data_sets["train"].map(preprocess, batched=True,remove_columns=data_sets["train"].column_names)
print(data_sets["train"])
if cfg.val_file is not None:
ds_val = data_sets["val"].map(preprocess, batched=True,remove_columns=data_sets["val"].column_names)
print(data_sets["val"])
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=None,
label_pad_token_id=-100,
pad_to_multiple_of=None,
padding=False
)
from torch.utils.data import DataLoader
dl_train = DataLoader(ds_train, batch_size = cfg.batch_size,
num_workers = 2, shuffle = True, collate_fn = data_collator
)
dl_val = DataLoader(ds_val, batch_size = cfg.batch_size,
num_workers = 2, shuffle = False, collate_fn = data_collator
)
## --------- end load data -------------
## --------- train -------------
from peft import get_peft_model, AdaLoraConfig, TaskType
# Reduce GPU memory usage during training
model.config.use_cache=False
model.supports_gradient_checkpointing = True #
model.gradient_checkpointing_enable()
model.enable_input_require_grads()
peft_config = AdaLoraConfig(
task_type=TaskType.CAUSAL_LM, inference_mode=False,
r=8,
lora_alpha=32, lora_dropout=0.1,
target_modules=["query", "value"]
)
peft_model = get_peft_model(model, peft_config)
peft_model.is_parallelizable = True
peft_model.model_parallel = True
peft_model.print_trainable_parameters()
from keras_model import KerasModel
import torch
optimizer = torch.optim.AdamW(peft_model.parameters(),lr=cfg.lr)
keras_model = KerasModel(peft_model, loss_fn = None, optimizer=optimizer)
df = keras_model.fit(train_data = dl_train,
val_data = dl_val,
epochs=cfg.epochs,
patience=20,
monitor='val_loss',
mode='min',
ckpt_path = cfg.ckpt_path,
mixed_precision='fp16',
gradient_accumulation_steps = cfg.gradient_accumulation_steps
)
df.to_json(f"{cfg.ckpt_path}/train_history.json")
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for sequence to sequence.
"""
# You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments.
import logging
import os
import sys
sys.path.append("../..")
import json
import numpy as np
from datasets import load_dataset
import jieba
from rouge_chinese import Rouge
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
import torch
import transformers
from transformers import (
AutoConfig,
AutoModel,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
Seq2SeqTrainingArguments,
set_seed,
)
from ptuning.trainer_seq2seq import Seq2SeqTrainer
from ptuning.arguments import ModelArguments, DataTrainingArguments
logger = logging.getLogger(__name__)
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
# datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Load dataset
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.validation_file.split(".")[-1]
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
raw_datasets = load_dataset(
extension,
data_files=data_files,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
# Load pretrained model and tokenizer
config = AutoConfig.from_pretrained(model_args.model_name_or_path, trust_remote_code=True)
config.pre_seq_len = model_args.pre_seq_len
config.prefix_projection = model_args.prefix_projection
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, trust_remote_code=True)
if model_args.ptuning_checkpoint is not None:
# Evaluation
# Loading extra state dict of prefix encoder
model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True)
prefix_state_dict = torch.load(os.path.join(model_args.ptuning_checkpoint, "pytorch_model.bin"))
new_prefix_state_dict = {}
for k, v in prefix_state_dict.items():
if k.startswith("transformer.prefix_encoder."):
new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)
else:
model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True)
if model_args.quantization_bit is not None:
print(f"Quantized to {model_args.quantization_bit} bit")
model = model.quantize(model_args.quantization_bit)
if model_args.pre_seq_len is not None:
# P-tuning v2
model = model.half()
model.transformer.prefix_encoder.float()
else:
if training_args.do_eval:
model = model.half().cuda()
else:
# Finetune
model = model.float()
prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
# Preprocessing the datasets.
# We need to tokenize inputs and targets.
if training_args.do_train:
column_names = raw_datasets["train"].column_names
elif training_args.do_eval:
column_names = raw_datasets["validation"].column_names
elif training_args.do_predict:
column_names = raw_datasets["test"].column_names
else:
logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.")
return
# Get the column names for input/target.
prompt_column = data_args.prompt_column
response_column = data_args.response_column
history_column = data_args.history_column
# Temporarily set max_target_length for training.
max_target_length = data_args.max_target_length
def preprocess_function_eval(examples):
inputs, targets = [], []
for i in range(len(examples[prompt_column])):
if examples[prompt_column][i] and examples[response_column][i]:
query = examples[prompt_column][i]
history = examples[history_column][i] if history_column is not None else None
prompt = tokenizer.build_prompt(query, history)
inputs.append(prompt)
targets.append(examples[response_column][i])
inputs = [prefix + inp for inp in inputs]
model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, truncation=True, padding=True)
labels = tokenizer(text_target=targets, max_length=max_target_length, truncation=True)
if data_args.ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
def preprocess_function_train(examples):
max_seq_length = data_args.max_source_length + data_args.max_target_length + 1
model_inputs = {
"input_ids": [],
"labels": [],
}
for i in range(len(examples[prompt_column])):
if examples[prompt_column][i] and examples[response_column][i]:
query, answer = examples[prompt_column][i], examples[response_column][i]
# history = examples[history_column][i] if history_column is not None else None
if history_column is None:
prompt = query
else:
prompt = ""
history = examples[history_column][i]
for turn_idx, (old_query, response) in enumerate(history):
prompt += "[Round {}]\n问:{}\n答:{}\n".format(turn_idx, old_query, response)
prompt += "[Round {}]\n问:{}\n答:".format(len(history), query)
prompt = prefix + prompt
a_ids = tokenizer.encode(text=prompt, add_special_tokens=True, truncation=True,
max_length=data_args.max_source_length)
b_ids = tokenizer.encode(text=answer, add_special_tokens=False, truncation=True,
max_length=data_args.max_target_length)
context_length = len(a_ids)
input_ids = a_ids + b_ids + [tokenizer.eos_token_id]
labels = [tokenizer.pad_token_id] * context_length + b_ids + [tokenizer.eos_token_id]
pad_len = max_seq_length - len(input_ids)
input_ids = input_ids + [tokenizer.pad_token_id] * pad_len
labels = labels + [tokenizer.pad_token_id] * pad_len
if data_args.ignore_pad_token_for_loss:
labels = [(l if l != tokenizer.pad_token_id else -100) for l in labels]
model_inputs["input_ids"].append(input_ids)
model_inputs["labels"].append(labels)
return model_inputs
def print_dataset_example(example):
print("input_ids", example["input_ids"])
print("inputs", tokenizer.decode(example["input_ids"]))
print("label_ids", example["labels"])
print("labels", tokenizer.decode(example["labels"]))
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
if data_args.max_train_samples is not None:
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
with training_args.main_process_first(desc="train dataset map pre-processing"):
train_dataset = train_dataset.map(
preprocess_function_train,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on train dataset",
)
print_dataset_example(train_dataset[0])
if training_args.do_eval:
max_target_length = data_args.val_max_target_length
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_dataset.map(
preprocess_function_eval,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
print_dataset_example(eval_dataset[0])
if training_args.do_predict:
max_target_length = data_args.val_max_target_length
if "test" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset")
predict_dataset = raw_datasets["test"]
if data_args.max_predict_samples is not None:
max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
predict_dataset = predict_dataset.select(range(max_predict_samples))
with training_args.main_process_first(desc="prediction dataset map pre-processing"):
predict_dataset = predict_dataset.map(
preprocess_function_eval,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on prediction dataset",
)
print_dataset_example(predict_dataset[0])
# Data collator
label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=None,
padding=False
)
# Metric
def compute_metrics(eval_preds):
preds, labels = eval_preds
if isinstance(preds, tuple):
preds = preds[0]
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
if data_args.ignore_pad_token_for_loss:
# Replace -100 in the labels as we can't decode them.
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
score_dict = {
"rouge-1": [],
"rouge-2": [],
"rouge-l": [],
"bleu-4": []
}
for pred, label in zip(decoded_preds, decoded_labels):
if not pred and not label:
continue
hypothesis = list(jieba.cut(pred))
reference = list(jieba.cut(label))
rouge = Rouge()
hypothesis = ' '.join(hypothesis)
reference = ' '.join(reference)
if not hypothesis.strip() or not reference.strip():
continue
scores = rouge.get_scores(hypothesis , reference)
# scores = rouge.get_scores(' '.join(hypothesis) , ' '.join(reference))
result = scores[0]
for k, v in result.items():
score_dict[k].append(round(v["f"] * 100, 4))
bleu_score = sentence_bleu([list(label)], list(pred), smoothing_function=SmoothingFunction().method3)
score_dict["bleu-4"].append(round(bleu_score * 100, 4))
for k, v in score_dict.items():
score_dict[k] = float(np.mean(v))
return score_dict
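    # Illustration of what the metric code compares (segmentation is an assumption; jieba may
    # split differently): jieba.cut("网银转账失败") might yield ["网银", "转账", "失败"], which is
    # joined as "网银 转账 失败" before being scored with rouge_chinese's Rouge().get_scores().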
# Override the decoding parameters of Seq2SeqTrainer
training_args.generation_max_length = (
training_args.generation_max_length
if training_args.generation_max_length is not None
else data_args.val_max_target_length
)
training_args.generation_num_beams = (
data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams
)
# Initialize our Trainer
trainer = Seq2SeqTrainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics if training_args.predict_with_generate else None,
save_changed=model_args.pre_seq_len is not None
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
# elif last_checkpoint is not None:
# checkpoint = last_checkpoint
model.gradient_checkpointing_enable()
model.enable_input_require_grads()
train_result = trainer.train(resume_from_checkpoint=checkpoint)
# trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
results = {}
max_seq_length = data_args.max_source_length + data_args.max_target_length + 1
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate(metric_key_prefix="eval", do_sample=True, top_p=0.7, max_length=max_seq_length, temperature=0.95)
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if training_args.do_predict:
logger.info("*** Predict ***")
predict_results = trainer.predict(predict_dataset, metric_key_prefix="predict", max_length=max_seq_length, do_sample=True, top_p=0.7, temperature=0.95)
metrics = predict_results.metrics
max_predict_samples = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
)
metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
trainer.log_metrics("predict", metrics)
trainer.save_metrics("predict", metrics)
if trainer.is_world_process_zero():
if training_args.predict_with_generate:
predictions = tokenizer.batch_decode(
predict_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
)
predictions = [pred.strip() for pred in predictions]
labels = tokenizer.batch_decode(
predict_results.label_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
)
labels = [label.strip() for label in labels]
output_prediction_file = os.path.join(training_args.output_dir, "generated_predictions.txt")
with open(output_prediction_file, "w", encoding="utf-8") as writer:
for p, l in zip(predictions, labels):
res = json.dumps({"labels": l, "predict": p}, ensure_ascii=False)
writer.write(f"{res}\n")
return results
def train():
## --------- config -------------
from common import consts
from argparse import Namespace
cfg = Namespace()
#dataset
cfg.train_file = '../../../data/train_spdsvb_base.csv'
cfg.val_file = '../../../data/val_spdsvb_base.csv'
cfg.prompt_column = 'prompt'
cfg.response_column = 'response'
cfg.history_column = None
    cfg.source_prefix = ''  # prefix text prepended to every prompt
cfg.max_source_length = 64
cfg.max_target_length = 128
#model
    cfg.model_name_or_path = consts.MODEL_PATH_ChatGLM  # remote: 'THUDM/chatglm-6b'
    cfg.quantization_bit = None  # 4 or 8, only usable for inference
#train
cfg.do_train = True
cfg.do_eval = True
cfg.epochs = 100
cfg.steps = 3000
cfg.lr = 5e-3
cfg.batch_size = 1
    cfg.gradient_accumulation_steps = 16  # gradient accumulation
cfg.pre_seq_len = 128 # None
cfg.output_dir = '../../../model/ckpt/chatglm-6b-pt-spdsvb-base-{:.0e}-{}'.format(cfg.lr, cfg.epochs)
## --------- end config -------------
## --------- load model -------------
import transformers
from transformers import AutoModel,AutoTokenizer,AutoConfig,DataCollatorForSeq2Seq
    config = AutoConfig.from_pretrained(cfg.model_name_or_path, trust_remote_code=True)
    if cfg.pre_seq_len is not None:
        # P-tuning v2: the prefix length must be set on the config before the model is loaded
        print(f"Pre set seq len to {cfg.pre_seq_len}")
        config.pre_seq_len = cfg.pre_seq_len
        config.prefix_projection = getattr(cfg, 'prefix_projection', False)
    tokenizer = AutoTokenizer.from_pretrained(cfg.model_name_or_path, trust_remote_code=True)
    model = AutoModel.from_pretrained(cfg.model_name_or_path, config=config, trust_remote_code=True).half()
    # Quantize first to shrink the model
    if cfg.quantization_bit is not None:
        print(f"Quantized to {cfg.quantization_bit} bit")
        model = model.quantize(cfg.quantization_bit)
    if cfg.pre_seq_len is not None:
        # keep the prefix encoder in fp32
        model.transformer.prefix_encoder.float()
    # Then move it to the GPU
    model = model.cuda()
## --------- end load model -------------
## load data
# Load dataset
data_files = {
"train": cfg.train_file if cfg.train_file is not None else None,
"validation": cfg.val_file if cfg.val_file is not None else None,
"test": cfg.val_file if cfg.val_file is not None else None,
}
data_files = {k: v for k, v in data_files.items() if v is not None}
extension = data_files[list(data_files.keys())[0]].split(".")[-1]
raw_datasets = load_dataset(
extension,
data_files=data_files,
)
    def preprocess_function_train(examples):
        max_seq_length = cfg.max_source_length + cfg.max_target_length + 1
        model_inputs = {
            "input_ids": [],
            "labels": [],
        }
        for i in range(len(examples[cfg.prompt_column])):
            if examples[cfg.prompt_column][i] and examples[cfg.response_column][i]:
                query, answer = examples[cfg.prompt_column][i], examples[cfg.response_column][i]
                history = examples[cfg.history_column][i] if cfg.history_column is not None else None
                prompt = tokenizer.build_prompt(query, history)
                prompt = cfg.source_prefix + prompt
                a_ids = tokenizer.encode(text=prompt, add_special_tokens=True, truncation=True,
                                         max_length=cfg.max_source_length)
                b_ids = tokenizer.encode(text=answer, add_special_tokens=False, truncation=True,
                                         max_length=cfg.max_target_length)
                context_length = len(a_ids)
                input_ids = a_ids + b_ids + [tokenizer.eos_token_id]
                labels = [tokenizer.pad_token_id] * context_length + b_ids + [tokenizer.eos_token_id]
                pad_len = max_seq_length - len(input_ids)
                input_ids = input_ids + [tokenizer.pad_token_id] * pad_len
                labels = labels + [tokenizer.pad_token_id] * pad_len
                # mask the prompt and padding so the loss only covers the response
                labels = [(l if l != tokenizer.pad_token_id else -100) for l in labels]
                model_inputs["input_ids"].append(input_ids)
                model_inputs["labels"].append(labels)
        return model_inputs
def print_dataset_example(example):
print("input_ids", example["input_ids"])
print("inputs", tokenizer.decode(example["input_ids"]))
print("label_ids", example["labels"])
print("labels", tokenizer.decode(example["labels"]))
    if cfg.do_train:
        train_dataset = raw_datasets["train"]
        train_dataset = train_dataset.map(
            preprocess_function_train,
            batched=True,
            remove_columns=raw_datasets["train"].column_names,
            desc="Running tokenizer on train dataset",
        )
        print_dataset_example(train_dataset[0])
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
ptuning_checkpoint: str = field(
default=None, metadata={"help": "Path to p-tuning v2 checkpoints"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
},
)
resize_position_embeddings: Optional[bool] = field(
default=None,
metadata={
"help": (
"Whether to automatically resize the position embeddings if `max_source_length` exceeds "
"the model's position embeddings."
)
},
)
quantization_bit: Optional[int] = field(
default=None
)
pre_seq_len: Optional[int] = field(
default=None
)
prefix_projection: bool = field(
default=False
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
lang: Optional[str] = field(default=None, metadata={"help": "Language id for summarization."})
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
prompt_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
)
response_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the summaries (for summarization)."},
)
history_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the history of chat."},
)
train_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."}
)
validation_file: Optional[str] = field(
default=None,
metadata={
"help": (
"An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file)."
)
},
)
test_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file)."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_source_length: Optional[int] = field(
default=1024,
metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
max_target_length: Optional[int] = field(
default=128,
metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
val_max_target_length: Optional[int] = field(
default=None,
metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`."
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": (
"Whether to pad all samples to model maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
},
)
num_beams: Optional[int] = field(
default=None,
metadata={
"help": (
"Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
"which is used during ``evaluate`` and ``predict``."
)
},
)
ignore_pad_token_for_loss: bool = field(
default=True,
metadata={
"help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."
},
)
source_prefix: Optional[str] = field(
default="", metadata={"help": "A prefix to add before every source text (useful for T5 models)."}
)
forced_bos_token: Optional[str] = field(
default=None,
metadata={
"help": (
"The token to force as the first generated token after the decoder_start_token_id."
"Useful for multilingual models like mBART where the first generated token"
"needs to be the target language token (Usually it is the target language token)"
)
},
)
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None and self.test_file is None:
raise ValueError("Need either a dataset name or a training/validation/test file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if self.val_max_target_length is None:
self.val_max_target_length = self.max_target_length
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task.
"""
import os
from typing import Optional
from transformers import Trainer
import torch
from transformers.modeling_utils import PreTrainedModel, unwrap_model
from transformers.utils import logging
logger = logging.get_logger(__name__)
WEIGHTS_NAME = "pytorch_model.bin"
TRAINING_ARGS_NAME = "training_args.bin"
class PrefixTrainer(Trainer):
def __init__(self, *args, save_changed=False, **kwargs):
self.save_changed = save_changed
super().__init__(*args, **kwargs)
def _save(self, output_dir: Optional[str] = None, state_dict=None):
# If we are executing this function, we are the process zero, so we don't check for that.
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info(f"Saving model checkpoint to {output_dir}")
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
if state_dict is None:
state_dict = self.model.state_dict()
unwrap_model(self.model).save_pretrained(output_dir, state_dict=state_dict)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
if state_dict is None:
state_dict = self.model.state_dict()
torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
if self.save_changed:
print("Saving PrefixEncoder")
state_dict = self.model.state_dict()
filtered_state_dict = {}
for k, v in self.model.named_parameters():
if v.requires_grad:
filtered_state_dict[k] = state_dict[k]
self.model.save_pretrained(output_dir, state_dict=filtered_state_dict)
else:
print("Saving the whole model")
self.model.save_pretrained(output_dir, state_dict=state_dict)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
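# A minimal usage sketch (argument values are illustrative): constructed like a regular Trainer,
# with one extra flag.
#   trainer = PrefixTrainer(model=model, args=training_args, train_dataset=train_dataset,
#                           tokenizer=tokenizer, save_changed=True)
# With save_changed=True, _save() keeps only the parameters that require gradients
# (e.g. a PrefixEncoder); otherwise the whole model is saved via save_pretrained().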
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers.deepspeed import is_deepspeed_zero3_enabled
from .trainer import PrefixTrainer
from transformers.trainer_utils import PredictionOutput
from transformers.utils import logging
logger = logging.get_logger(__name__)
class Seq2SeqTrainer(PrefixTrainer):
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
**gen_kwargs
) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init `compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (`Dataset`, *optional*):
Pass a dataset if you wish to override `self.eval_dataset`. If it is an [`~datasets.Dataset`], columns
not accepted by the `model.forward()` method are automatically removed. It must implement the `__len__`
method.
ignore_keys (`List[str]`, *optional*):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"eval_bleu" if the prefix is `"eval"` (default)
max_length (`int`, *optional*):
The maximum target length to use when predicting with the generate method.
num_beams (`int`, *optional*):
Number of beams for beam search that will be used when predicting with the generate method. 1 means no
beam search.
gen_kwargs:
Additional `generate` specific kwargs.
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
"""
gen_kwargs = gen_kwargs.copy()
if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None:
gen_kwargs["max_length"] = self.args.generation_max_length
gen_kwargs["num_beams"] = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
)
self._gen_kwargs = gen_kwargs
return super().evaluate(eval_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
def predict(
self,
test_dataset: Dataset,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "test",
**gen_kwargs
) -> PredictionOutput:
"""
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
will also return metrics, like in `evaluate()`.
Args:
test_dataset (`Dataset`):
Dataset to run the predictions on. If it is a [`~datasets.Dataset`], columns not accepted by the
`model.forward()` method are automatically removed. Has to implement the method `__len__`
ignore_keys (`List[str]`, *optional*):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"eval_bleu" if the prefix is `"eval"` (default)
max_length (`int`, *optional*):
The maximum target length to use when predicting with the generate method.
num_beams (`int`, *optional*):
Number of beams for beam search that will be used when predicting with the generate method. 1 means no
beam search.
gen_kwargs:
Additional `generate` specific kwargs.
<Tip>
If your predictions or labels have different sequence lengths (for instance because you're doing dynamic
padding in a token classification task) the predictions will be padded (on the right) to allow for
concatenation into one array. The padding index is -100.
</Tip>
Returns: *NamedTuple* A namedtuple with the following keys:
- predictions (`np.ndarray`): The predictions on `test_dataset`.
- label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some).
- metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained
labels).
"""
gen_kwargs = gen_kwargs.copy()
if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None:
gen_kwargs["max_length"] = self.args.generation_max_length
gen_kwargs["num_beams"] = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
)
self._gen_kwargs = gen_kwargs
return super().predict(test_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
Perform an evaluation step on `model` using `inputs`.
Subclass and override to inject custom behavior.
Args:
model (`nn.Module`):
The model to evaluate.
inputs (`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument `labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (`bool`):
Whether or not to return the loss only.
Return:
Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
labels (each being optional).
"""
if not self.args.predict_with_generate or prediction_loss_only:
return super().prediction_step(
model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys
)
has_labels = "labels" in inputs
inputs = self._prepare_inputs(inputs)
# XXX: adapt synced_gpus for fairscale as well
gen_kwargs = self._gen_kwargs.copy()
if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None:
gen_kwargs["max_length"] = self.model.config.max_length
gen_kwargs["num_beams"] = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.model.config.num_beams
)
default_synced_gpus = True if is_deepspeed_zero3_enabled() else False
gen_kwargs["synced_gpus"] = (
gen_kwargs["synced_gpus"] if gen_kwargs.get("synced_gpus") is not None else default_synced_gpus
)
if "attention_mask" in inputs:
gen_kwargs["attention_mask"] = inputs.get("attention_mask", None)
if "position_ids" in inputs:
gen_kwargs["position_ids"] = inputs.get("position_ids", None)
if "global_attention_mask" in inputs:
gen_kwargs["global_attention_mask"] = inputs.get("global_attention_mask", None)
# prepare generation inputs
# some encoder-decoder models can have varying encoder's and thus
# varying model input names
if hasattr(self.model, "encoder") and self.model.encoder.main_input_name != self.model.main_input_name:
generation_inputs = inputs[self.model.encoder.main_input_name]
else:
generation_inputs = inputs[self.model.main_input_name]
gen_kwargs["input_ids"] = generation_inputs
generated_tokens = self.model.generate(**gen_kwargs)
generated_tokens = generated_tokens[:, generation_inputs.size()[-1]:]
# in case the batch is shorter than max length, the output should be padded
if gen_kwargs.get("max_length") is not None and generated_tokens.shape[-1] < gen_kwargs["max_length"]:
generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])
elif gen_kwargs.get("max_new_tokens") is not None and generated_tokens.shape[-1] < (
gen_kwargs["max_new_tokens"] + 1
):
generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_new_tokens"] + 1)
loss = None
if self.args.prediction_loss_only:
return (loss, None, None)
if has_labels:
labels = inputs["labels"]
if gen_kwargs.get("max_length") is not None and labels.shape[-1] < gen_kwargs["max_length"]:
labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])
elif gen_kwargs.get("max_new_tokens") is not None and labels.shape[-1] < (
gen_kwargs["max_new_tokens"] + 1
):
labels = self._pad_tensors_to_max_len(labels, (gen_kwargs["max_new_tokens"] + 1))
else:
labels = None
return (loss, generated_tokens, labels)
def _pad_tensors_to_max_len(self, tensor, max_length):
if self.tokenizer is not None and hasattr(self.tokenizer, "pad_token_id"):
# If PAD token is not defined at least EOS token has to be defined
pad_token_id = (
self.tokenizer.pad_token_id if self.tokenizer.pad_token_id is not None else self.tokenizer.eos_token_id
)
else:
if self.model.config.pad_token_id is not None:
pad_token_id = self.model.config.pad_token_id
else:
raise ValueError("Pad_token_id must be set in the configuration of the model, in order to pad tensors")
padded_tensor = pad_token_id * torch.ones(
(tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
)
padded_tensor[:, : tensor.shape[-1]] = tensor
return padded_tensor
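# A minimal usage sketch (argument values are illustrative):
#   trainer = Seq2SeqTrainer(model=model, args=training_args, train_dataset=train_dataset,
#                            eval_dataset=eval_dataset, tokenizer=tokenizer,
#                            data_collator=data_collator, compute_metrics=compute_metrics,
#                            save_changed=True)
#   metrics = trainer.evaluate(metric_key_prefix="eval", max_length=128, num_beams=1)
# max_length / num_beams are forwarded to model.generate() when args.predict_with_generate is set.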
# -*- coding: utf-8 -*-
# time: 2023/6/1 17:19
# file: train_qlora.py
# author: zmfy
# email: shuxueslpi@163.com
import os
import argparse
from typing import List, Dict, Optional
import torch
from loguru import logger
from datasets import load_dataset
from transformers import (
AutoModel,
AutoTokenizer,
HfArgumentParser,
set_seed,
TrainingArguments,
Trainer,
BitsAndBytesConfig
)
from peft import (
TaskType,
LoraConfig,
get_peft_model,
set_peft_model_state_dict,
prepare_model_for_kbit_training
)
from peft.utils import TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING
_compute_dtype_map = {
'fp32': torch.float32,
'fp16': torch.float16,
'bf16': torch.bfloat16
}
def parse_args():
parser = argparse.ArgumentParser(description='ChatGLM-6B QLoRA')
    parser.add_argument('--train_args_json', type=str, required=True, help='path to a JSON file of TrainingArguments')
    parser.add_argument('--model_name_or_path', type=str, default='THUDM/chatglm-6b', help='model id or local path')
    parser.add_argument('--train_data_path', type=str, required=True, help='path to the training data')
    parser.add_argument('--eval_data_path', type=str, default=None, help='path to the validation data')
    parser.add_argument('--xname', type=str, default='', help='name of the input (prompt) column in the dataset')
    parser.add_argument('--yname', type=str, default='', help='name of the output (response) column in the dataset')
    parser.add_argument('--output_dir', type=str, default='output', help='output directory')
    parser.add_argument('--learning_rate', type=float, default=5e-5, help='learning rate')
    parser.add_argument('--num_train_epochs', type=int, default=1, help='number of training epochs')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--max_input_length', type=int, default=512, help='maximum length of instruction + input')
    parser.add_argument('--max_output_length', type=int, default=1536, help='maximum length of the output')
    parser.add_argument('--lora_rank', type=int, default=4, help='lora rank')
    parser.add_argument('--lora_alpha', type=int, default=32, help='lora_alpha')
    parser.add_argument('--lora_dropout', type=float, default=0.05, help='lora dropout')
    parser.add_argument('--resume_from_checkpoint', type=str, default=None, help='checkpoint path to resume training from')
    parser.add_argument('--prompt_text', type=str, default='', help='instruction text prepended to every sample')
    parser.add_argument('--compute_dtype', type=str, default='fp32',
                        choices=['fp32', 'fp16', 'bf16'], help='compute dtype')
return parser.parse_args()
def tokenize_func(example, tokenizer, global_args, ignore_label_id=-100):
"""单样本tokenize处理"""
question = global_args.prompt_text + example[global_args.xname]
if example.get('input', None):
if example['input'].strip():
question += f'''\n{example['input']}'''
answer = example[global_args.yname]
q_ids = tokenizer.encode(text=question, add_special_tokens=False)
a_ids = tokenizer.encode(text=answer, add_special_tokens=False)
if len(q_ids) > global_args.max_input_length - 2: # 2 - gmask, bos
q_ids = q_ids[: global_args.max_input_length - 2]
if len(a_ids) > global_args.max_output_length - 1: # 1 - eos
a_ids = a_ids[: global_args.max_output_length - 1]
input_ids = tokenizer.build_inputs_with_special_tokens(q_ids, a_ids)
# question_length = input_ids.index(tokenizer.bos_token_id)
question_length = len(q_ids) + 2 # chatglm1 - gmask, bos, chatglm2 - gmask, sop
labels = [ignore_label_id] * question_length + input_ids[question_length:]
return {'input_ids': input_ids, 'labels': labels}
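# Example of the resulting layout (hypothetical ids, assuming the ChatGLM2-style prompt
# [gMASK, sop] + q_ids + a_ids + [eos] built by build_inputs_with_special_tokens):
#   q_ids = [q1, q2, q3], a_ids = [a1, a2]
#   input_ids       = [gMASK, sop, q1, q2, q3, a1, a2, eos]
#   question_length = len(q_ids) + 2 = 5
#   labels          = [-100, -100, -100, -100, -100, a1, a2, eos]
# so the loss is only computed on the answer tokens.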
def get_dataset(data_path, tokenizer, global_args):
    """Load a local data file, tokenize and shuffle it, and return a datasets.Dataset."""
data = load_dataset(data_path.split(".")[-1], data_files=data_path)
dataset = data['train'].filter(lambda example: example[global_args.xname] is not None)
# display(dataset)
column_names = dataset.column_names
dataset = dataset.map(lambda example: tokenize_func(example, tokenizer, global_args),
batched=False, remove_columns=column_names)
dataset = dataset.shuffle(seed=global_args.seed)
dataset = dataset.flatten_indices()
return dataset
class DataCollatorForChatGLM:
def __init__(self,
pad_token_id: int,
max_length: int = 2048,
ignore_label_id: int = -100):
self.pad_token_id = pad_token_id
self.ignore_label_id = ignore_label_id
self.max_length = max_length
def __call__(self, batch_data: List[Dict[str, List]]) -> Dict[str, torch.Tensor]:
"""根据batch最大长度做padding"""
len_list = [len(d['input_ids']) for d in batch_data]
batch_max_len = max(len_list)
input_ids, labels = [], []
for len_of_d, d in sorted(zip(len_list, batch_data), key=lambda x: -x[0]):
pad_len = batch_max_len - len_of_d
ids = d['input_ids'] + [self.pad_token_id] * pad_len
label = d['labels'] + [self.ignore_label_id] * pad_len
if batch_max_len > self.max_length:
ids = ids[: self.max_length]
label = label[: self.max_length]
input_ids.append(torch.LongTensor(ids))
labels.append(torch.LongTensor(label))
input_ids = torch.stack(input_ids)
labels = torch.stack(labels)
return {'input_ids': input_ids, 'labels': labels}
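# A minimal sketch of what the collator produces (hypothetical values):
#   batch = [{'input_ids': [1, 2, 3], 'labels': [-100, -100, 3]},
#            {'input_ids': [4, 5],    'labels': [-100, 5]}]
# with pad_token_id=0 the batch max length is 3; samples are sorted longest-first,
# the second sample becomes input_ids [4, 5, 0] and labels [-100, 5, -100],
# and both fields are stacked into LongTensors of shape (2, 3).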
class LoRATrainer(Trainer):
def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = False):
"""只保存adapter"""
if output_dir is None:
output_dir = self.args.output_dir
self.model.save_pretrained(output_dir)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def train(global_args):
hf_parser = HfArgumentParser(TrainingArguments)
hf_train_args, = hf_parser.parse_json_file(json_file=global_args.train_args_json)
set_seed(global_args.seed)
hf_train_args.seed = global_args.seed
hf_train_args.output_dir = global_args.output_dir
hf_train_args.learning_rate = global_args.learning_rate
hf_train_args.num_train_epochs = global_args.num_train_epochs
model_max_length = global_args.max_input_length + global_args.max_output_length
tokenizer = AutoTokenizer.from_pretrained(global_args.model_name_or_path, trust_remote_code=True)
# Quantization
q_config = BitsAndBytesConfig(load_in_4bit=True,
bnb_4bit_quant_type='nf4',
bnb_4bit_use_double_quant=True,
bnb_4bit_compute_dtype=_compute_dtype_map[global_args.compute_dtype])
model = AutoModel.from_pretrained(global_args.model_name_or_path,
quantization_config=q_config,
device_map='auto',
trust_remote_code=True)
model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=True)
# LoRA
target_modules = TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING['chatglm']
lora_config = LoraConfig(
r=global_args.lora_rank,
lora_alpha=global_args.lora_alpha,
target_modules=target_modules,
lora_dropout=global_args.lora_dropout,
bias='none',
inference_mode=False,
task_type=TaskType.CAUSAL_LM
)
model = get_peft_model(model, lora_config)
resume_from_checkpoint = global_args.resume_from_checkpoint
if resume_from_checkpoint is not None:
checkpoint_name = os.path.join(resume_from_checkpoint, 'pytorch_model.bin')
if not os.path.exists(checkpoint_name):
checkpoint_name = os.path.join(
resume_from_checkpoint, 'adapter_model.bin'
)
resume_from_checkpoint = False
if os.path.exists(checkpoint_name):
logger.info(f'Restarting from {checkpoint_name}')
adapters_weights = torch.load(checkpoint_name)
set_peft_model_state_dict(model, adapters_weights)
else:
logger.info(f'Checkpoint {checkpoint_name} not found')
model.print_trainable_parameters()
# data
    train_dataset = get_dataset(global_args.train_data_path, tokenizer, global_args)
eval_dataset = None
if global_args.eval_data_path:
        eval_dataset = get_dataset(global_args.eval_data_path, tokenizer, global_args)
data_collator = DataCollatorForChatGLM(pad_token_id=tokenizer.pad_token_id,
max_length=model_max_length)
# train
trainer = LoRATrainer(
model=model,
args=hf_train_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
data_collator=data_collator
)
trainer.train(resume_from_checkpoint=resume_from_checkpoint)
trainer.model.save_pretrained(hf_train_args.output_dir)
if __name__ == "__main__":
args = parse_args()
train(args)
import os
import platform
import signal
import sys
sys.path.append("../..")
from llm.loader import ModelLoader
from common import consts
## --------- config -------------
from argparse import Namespace
cfg = Namespace()
cfg.max_source_length = 64
cfg.max_target_length = 128
#model
cfg.model_name_or_path = consts.MODEL_PATH_ChatGLM  # remote id: 'THUDM/chatglm-6b'
cfg.quantization_bit = None  # 4 or 8 may be chosen, but only for inference
cfg.ckpt_path = '../../../model/ckpt/chatglm-6b-lora-spdsvb-INSv4-1e-03-50'
## --------- end config -------------
loader = ModelLoader(cfg.model_name_or_path, cfg.quantization_bit)
loader.load_lora(cfg.ckpt_path,)
model,tokenizer = loader.models()
model = model.eval()
# response=model.chat(tokenizer,"客户在对账时,提示:请您先修改PIN码后才能使用USBKey进行安全操作")
# print(response)
os_name = platform.system()
clear_command = 'cls' if os_name == 'Windows' else 'clear'
stop_stream = False
def signal_handler(signal, frame):
global stop_stream
stop_stream = True
def build_prompt(history):
prompt = "欢迎使用 ChatGLM-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序"
for query, response in history:
prompt += f"\n\n用户:{query}"
prompt += f"\n\nChatGLM-6B:{response}"
return prompt
def main():
history = []
global stop_stream
print("欢迎使用 ChatGLM-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序")
while True:
query = input("\n用户:")
if query.strip() == "stop":
break
if query.strip() == "clear":
history = []
os.system(clear_command)
print("欢迎使用 ChatGLM-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序")
continue
count = 0
for response, history in model.stream_chat(tokenizer, query, history=history):
if stop_stream:
stop_stream = False
break
else:
count += 1
if count % 8 == 0:
os.system(clear_command)
print(build_prompt(history), flush=True)
signal.signal(signal.SIGINT, signal_handler)
os.system(clear_command)
print(build_prompt(history), flush=True)
if __name__ == "__main__":
main()
\ No newline at end of file
import os
import platform
import signal
import sys
sys.path.append("../..")
from llm.loader import ModelLoader
from common import consts
## --------- config -------------
from argparse import Namespace
cfg = Namespace()
#model
cfg.source_prefix = consts.INSTRUCTION_V1
cfg.model_name_or_path = consts.MODEL_PATH_ChatGLM  # remote id: 'THUDM/chatglm-6b'
cfg.quantization_bit = None  # 4 or 8 may be chosen, but only for inference
cfg.ckpt_path_v7 = '../../../model/ckpt/chatglm-6b-lora-spdsvb-INSv7-1e-03-50'
cfg.ckpt_path_q1 = '../../../model/ckpt/chatglm-6b-lora-single-INSq1-1e-03-50'
## --------- end config -------------
loader = ModelLoader(cfg.model_name_or_path, cfg.quantization_bit)
model,tokenizer = loader.models()
model = model.eval()
from peft import PeftModel
print("+++++++++++++++++++++++++++")
query = "你知道结构性存款购买的业务规则是什么吗?"
if cfg.source_prefix is not None:
query = cfg.source_prefix + query
description = """1.只有产品代码输入正确时才能进入购买页面;
2.取当前操作员可操作账号,如果账号为监管账户,则不允许操作。
3.产品说明书、产品合同,必须要客户客户阅读后才能进行提交;
4.交易全部落地流程引擎处理;
5.会根据活期账户对应的核心客户号查询该客户是否设置KYC笔笔落地的标识。设置KYC笔笔落地会发送对应的KYC落地邮件信息。(RM, CAS@spd-svbank.com,asu@spd-svbank.com)
"""
print(query)
print("--------- 标准答案 ---------")
print(description)
print("+++++++++++++++++++++++++++")
print("--------- origin ---------")
response,history = model.chat(tokenizer,query,history=None)
print(response)
# model = PeftModel.from_pretrained(model, cfg.ckpt_path_v7, adapter_name="v7").half().cuda()
# model.load_adapter(cfg.ckpt_path_q1, adapter_name="q1")
loader.load_loras({
"v7": cfg.ckpt_path_v7,
"q1": cfg.ckpt_path_q1,
},"v7")
model,_= loader.models()
print("----------- v7 -----------")
model.set_adapter("v7")
model = model.half()
response,history = model.chat(tokenizer,query,history=None)
print(response)
print("----------- q1 -----------")
# loader.load_lora(cfg.ckpt_path_q1,"q1")
model.set_adapter("q1")
model = model.half()
response,history = model.chat(tokenizer,query,history=None)
print(response)
print("---------------------------")
import os
import sys
sys.path.append("../..")
from llm.loader import ModelLoader
from common import consts
## --------- config -------------
from argparse import Namespace
cfg = Namespace()
cfg.max_source_length = 64
cfg.max_target_length = 128
#model
cfg.model_name_or_path = consts.MODEL_PATH_ChatGLM  # remote id: 'THUDM/chatglm-6b'
cfg.quantization_bit = None  # 4 or 8 may be chosen, but only for inference
cfg.ckpt_path = '../../../model/ckpt/chatglm-6b-lora-spdsvb-INSv4-1e-03-50'
# cfg.ckpt_path = '/home/ljj/saved_files_new_range16_batch4_epoth30/chatGLM_6B_QLoRA_t32'
cfg.save_path = '/home/zfh/models/tunning/chatglm-6b-lora-spdsvb-INSv4-1e-03-50'
## --------- end config -------------
loader = ModelLoader(cfg.model_name_or_path, cfg.quantization_bit)
loader.load_lora(cfg.ckpt_path,)
model,tokenizer = loader.models()
model = model.eval()
print(model)
model.save_pretrained(cfg.save_path,max_shard_size='2GB')
tokenizer.save_pretrained(cfg.save_path)
os.system(f"cp {cfg.model_name_or_path}/*.py {cfg.save_path}/")
# # from torchkeras import summary
# # summary(model)
import os
train_history_path = os.path.join(cfg.ckpt_path, 'train_history.json')
val_metrics_path = os.path.join(cfg.ckpt_path, 'val_metrics.json')
if os.path.exists(train_history_path):
os.system(f"cp {train_history_path} {cfg.save_path}/")
else:
print(f"File {train_history_path} does not exist.")
if os.path.exists(val_metrics_path):
os.system(f"cp {val_metrics_path} {cfg.save_path}/")
else:
print(f"File {val_metrics_path} does not exist.")
#!/bin/bash
source common.sh
lr=1e-3
epochs=50
batch_size=4
gradient_accumulation_steps=16
python lora.py \
--train_file ../../../data/train_spdsvb_v11.csv \
--val_file ../../../data/val_spdsvb_v11.csv \
--prompt_column prompt \
--response_column response \
--max_source_length 64 \
--max_target_length 128 \
--model_name_or_path $MODEL_PATH_ChatGLM2_32K \
--epochs $epochs \
--lr $lr \
--batch_size $batch_size \
--gradient_accumulation_steps $gradient_accumulation_steps \
--ckpt_path ../../../model/ckpt/chatglm2-6b-32k-lora-spdsvb-INSv11-1e-3-50
\ No newline at end of file
#!/bin/bash
source common.sh
PRE_SEQ_LEN=128
LR=3e-3
STEP=3000
output_dir=../../../model/ckpt/chatglm2-6b-32k-pt-spdsvb-INSv11-$PRE_SEQ_LEN-$LR-$STEP+s1000
# train
CUDA_VISIBLE_DEVICES=0 python pt.py \
--do_train \
--train_file ../../../data/train_spdsvb_v11.csv \
--validation_file ../../../data/val_spdsvb_v11.csv \
--prompt_column prompt \
--response_column response \
--source_prefix $INSTRUCTION_V1 \
--overwrite_cache \
--model_name_or_path $MODEL_PATH_ChatGLM2_32K \
--output_dir $output_dir \
--overwrite_output_dir \
--max_source_length 64 \
--max_target_length 128 \
--per_device_train_batch_size 4 \
--per_device_eval_batch_size 1 \
--gradient_accumulation_steps 16 \
--predict_with_generate \
--max_steps $STEP \
--logging_steps 50 \
--save_steps 500 \
--evaluation_strategy steps \
--eval_steps 100 \
--learning_rate $LR \
--pre_seq_len $PRE_SEQ_LEN
# --quantization_bit 4
# --ptuning_checkpoint ../../../model/ckpt/chatglm2-6b-pt-spdsvb-INSv11-128-5e-3-3010/checkpoint-1000/ \
\ No newline at end of file
#!/bin/bash
source common.sh
lr=5e-4
epochs=30
rank=16
train_file='../../../data/train_spdsvb_v11.csv'
val_file='../../../data/val_spdsvb_v11.csv'
output_dir="../../../model/ckpt/chatglm2-6b-32k-qlora-INSv11-rank${rank}-$lr-$epochs"
log_file=${output_dir}/prefix.log
echo $log_file
prompt_text=$INSTRUCTION_V1
# --resume_from_checkpoint $output_dir/checkpoint-1000 \
python3 qlora.py \
--train_args_json ./config_qlora.json \
--model_name_or_path $MODEL_PATH_ChatGLM2_32K \
--train_data_path $train_file \
--eval_data_path $val_file \
--xname prompt \
--yname response \
--prompt_text $prompt_text \
--output_dir $output_dir \
--lora_rank $rank \
--lora_dropout 0.05 \
--compute_dtype fp32 \
--learning_rate $lr \
--num_train_epochs $epochs
\ No newline at end of file
# Notes
sync_curvector.py   syncs the original pgsql store: migrates the old vector data into the new vector store and saves the faiss index locally
pgsqldocstore.py    a custom docstore backed by pgsql; it adjusts the original table layout and implements serialization/deserialization for local persistence
load_vector.py      loads the locally persisted faiss index; once instantiated it connects to pgsql and can query the vector store as usual
pgsql/*             database connection wrappers
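A minimal sketch of the query flow, mirroring load_vector.py below (it assumes sync_curvector.py has already written the faiss index to ./faiss_store and that the pgsql instance recorded in the index is reachable):

    from langchain.vectorstores import FAISS
    from langchain.embeddings.huggingface import HuggingFaceEmbeddings

    embeddings = HuggingFaceEmbeddings(model_name='/model/moka-ai/m3e-large')
    store = FAISS.load_local(folder_path="./faiss_store", index_name="know", embeddings=embeddings)
    print(store.similarity_search("忘记密码怎么办"))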
\ No newline at end of file
import os, sys
from os import path
sys.path.append("../")
from abc import ABC, abstractmethod
import json
from typing import List,Any,Tuple,Dict
from langchain.schema import Document
from vector.pgsqldocstore import PgSqlDocstore,str2hash_base64
class DocumentCallback(ABC):
    @abstractmethod  # document processing before storing into the vector store
def before_store(self,docstore:PgSqlDocstore,documents):
pass
    @abstractmethod  # document processing after a vector-store query, used to rebuild the document structure
    def after_search(self,docstore:PgSqlDocstore,documents:List[Tuple[Document, float]],number:int = 1000) -> List[Tuple[Document, float]]:  # post-query document processing
pass
class DefaultDocumentCallback(DocumentCallback):
def before_store(self,docstore:PgSqlDocstore,documents):
output_doc = []
for doc in documents:
if "next_doc" in doc.metadata:
doc.metadata["next_hash"] = str2hash_base64(doc.metadata["next_doc"])
doc.metadata.pop("next_doc")
output_doc.append(doc)
return output_doc
    def after_search(self,docstore:PgSqlDocstore,documents:List[Tuple[Document, float]],number:int = 1000) -> List[Tuple[Document, float]]:  # post-query document processing
output_doc:List[Tuple[Document, float]] = []
exist_hash = []
for doc,score in documents:
print(exist_hash)
dochash = str2hash_base64(doc.page_content)
if dochash in exist_hash:
continue
else:
exist_hash.append(dochash)
output_doc.append((doc,score))
if len(output_doc) > number:
return output_doc
fordoc = doc
while ("next_hash" in fordoc.metadata):
if len(fordoc.metadata["next_hash"])>0:
if fordoc.metadata["next_hash"] in exist_hash:
break
else:
exist_hash.append(fordoc.metadata["next_hash"])
content = docstore.TXT_DOC.search(fordoc.metadata["next_hash"])
if content:
fordoc = Document(page_content=content[0], metadata=json.loads(content[1]))
output_doc.append((fordoc,score))
if len(output_doc) > number:
return output_doc
else:
break
else:
break
return output_doc
\ No newline at end of file
from langchain.vectorstores import FAISS
from langchain.embeddings.huggingface import (
HuggingFaceEmbeddings,
)
from pgsqldocstore import PgSqlDocstore
embeddings = HuggingFaceEmbeddings(model_name='/model/moka-ai/m3e-large')
# embeddings.embed_documents()
# embedding = HuggingFaceInstructEmbeddings(model_name='hkunlp/instructor-xl')
new_db = FAISS.load_local(folder_path="./faiss_store",index_name="know", embeddings=embeddings)
query = "忘记密码怎么办"
docs = new_db.similarity_search(query)
print(docs)
\ No newline at end of file
import psycopg2
class PostgresDB:
'''
    psycopg2.connect(
        dsn                 # connection parameters, as keyword arguments or a DSN string
        host                # host name of the database server
        dbname              # database name
        user                # user name used for the connection
        password            # password used for the connection
        port                # port of the database server
        connection_factory  # factory class used to create the connection object
        cursor_factory      # factory class used to create cursor objects
        async_              # whether to connect asynchronously (default False)
        sslmode             # SSL mode
        sslrootcert         # CA certificate file name
        sslkey              # private key file name
        sslcert             # client certificate file name
    )
'''
def __init__(self, host, database, user, password,port = 5432):
self.host = host
self.database = database
self.user = user
self.password = password
self.port = port
self.conn = None
self.cur = None
def connect(self):
self.conn = psycopg2.connect(
host=self.host,
database=self.database,
user=self.user,
password=self.password,
port = self.port
)
self.cur = self.conn.cursor()
def execute(self, query):
try:
self.cur.execute(query)
self.conn.commit()
except Exception as e:
print(f"An error occurred: {e}")
self.conn.rollback()
def execute_args(self, query, args):
try:
self.cur.execute(query, args)
self.conn.commit()
except Exception as e:
print(f"An error occurred: {e}")
self.conn.rollback()
def search(self, query, params=None):
self.cur.execute(query, params)
def fetchall(self):
return self.cur.fetchall()
def close(self):
self.cur.close()
self.conn.close()
def format(self, query):
try:
self.cur.execute(query)
self.conn.commit()
except Exception as e:
print(f"An error occurred: {e}")
self.conn.rollback()
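# A minimal usage sketch (connection values are illustrative; the txt_doc table is defined in txt_doc.py):
#   db = PostgresDB("127.0.0.1", "new_vecdoc", "vecdoc", "vecdoc")
#   db.connect()
#   db.execute_args("INSERT INTO txt_doc(hash, text, matadate) VALUES (%s, %s, %s) "
#                   "ON CONFLICT(hash) DO UPDATE SET text = EXCLUDED.text;",
#                   ["abc", "some paragraph", "{}"])
#   db.search("SELECT text, matadate FROM txt_doc WHERE hash = %s", ["abc"])
#   print(db.fetchall())
#   db.close()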
from .db import PostgresDB
# paragraph_id BIGSERIAL primary key,
TABLE_TXT_DOC = """
create table txt_doc (
hash varchar(40) primary key,
text text not null,
matadate text
);
"""
TABLE_TXT_DOC_HASH_INDEX = """
CREATE UNIQUE INDEX hash_index ON txt_doc (hash);
"""
# CREATE UNIQUE INDEX idx_name ON your_table (column_name);
class TxtDoc:
def __init__(self, db: PostgresDB) -> None:
self.db = db
def insert(self, texts):
query = f"INSERT INTO txt_doc(hash,text,matadate) VALUES "
args = []
for value in texts:
value = list(value)
query+= "(%s,%s,%s),"
args.extend(value)
query = query[:len(query)-1]
query += f"ON conflict(hash) DO UPDATE SET text = EXCLUDED.text;"
self.db.execute_args(query,args)
def delete(self,ids):
for id in ids:
query = f"delete FROM txt_doc WHERE hash = %s" % (id)
self.db.execute(query)
def search(self, id):
query = "SELECT text,matadate FROM txt_doc WHERE hash = %s"
self.db.execute_args(query,[id])
answer = self.db.fetchall()
if len(answer) > 0:
return answer[0]
else:
return None
# return Document(page_content=self.db.fetchall()[0][0], metadata=dict(page=self.db.fetchall()[0][1]))
# answer = self.db.fetchall()[0][0]
# return answer
def create_table(self):
query = f"SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = 'txt_doc')"
self.db.execute(query)
exists = self.db.fetchall()[0][0]
if not exists:
query = TABLE_TXT_DOC
self.db.execute(query)
# self.db.execute(TABLE_TXT_DOC_HASH_INDEX)
def drop_table(self):
query = f"SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = 'txt_doc')"
self.db.execute(query)
exists = self.db.fetchall()[0][0]
if exists:
query = "DROP TABLE txt_doc"
self.db.format(query)
print("drop table txt_doc ok")
from .db import PostgresDB
TABLE_VEC_TXT = """
CREATE TABLE vec_txt (
vector_id varchar(36) PRIMARY KEY,
text text,
paragraph_id varchar(40) not null
)
"""
#025a9bee-2eb2-47f5-9722-525e05a0442b
class TxtVector:
def __init__(self, db: PostgresDB) -> None:
self.db = db
def insert(self, vectors):
query = f"INSERT INTO vec_txt(vector_id,text,paragraph_id) VALUES"
args = []
for value in vectors:
value = list(value)
query+= "(%s,%s,%s),"
args.extend(value)
query = query[:len(query)-1]
query += f"ON conflict(vector_id) DO UPDATE SET text = EXCLUDED.text,paragraph_id = EXCLUDED.paragraph_id;"
# query += ";"
self.db.execute_args(query,args)
def delete(self,ids):
for id in ids:
query = f"delete FROM vec_txt WHERE vector_id = '%s'" % (id,)
self.db.execute(query)
def search(self, search: str):
query = f"SELECT paragraph_id,text FROM vec_txt WHERE vector_id = %s"
self.db.execute_args(query,[search])
answer = self.db.fetchall()
print(answer)
return answer[0]
def create_table(self):
query = f"SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = 'vec_txt')"
self.db.execute(query)
exists = self.db.fetchall()[0][0]
if not exists:
query = TABLE_VEC_TXT
self.db.execute(query)
def drop_table(self):
query = f"SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = 'vec_txt')"
self.db.execute(query)
exists = self.db.fetchall()[0][0]
if exists:
query = "DROP TABLE vec_txt"
self.db.format(query)
print("drop table vec_txt ok")
\ No newline at end of file
# dir/main.py
import sys,time
from os import path
# effectively adds this file's directory to the Python path
sys.path.append(path.dirname(path.abspath(__file__)))
from typing import List,Union,Dict,Optional
from langchain.docstore.base import AddableMixin, Docstore
from pgsql.db import PostgresDB
from pgsql.txt_doc import TxtDoc
from pgsql.vec_txt import TxtVector
import json,hashlib,base64
from langchain.schema import Document
def str2hash_base64(input:str) -> str:
# return f"%s" % hash(input)
return base64.b64encode(hashlib.sha1(input.encode()).digest()).decode()
class InMemorySecondaryDocstore(Docstore, AddableMixin):
"""Simple in memory docstore in the form of a dict."""
def __init__(self, _dict: Optional[Dict[str, Document]] = None,_sec_dict: Optional[Dict[str, Document]] = None):
"""Initialize with dict."""
self._dict = _dict if _dict is not None else {}
self._sec_dict = _sec_dict if _sec_dict is not None else {}
def add(self, texts: Dict[str, Document]) -> None:
"""Add texts to in memory dictionary.
Args:
texts: dictionary of id -> document.
Returns:
None
"""
overlapping = set(texts).intersection(self._dict)
if overlapping:
raise ValueError(f"Tried to add ids that already exist: {overlapping}")
self._dict = {**self._dict, **texts}
dict1 = {}
dict_sec = {}
for vec,doc in texts.items():
txt_hash = str2hash_base64(doc.metadata["paragraph"])
metadata=doc.metadata
paragraph = metadata.pop('paragraph')
# metadata.update({"paragraph_id":txt_hash})
metadata['paragraph_id']=txt_hash
dict_sec[txt_hash] = Document(page_content=paragraph,metadata=metadata)
dict1[vec] = Document(page_content=doc.page_content,metadata={'paragraph_id':txt_hash})
self._dict = {**self._dict, **dict1}
self._sec_dict = {**self._sec_dict, **dict_sec}
def delete(self, ids: List) -> None:
"""Deleting IDs from in memory dictionary."""
overlapping = set(ids).intersection(self._dict)
if not overlapping:
raise ValueError(f"Tried to delete ids that does not exist: {ids}")
for _id in ids:
self._sec_dict.pop(self._dict[id].metadata['paragraph_id'])
self._dict.pop(_id)
def search(self, search: str) -> Union[str, Document]:
"""Search via direct lookup.
Args:
search: id of a document to search for.
Returns:
Document if found, else error message.
"""
if search not in self._dict:
return f"ID {search} not found."
else:
print(self._dict[search].page_content)
return self._sec_dict[self._dict[search].metadata['paragraph_id']]
class PgSqlDocstore(Docstore,AddableMixin):
host:str
dbname:str
username:str
password:str
port:str
'''
    Note: __getstate__/__setstate__ are overridden so that langchain's pickle-based persistence works; the state is a dict holding the pgsql connection info.
'''
def __getstate__(self):
return {"host":self.host,"dbname":self.dbname,"username":self.username,"password":self.password,"port":self.port}
def __setstate__(self, info):
self.__init__(info)
def __init__(self,info:dict,reset:bool = False):
self.host = info["host"]
self.dbname = info["dbname"]
self.username = info["username"]
self.password = info["password"]
self.port = info["port"] if "port" in info else "5432";
self.pgdb = PostgresDB(self.host, self.dbname, self.username, self.password,port=self.port)
self.TXT_DOC = TxtDoc(self.pgdb)
self.VEC_TXT = TxtVector(self.pgdb)
if reset:
self.__sub_init__()
self.TXT_DOC.drop_table()
self.VEC_TXT.drop_table()
self.TXT_DOC.create_table()
self.VEC_TXT.create_table()
def __sub_init__(self):
if not self.pgdb.conn:
self.pgdb.connect()
'''
    Look up the paragraph that corresponds to a vector id in the local database and wrap it in a Document.
'''
def search(self, search: str) -> Union[str, Document]:
if not self.pgdb.conn:
self.__sub_init__()
        answer = self.VEC_TXT.search(search)
        content = self.TXT_DOC.search(answer[0])
if content:
return Document(page_content=content[0], metadata=json.loads(content[1]))
else:
return Document()
'''
    Delete the texts that correspond to the given vector ids from the local database, in batch.
'''
def delete(self, ids: List) -> None:
if not self.pgdb.conn:
self.__sub_init__()
pids = []
for id in ids:
            answer = self.VEC_TXT.search(id)
            pids.append(answer[0])
self.VEC_TXT.delete(ids)
self.TXT_DOC.delete(pids)
'''
    Add vectors and their texts to the local database.
    [vector_id, Document(page_content=question, metadata=dict(paragraph=paragraph text))]
'''
def add(self, texts: Dict[str, Document]) -> None:
# for vec,doc in texts.items():
# paragraph_id = self.TXT_DOC.insert(doc.metadata["paragraph"])
# self.VEC_TXT.insert(vector_id=vec,paragraph_id=paragraph_id,text=doc.page_content)
if not self.pgdb.conn:
self.__sub_init__()
paragraph_hashs = [] #hash,text
paragraph_txts = []
vec_inserts = []
for vec,doc in texts.items():
txt_hash = str2hash_base64(doc.metadata["paragraph"])
print(txt_hash)
vec_inserts.append((vec,doc.page_content,txt_hash))
if txt_hash not in paragraph_hashs:
paragraph_hashs.append(txt_hash)
paragraph = doc.metadata["paragraph"]
doc.metadata.pop("paragraph")
paragraph_txts.append((txt_hash,paragraph,json.dumps(doc.metadata,ensure_ascii=False)))
# print(paragraph_txts)
self.TXT_DOC.insert(paragraph_txts)
self.VEC_TXT.insert(vec_inserts)
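# A minimal usage sketch (connection values and the document content are illustrative):
#   store = PgSqlDocstore({"host": "127.0.0.1", "dbname": "new_vecdoc",
#                          "username": "vecdoc", "password": "vecdoc"})
#   store.add({"025a9bee-2eb2-47f5-9722-525e05a0442b":
#              Document(page_content="How do I reset my password?",
#                       metadata={"paragraph": "full paragraph the question was drawn from"})})
#   print(store.search("025a9bee-2eb2-47f5-9722-525e05a0442b"))  # -> Document wrapping the stored paragraph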
\ No newline at end of file
python-docx
numpy
faiss-gpu==1.7.2
flask
psycopg2==2.9.7
langchain==0.0.278
\ No newline at end of file
import os, sys
import re,time
from os import path
sys.path.append("../")
import copy
from typing import List,OrderedDict,Any,Optional,Tuple,Dict
from vector.pgsqldocstore import InMemorySecondaryDocstore
from langchain.vectorstores.faiss import FAISS,dependable_faiss_import
from langchain.schema import Document
from vector.pgsqldocstore import PgSqlDocstore
from langchain.embeddings.huggingface import (
HuggingFaceEmbeddings,
)
import math
import faiss
from langchain.vectorstores.utils import DistanceStrategy
from langchain.vectorstores.base import VectorStoreRetriever
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from loader import load
from langchain.embeddings.base import Embeddings
from vector.VectorCallback import DocumentCallback,DefaultDocumentCallback
def singleton(cls):
instances = {}
def get_instance(*args, **kwargs):
if cls not in instances:
instances[cls] = cls(*args, **kwargs)
return instances[cls]
return get_instance
@singleton
class EmbeddingFactory:
def __init__(self, path:str):
self.path = path
self.embedding = HuggingFaceEmbeddings(model_name=path)
def get_embedding(self):
return self.embedding
def GetEmbding(path:str) -> Embeddings:
# return HuggingFaceEmbeddings(model_name=path)
return EmbeddingFactory(path).get_embedding()
import operator
from langchain.vectorstores.utils import DistanceStrategy
import numpy as np
class RE_FAISS(FAISS):
    # deduplicate results while keeping metadata
def _tuple_deduplication(self, tuple_input:List[Tuple[Document, float]]) -> List[Tuple[Document, float]]:
deduplicated_dict = OrderedDict()
for doc,scores in tuple_input:
page_content = doc.page_content
metadata = doc.metadata
if page_content not in deduplicated_dict:
deduplicated_dict[page_content] = (metadata,scores)
deduplicated_documents = [(Document(page_content=key,metadata=value[0]),value[1]) for key, value in deduplicated_dict.items()]
return deduplicated_documents
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
fetch_k: int = 20,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
faiss = dependable_faiss_import()
vector = np.array([embedding], dtype=np.float32)
if self._normalize_L2:
faiss.normalize_L2(vector)
scores, indices = self.index.search(vector, k if filter is None else fetch_k)
docs = []
for j, i in enumerate(indices[0]):
if i == -1:
# This happens when not enough docs are returned.
continue
_id = self.index_to_docstore_id[i]
doc = self.docstore.search(_id)
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
if filter is not None:
filter = {
key: [value] if not isinstance(value, list) else value
for key, value in filter.items()
}
if all(doc.metadata.get(key) in value for key, value in filter.items()):
docs.append((doc, scores[0][j]))
else:
docs.append((doc, scores[0][j]))
docs = self._tuple_deduplication(docs)
score_threshold = kwargs.get("score_threshold")
if score_threshold is not None:
cmp = (
operator.ge
if self.distance_strategy
in (DistanceStrategy.MAX_INNER_PRODUCT, DistanceStrategy.JACCARD)
else operator.le
)
docs = [
(doc, similarity)
for doc, similarity in docs
if cmp(similarity, score_threshold)
]
if "doc_callback" in kwargs:
if hasattr(kwargs["doc_callback"], 'after_search'):
docs = kwargs["doc_callback"].after_search(self.docstore,docs,number=k)
return docs[:k]
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch before filtering to
pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
docs_and_scores = self.max_marginal_relevance_search_with_score_by_vector(
embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter
)
docs_and_scores = self._tuple_deduplication(docs_and_scores)
if "doc_callback" in kwargs:
if hasattr(kwargs["doc_callback"], 'after_search'):
docs_and_scores = kwargs["doc_callback"].after_search(self.docstore,docs_and_scores,number=k)
return [doc for doc, _ in docs_and_scores]
def getFAISS(embedding_model_name:str,store_path:str,info:dict = None,index_name:str = "index",is_pgsql:bool = True,reset:bool = False) -> RE_FAISS:
embeddings = GetEmbding(path=embedding_model_name)
docstore1:PgSqlDocstore = None
if is_pgsql:
if info and "host" in info and "dbname" in info and "username" in info and "password" in info:
docstore1 = PgSqlDocstore(info,reset=reset)
else:
docstore1 = InMemorySecondaryDocstore()
    if store_path and not path.exists(store_path):
        os.makedirs(store_path, exist_ok=True)
if store_path is None or len(store_path) <= 0 or not path.exists(path.join(store_path,index_name+".faiss")) or reset:
print("create new faiss")
        index = faiss.IndexFlatL2(len(embeddings.embed_documents(["a"])[0]))  # index dimension is taken from the embedding size
return RE_FAISS(embedding_function=embeddings.client.encode,index=index,docstore=docstore1,index_to_docstore_id={})
else:
print("load_local faiss")
_faiss = RE_FAISS.load_local(folder_path=store_path,index_name=index_name, embeddings=embeddings)
        if docstore1 and is_pgsql:  # if new connection info was passed in, swap in the fresh docstore
_faiss.docstore = docstore1
return _faiss
class VectorStore_FAISS(FAISS):
def __init__(self, embedding_model_name:str,store_path:str,index_name:str = "index",info:dict = None, is_pgsql:bool = True,show_number = 5, threshold = 0.8, reset:bool = False,doc_callback:DocumentCallback = DefaultDocumentCallback()):
self.info = info
self.embedding_model_name = embedding_model_name
self.store_path = path.join(store_path,index_name)
if not path.exists(self.store_path):
os.makedirs(self.store_path,exist_ok=True)
self.index_name = index_name
self.show_number = show_number
self.search_number = self.show_number*3
self.threshold = threshold
self._faiss = getFAISS(self.embedding_model_name,self.store_path,info=info,index_name=self.index_name,is_pgsql=is_pgsql,reset=reset)
self.doc_callback = doc_callback
def get_text_similarity_with_score(self, text:str,**kwargs):
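        # self.threshold is a similarity value in [0, 1]; the FAISS index returns L2 distances,
        # so it is mapped onto a distance cutoff with the linear heuristic (1 - threshold) * sqrt(2)
        # (for roughly unit-norm embeddings distances fall in [0, sqrt(2)]; smaller means more similar)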
score_threshold = (1-self.threshold) * math.sqrt(2)
docs = self._faiss.similarity_search_with_score(query=text,k=self.search_number,score_threshold=score_threshold,doc_callback=self.doc_callback,**kwargs)
return [doc for doc, similarity in docs][:self.show_number]
def get_text_similarity(self, text:str,**kwargs):
docs = self._faiss.similarity_search(query=text,k=self.search_number,doc_callback=self.doc_callback,**kwargs)
return docs[:self.show_number]
# #去重,并保留metadate
# def _tuple_deduplication(self, tuple_input:List[Document]) -> List[Document]:
# deduplicated_dict = OrderedDict()
# for doc in tuple_input:
# page_content = doc.page_content
# metadata = doc.metadata
# if page_content not in deduplicated_dict:
# deduplicated_dict[page_content] = metadata
# deduplicated_documents = [Document(page_content=key,metadata=value) for key, value in deduplicated_dict.items()]
# return deduplicated_documents
def _join_document(self, docs:List[Document]) -> str:
print(docs)
return "".join([doc.page_content for doc in docs])
def get_local_doc(self, docs:List[Document]):
ans = []
for doc in docs:
ans.append({"page_content":doc.page_content, "page_number":doc.metadata["page_number"], "filename":doc.metadata["filename"]})
return ans
# def _join_document_location(self, docs:List[Document]) -> str:
    # persist the index to local disk
def _save_local(self):
self._faiss.save_local(folder_path=self.store_path,index_name=self.index_name)
    # Add documents
    # Document {
    #     page_content  paragraph text
    #     metadata {
    #         page  page number
    #     }
    # }
def _add_documents(self, new_docs:List[Document],need_split:bool = True,pattern:str = r'[?。;\n]'):
list_of_documents:List[Document] = []
if self.doc_callback:
new_docs = self.doc_callback.before_store(self._faiss.docstore,new_docs)
if need_split:
for doc in new_docs:
words_list = re.split(pattern, doc.page_content)
                # drop duplicate fragments
words_list = set(words_list)
words_list = [str(words) for words in words_list]
for words in words_list:
if not words.strip() == '':
metadata = copy.deepcopy(doc.metadata)
metadata["paragraph"] = doc.page_content
list_of_documents.append(Document(page_content=words, metadata=metadata))
else:
list_of_documents = new_docs
self._faiss.add_documents(list_of_documents)
def _add_documents_from_dir(self,filepaths = [],load_kwargs: Optional[dict] = {"mode":"paged"}):
self._add_documents(load.loads(filepaths,**load_kwargs))
def as_retriever(self, **kwargs: Any) -> VectorStoreRetriever:
"""
Return VectorStoreRetriever initialized from this VectorStore.
Args:
search_type (Optional[str]): Defines the type of search that
the Retriever should perform.
Can be "similarity" (default), "mmr", or
"similarity_score_threshold".
search_kwargs (Optional[Dict]): Keyword arguments to pass to the
search function. Can include things like:
k: Amount of documents to return (Default: 4)
score_threshold: Minimum relevance threshold
for similarity_score_threshold
fetch_k: Amount of documents to pass to MMR algorithm (Default: 20)
lambda_mult: Diversity of results returned by MMR;
1 for minimum diversity and 0 for maximum. (Default: 0.5)
filter: Filter by document metadata
Returns:
VectorStoreRetriever: Retriever class for VectorStore.
Examples:
.. code-block:: python
# Retrieve more documents with higher diversity
# Useful if your dataset has many similar documents
docsearch.as_retriever(
search_type="mmr",
search_kwargs={'k': 6, 'lambda_mult': 0.25}
)
# Fetch more documents for the MMR algorithm to consider
# But only return the top 5
docsearch.as_retriever(
search_type="mmr",
search_kwargs={'k': 5, 'fetch_k': 50}
)
# Only retrieve documents that have a relevance score
# Above a certain threshold
docsearch.as_retriever(
search_type="similarity_score_threshold",
search_kwargs={'score_threshold': 0.8}
)
# Only get the single most similar document from the dataset
docsearch.as_retriever(search_kwargs={'k': 1})
# Use a filter to only retrieve documents from a specific paper
docsearch.as_retriever(
search_kwargs={'filter': {'paper_title':'GPT-4 Technical Report'}}
)
"""
if not kwargs or "similarity_score_threshold" != kwargs["search_type"]:
default_kwargs = {'k': self.show_number}
if "search_kwargs" in kwargs:
default_kwargs.update(kwargs["search_kwargs"])
kwargs["search_kwargs"] = default_kwargs
elif "similarity_score_threshold" == kwargs["search_type"]:
default_kwargs = {'score_threshold': self.threshold,'k': self.show_number}
if "search_kwargs" in kwargs:
default_kwargs.update(kwargs["search_kwargs"])
kwargs["search_kwargs"] = default_kwargs
kwargs["search_kwargs"]["doc_callback"]=self.doc_callback
tags = kwargs.pop("tags", None) or []
tags.extend(self._faiss._get_retriever_tags())
print(kwargs)
return VectorStoreRetriever_FAISS(vectorstore=self._faiss, **kwargs, tags=tags)
class VectorStoreRetriever_FAISS(VectorStoreRetriever):
search_k = 5
def __init__(self,**kwargs):
super().__init__(**kwargs)
if "k" in self.search_kwargs:
self.search_k=self.search_kwargs["k"]
self.search_kwargs["k"]=self.search_k*2
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
docs = super()._get_relevant_documents(query=query,run_manager=run_manager)
return docs[:self.search_k]
async def _aget_relevant_documents(
self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
) -> List[Document]:
docs = super()._aget_relevant_documents(query=query,run_manager=run_manager)
return docs[:self.search_k]
\ No newline at end of file
from langchain.vectorstores import FAISS
from langchain.embeddings.huggingface import (
HuggingFaceEmbeddings,
)
import faiss
from pgsql.db import PostgresDB
from langchain.schema import Document
from pgsqldocstore import PgSqlDocstore
docstore1 = PgSqlDocstore(["192.168.0.148","new_vecdoc","vecdoc","vecdoc"],reset=True)
index = faiss.IndexFlatL2(1024)
embeddings = HuggingFaceEmbeddings(model_name='/model/moka-ai/m3e-large')
db = FAISS(embedding_function=embeddings.client.encode,index=index,docstore=docstore1,index_to_docstore_id={})
psqldb = PostgresDB("192.168.0.148", "vecdoc", "vecdoc", "vecdoc")
psqldb.connect()
# export everything from the current vector store into the new one
db_index = 0
page_size = 500
print(db.index.ntotal)
while True:
query = f"SELECT text,paragraph_id FROM vec_txt order by vector_id limit %s offset %s" % (page_size,db_index)
psqldb.execute(query)
questions = psqldb.fetchall()
if len(questions) <= 0:
break
db_index+=page_size
list_of_documents = []
for question in questions:
query = f"SELECT text FROM txt_doc WHERE paragraph_id = %s" % (question[1])
psqldb.execute(query)
paragraph = psqldb.fetchall()[0]
list_of_documents.append(Document(page_content=question[0], metadata=dict(paragraph=paragraph[0])))
db.add_documents(list_of_documents)
print(db.index.ntotal)
# break
# export finished; run a query against the vector store as a sanity check
results_with_scores = db.similarity_search_with_score("忘记密码怎么办")
page_ids = []
for doc, score in results_with_scores:
    page_id = doc.metadata.get("page")
    if page_id is not None and page_id in page_ids:
        continue
    if page_id is not None:
        page_ids.append(page_id)
    print(f"Content: {doc.page_content}, Metadata: {doc.metadata}, Score: {score}")
# faiss.write_index(db.index, "./faiss_store/know.faiss")
# persist the vector store to local disk
db.save_local(folder_path="./faiss_store",index_name="know")
import gradio as gr
from flask import Flask, request, jsonify
import torch
from contract.extraction import ElementsExtractor
from llm.chatglm import ChatGLMLocLLM
from llm.ernie import ErnieLLM
from llm.baichuan import BaichuanLLM
from loader.load import load_file,load
from flask.cli import load_dotenv
load_dotenv()
# Load the model
llms = ["ChatGLM","ChatGLM2","Ernie"]
llm = ChatGLMLocLLM(model_name="../../models/chatglm2-6b")
# llm = ErnieLLM()
extractor=ElementsExtractor(llm=llm)
elements = ["合同号","买方","卖方","合同金额","合同签订日期","装运标记","甲方","乙方","甲方地址","乙方地址"]
# max_length=1000
# Define the Gradio interface
def contract(file,elements,max_length):
print(llm.model_name)
print(file,elements)
if file is None:
return "Error: could not load file"
docs = load(file.name)
if docs is None:
return "Error: could not load file"
print(len(docs))
content = []
content_len = 0
values={}
for d in docs:
if content_len+len(d.page_content)>max_length:
doc = "\n".join(content)
eles = extractor.extract(doc,elements)
for e in eles:
try:
k,v = e.split(":",maxsplit=1)
k = k.strip()
v = v.strip()
if v is not None and v != "" and v!="未知" and k in elements:
values[k]=v+","+values[k] if k in values else v
except Exception as exp:
print(exp)
print(e)
continue
print("\n".join([f"{k}:{v}" for k,v in values.items()]))
content=[d.page_content]
content_len=len(d.page_content)
else:
content.append(d.page_content)
content_len+=len(d.page_content)
return "\n".join([f"{k}:{v}" for k,v in values.items()])
def change_llm_type(llm_type):
print("change_llm_type",llm_type)
global llm,extractor
del llm
llm=ErnieLLM()
torch.cuda.empty_cache()
if llm_type=="ChatGLM":
llm = ChatGLMLocLLM(model_name="../../models/chatglm-6b")
elif llm_type=="ChatGLM2":
llm = ChatGLMLocLLM(model_name="../../models/chatglm2-6b")
elif llm_type=="Ernie":
llm = ErnieLLM()
elif llm_type=="baichuan-13b":
llm = BaichuanLLM(model_name="../../models/Baichuan-13B-Chat",quantization_bit=8)
else:
llm = ErnieLLM()
if llm is not None:
extractor=ElementsExtractor(llm=llm)
return llm_type
def add_element(ele_new):
print("add_element",elements,ele_new)
elements.append(ele_new)
return {ele_group:gr.update(choices=elements),
ele_new_box:gr.update(value="")}
def reset():
output.value=""
file.value=None
with gr.Blocks() as demo:
gr.HTML("""<h1 align="center">商业合同要素提取</h1>""")
with gr.Row():
with gr.Column(scale=4):
with gr.Row():
file = gr.File(label="上传文件")
with gr.Row():
submit_btn=gr.Button("开始提取", type="submit")
# reset_btn=gr.Button("重置", type="reset")
# reset_btn.click(reset)
with gr.Row():
output=gr.Textbox(label="提取结果", type="text", lines=20)
with gr.Column(scale=1):
with gr.Row():
max_length = gr.Slider(1000, 10000, value=5000, step=1000, label="单次提取使用的文本长度", interactive=True)
with gr.Row():
llm_type = gr.Radio(llms, label="语言模型类型", value="ChatGLM2", interactive=True)
llm_type.change(change_llm_type, inputs=[llm_type],outputs=[llm_type])
with gr.Row():
ele_group = gr.CheckboxGroup(choices=elements, label="需要提取的元素", value=elements, interactive=True)
with gr.Row():
ele_new_box = gr.Textbox(label="新增元素", type="text", lines=1)
ele_new_btn = gr.Button("新增", type="submit")
ele_new_btn.click(add_element,inputs=[ele_new_box],outputs=[ele_group,ele_new_box])
submit_btn.click(contract,inputs=[file,ele_group,max_length],outputs=output)
demo.queue().launch(share=True)
\ No newline at end of file
import gradio as gr
from flask import Flask, request, jsonify
import torch
from qa.generator import QAGenerator
from llm.chatglm import ChatGLMLocLLM
from llm.ernie import ErnieLLM
from loader.load import load_file
from flask.cli import load_dotenv
load_dotenv()
# Load the model
# llm = ChatGLMLocLLM(model_name="../../models/chatglm2-6b")
llm = ErnieLLM()
qa = QAGenerator(llm=llm)
# Define the Gradio interface
def qa_interface(file,step,question_numbers):
print(file,step,question_numbers)
# Load the file
docs = load_file(file.name)
if docs is None:
return "Error: could not load file"
for i in range(0,len(docs),step):
print(i)
content = "\n".join([d.page_content for d in docs[i:i+step]])
knowledge=content
lines = qa.generate_questions(knowledge=knowledge,question_number=question_numbers)
answers = []
for line in lines:
answers.append(line)
answer=qa.generate_answer(knowledge=knowledge,question=line)
answers.append(answer)
answers.append("----")
yield "\n".join(answers)
def change_llm_type(llm_type):
print("change_llm_type",llm_type)
global llm,qa
del llm
torch.cuda.empty_cache()
if llm_type=="ChatGLM":
llm = ChatGLMLocLLM(model_name="../../models/chatglm-6b")
elif llm_type=="ChatGLM2":
llm = ChatGLMLocLLM(model_name="../../models/chatglm2-6b")
elif llm_type=="Ernie":
llm = ErnieLLM()
else:
llm = ErnieLLM()
if llm is not None:
qa = QAGenerator(llm=llm)
return llm_type
def reset():
output.value=""
file.value=None
with gr.Blocks() as demo:
gr.HTML("""<h1 align="center">邮储新一代知识库-QA提取</h1>""")
with gr.Row():
with gr.Column(scale=4):
with gr.Row():
file = gr.File(label="上传文件",type="file")
with gr.Row():
submit_btn=gr.Button("开始提取", type="submit")
# reset_btn=gr.Button("重置", type="reset")
# reset_btn.click(reset)
with gr.Row():
output=gr.Textbox(label="提取结果", type="text", lines=10, readonly=True)
with gr.Column(scale=1):
with gr.Row():
step = gr.Slider(10, 1000, value=100, step=10, label="单次提取使用的知识句子数量", interactive=True)
question_numbers = gr.Slider(0, 10, value=5, step=1, label="单次提取问题数量", interactive=True)
with gr.Row():
llm_type = gr.Radio(["ChatGLM","ChatGLM2","Ernie"], label="语言模型类型", value="Ernie", interactive=True)
llm_type.change(change_llm_type, inputs=[llm_type],outputs=[llm_type])
submit_btn.click(qa_interface,inputs=[file,step,question_numbers],outputs=output)
demo.queue().launch(share=True)
\ No newline at end of file
#!/bin/bash
task() {
echo "执行任务"
set -x
sh /home/zfh/ChatGLM/ChatGLM-6B/ptuning/train_spdsvb.sh
set +x
}
# target execution time
exec_time='2023-07-26T00:01:00'
# convert exec_time to a Unix timestamp
exec_sec=$(date -d $exec_time +%s)
# current time as a Unix timestamp
now_sec=$(date +%s)
# seconds to wait
seconds=$((exec_sec-now_sec))
echo $seconds
if [ $seconds -gt 0 ]; then
sleep $seconds
task
fi
\ No newline at end of file