Communicating with ChatGLM3, based on chromadb and faiss respectively

1. Communicating with ChatGLM3, based on chromadb

import json
import requests
import os
from pdfminer.high_level import extract_pages
from pdfminer.layout import LTTextContainer
import chromadb
from chromadb.config import Settings
from sentence_transformers import SentenceTransformer
########################### classes & functions ###########################
model = SentenceTransformer('/home/helu/milvus/m3e-base')  # local m3e-base embedding model

# Call the locally hosted GLM chat endpoint
def get_completion_glm(prompt):
    endpoint_url = "http://192.168.212.211:6006/"
    chat_messages = [
        {
            "role": "system",
            "content": "从现在开始扮演一个专业人士和我对话",  # "From now on, act as a professional in our conversation"
        }
    ]
    chat_messages.append({"role": "user", "content": prompt})
    data = {
        "model": "chatglm3",        # model name
        "messages": chat_messages,  # conversation history
        "stream": False,            # whether to stream the response
        "max_tokens": 2000,         # maximum number of generated tokens
        "temperature": 0.8,         # sampling temperature
        "top_p": 0.8,               # nucleus sampling probability
    }
    headers = {'Content-Type': 'application/json'}
    response = requests.post(f"{endpoint_url}v1/chat/completions", headers=headers, json=data)
    parsed_response = response.json()
    # Defensive parsing: default to {} (not "") so the chained .get() calls cannot crash
    text = parsed_response.get("choices", [{}])[0].get("message", {}).get("content", "")
    return text
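
The parsing above assumes the OpenAI-compatible response schema that ChatGLM3's bundled API server exposes at /v1/chat/completions; a successful non-streaming reply looks roughly like this (field values are illustrative):

{
    "model": "chatglm3",
    "object": "chat.completion",
    "choices": [
        {
            "index": 0,
            "message": {"role": "assistant", "content": "...generated answer..."},
            "finish_reason": "stop"
        }
    ]
}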
def extract_text_from_pdf(filename, page_numbers=None, min_line_length=1):
    '''Extract text from a PDF file (optionally restricted to the given page numbers)'''
    paragraphs = []
    buffer = ''
    full_text = ''
    # Extract all the text
    for i, page_layout in enumerate(extract_pages(filename)):
        # If a page range was given, skip pages outside it
        if page_numbers is not None and i not in page_numbers:
            continue
        for element in page_layout:
            if isinstance(element, LTTextContainer):
                full_text += element.get_text() + '\n'
    # Re-assemble the text into paragraphs, splitting on blank lines
    lines = full_text.split('\n')
    for text in lines:
        if len(text) >= min_line_length:
            # Join hyphenated line breaks; otherwise insert a space between lines
            buffer += (' ' + text) if not text.endswith('-') else text.strip('-')
        elif buffer:
            paragraphs.append(buffer)
            buffer = ''
    if buffer:
        paragraphs.append(buffer)
    return paragraphs
prompt_template = """
你是一个问答机器人。
你的任务是根据下述给定的已知信息回答用户问题。
确保你的回复完全依据下述已知信息。不要编造答案。
如果下述已知信息不足以回答用户的问题,请直接回复"我无法回答您的问题"。
已知信息:
__INFO__
用户问:
__QUERY__
请用中文回答用户问题。
"""
def build_prompt(prompt_template, **kwargs):
    '''Fill in the placeholders of the prompt template'''
    prompt = prompt_template
    for k, v in kwargs.items():
        if isinstance(v, str):
            val = v
        elif isinstance(v, list) and all(isinstance(elem, str) for elem in v):
            val = '\n'.join(v)
        else:
            val = str(v)
        prompt = prompt.replace(f"__{k.upper()}__", val)
    return prompt
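
A minimal usage sketch (the strings are made up): keyword names are upper-cased and matched against the __NAME__ placeholders, and list values are joined with newlines before substitution:

demo = build_prompt(prompt_template,
                    info=["paragraph one", "paragraph two"],  # replaces __INFO__ as two lines
                    query="an example question")              # replaces __QUERY__
print(demo)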
# Embeddings come from the local sentence-transformers model instead of a remote embedding API
def get_embeddings(texts):
    # model.encode returns a numpy array; convert to plain lists, which chromadb always accepts
    return model.encode(texts).tolist()
class MyVectorDBConnector:
    def __init__(self, collection_name, embedding_fn):
        chroma_client = chromadb.Client(Settings(allow_reset=True))
        # Create (or reuse) a collection
        self.collection = chroma_client.get_or_create_collection(name=collection_name)
        self.embedding_fn = embedding_fn

    def add_documents(self, documents):
        '''Add documents and their vectors to the collection'''
        self.collection.add(
            embeddings=self.embedding_fn(documents),       # one vector per document
            documents=documents,                           # the original texts
            ids=[f"id{i}" for i in range(len(documents))]  # one id per document
        )

    def search(self, query, top_n):
        '''Query the vector database'''
        results = self.collection.query(
            query_embeddings=self.embedding_fn([query]),
            n_results=top_n
        )
        return results
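
For orientation: chromadb's query() returns a dict of parallel lists with one inner list per query, which is why the RAG bot below reads search_results['documents'][0]. The shape is roughly as follows (values illustrative):

{
    'ids': [['id12', 'id3']],
    'distances': [[0.27, 0.41]],
    'documents': [['...paragraph text...', '...another paragraph...']],
    'metadatas': [[None, None]]
}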
class RAG_Bot:
    def __init__(self, vector_db, llm_api, n_results=2):
        self.vector_db = vector_db
        self.llm_api = llm_api
        self.n_results = n_results

    def chat(self, user_query):
        # 1. Retrieve
        search_results = self.vector_db.search(user_query, self.n_results)
        # 2. Build the prompt
        prompt = build_prompt(
            prompt_template, info=search_results['documents'][0], query=user_query)
        # 3. Call the LLM
        response = self.llm_api(prompt)
        return response
############################################################################
# Take only two pages (Chapter 1 of the paper)
paragraphs = extract_text_from_pdf("llama2.pdf", page_numbers=[2, 3], min_line_length=10)
# Create a vector-database object (note how the embedding function is passed in)
vector_db = MyVectorDBConnector("demo", get_embeddings)
# Add the documents to the vector database
vector_db.add_documents(paragraphs)
# Create a RAG bot
bot = RAG_Bot(vector_db, llm_api=get_completion_glm)
user_query = "llama 2有对话版吗?"  # "Does Llama 2 have a chat version?"
response = bot.chat(user_query)
print(response)

2. Communicating with ChatGLM3, based on faiss

import json
import requests
import os
from pdfminer.high_level import extract_pages
from pdfminer.layout import LTTextContainer
import faiss
import numpy as np
from sentence_transformers import SentenceTransformer
########################### classes & functions ###########################
model = SentenceTransformer('/home/helu/milvus/m3e-base')  # local m3e-base embedding model

# Call the chat endpoint
def get_completion_glm(prompt):
    endpoint_url = "http://192.168.212.211:6006/"  # DL4 GPU server: ~10 s per answer
    # endpoint_url = "http://127.0.0.1:8000/"      # laptop VM: ~2 min per answer
    chat_messages = [
        {
            "role": "system",
            "content": "从现在开始扮演一个专业人士和我对话",  # "From now on, act as a professional in our conversation"
        }
    ]
    chat_messages.append({"role": "user", "content": prompt})
    data = {
        "model": "chatglm3",        # model name
        "messages": chat_messages,  # conversation history
        "stream": False,            # whether to stream the response
        "max_tokens": 2000,         # maximum number of generated tokens
        "temperature": 0.8,         # sampling temperature
        "top_p": 0.8,               # nucleus sampling probability
    }
    headers = {'Content-Type': 'application/json'}
    response = requests.post(f"{endpoint_url}v1/chat/completions", headers=headers, json=data)
    parsed_response = response.json()
    text = parsed_response.get("choices", [{}])[0].get("message", {}).get("content", "")
    return text
def extract_text_from_pdf(filename, page_numbers=None, min_line_length=1):
    '''Extract text from a PDF file (optionally restricted to the given page numbers)'''
    paragraphs = []
    buffer = ''
    full_text = ''
    # Extract all the text
    for i, page_layout in enumerate(extract_pages(filename)):
        # If a page range was given, skip pages outside it
        if page_numbers is not None and i not in page_numbers:
            continue
        for element in page_layout:
            if isinstance(element, LTTextContainer):
                full_text += element.get_text() + '\n'
    # Re-assemble the text into paragraphs, splitting on blank lines
    lines = full_text.split('\n')
    for text in lines:
        if len(text) >= min_line_length:
            # Join hyphenated line breaks; otherwise insert a space between lines
            buffer += (' ' + text) if not text.endswith('-') else text.strip('-')
        elif buffer:
            paragraphs.append(buffer)
            buffer = ''
    if buffer:
        paragraphs.append(buffer)
    return paragraphs
prompt_template = """
你是一个问答机器人。
你的任务是根据下述给定的已知信息回答用户问题。
确保你的回复完全依据下述已知信息。不要编造答案。
如果下述已知信息不足以回答用户的问题,请直接回复"我无法回答您的问题"。
已知信息:
__INFO__
用户问:
__QUERY__
请用中文回答用户问题。
"""
def build_prompt(prompt_template, **kwargs):
    '''Fill in the placeholders of the prompt template'''
    prompt = prompt_template
    for k, v in kwargs.items():
        if isinstance(v, str):
            val = v
        elif isinstance(v, list) and all(isinstance(elem, str) for elem in v):
            val = '\n'.join(v)
        else:
            val = str(v)
        prompt = prompt.replace(f"__{k.upper()}__", val)
    return prompt
def get_embeddings(texts):
    # model.encode returns a float32 numpy array of shape (n_texts, dim), which is what faiss expects
    return model.encode(texts)
def create_index(datas_embedding):
    # IndexFlatL2 must be created with the vector dimensionality; it starts out empty
    index = faiss.IndexFlatL2(datas_embedding.shape[1])
    # faiss wants contiguous float32 data; sentence-transformers already returns float32, but be explicit
    index.add(np.ascontiguousarray(datas_embedding, dtype=np.float32))
    return index
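
IndexFlatL2 ranks by exact (brute-force) L2 distance. If cosine similarity is preferred for sentence embeddings, a common faiss pattern is to L2-normalize the vectors and use an inner-product index instead; a minimal sketch, not used by the script below:

def create_cosine_index(datas_embedding):
    embeddings = np.ascontiguousarray(datas_embedding, dtype=np.float32)
    faiss.normalize_L2(embeddings)  # normalize in place: inner product == cosine similarity
    index = faiss.IndexFlatIP(embeddings.shape[1])
    index.add(embeddings)
    return index  # query vectors must be normalized the same way before searching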
class RAG_Bot:
    def __init__(self, vector_db, llm_api, paragraphs, n_results=2):
        self.vector_db = vector_db
        self.llm_api = llm_api
        self.n_results = n_results
        self.para = paragraphs  # faiss stores only vectors, so keep the original texts here

    def chat(self, user_query):
        # 1. Retrieve: faiss returns (n_queries, n_results) arrays of distances and indices
        query_embedding = model.encode([user_query])
        distances, indices = self.vector_db.search(query_embedding, self.n_results)
        # 2. Build the prompt: map the indices back to the stored paragraphs
        #    (use self.para, not the global list, and include all top-n hits, not just the first)
        par = [self.para[int(i)] for i in indices[0]]
        prompt = build_prompt(
            prompt_template, info=par, query=user_query)
        # 3. Call the LLM
        response = self.llm_api(prompt)
        return response
############################################################################
# Take only two pages (Chapter 1 of the paper)
paragraphs = extract_text_from_pdf("llama2.pdf", page_numbers=[2, 3], min_line_length=10)
datas_embedding = get_embeddings(paragraphs)
faiss_index = create_index(datas_embedding)
# Create a RAG bot
bot = RAG_Bot(faiss_index, llm_api=get_completion_glm, paragraphs=paragraphs)
user_query = "llama2相比较llama1有哪些提高?"  # "What does Llama 2 improve over Llama 1?"
response = bot.chat(user_query)
print(response)

First impressions: chromadb behaves more like a real database. Its query pipeline is self-contained, and a search returns the original documents along with their ids and distances:

def chat(self, user_query):
    # 1. Retrieve
    search_results = self.vector_db.search(user_query, self.n_results)
    # 2. Build the prompt
    prompt = build_prompt(
        prompt_template, info=search_results['documents'][0], query=user_query)
    # 3. Call the LLM
    response = self.llm_api(prompt)
    return response

faiss, by contrast, is more of a lightweight retrieval engine: a search only yields indices (and distances), so the original paragraphs must be kept separately and looked up by index when building the prompt:

def chat(self, user_query):
    # 1. Retrieve: only distances and indices come back
    query_embedding = model.encode([user_query])
    distances, indices = self.vector_db.search(query_embedding, self.n_results)
    # 2. Build the prompt: look the original paragraphs up by index
    par = [self.para[int(i)] for i in indices[0]]
    prompt = build_prompt(
        prompt_template, info=par, query=user_query)
    # 3. Call the LLM
    response = self.llm_api(prompt)
    return response
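
If the two backends should be interchangeable, the faiss index and the paragraph list can be hidden behind the same search() interface the chromadb connector exposes. A minimal sketch, reusing the get_embeddings and create_index helpers above:

class FaissVectorDBConnector:
    def __init__(self, paragraphs, embedding_fn):
        self.para = paragraphs  # faiss stores only vectors, so keep the texts here
        self.embedding_fn = embedding_fn
        self.index = create_index(embedding_fn(paragraphs))

    def search(self, query, top_n):
        query_vec = np.ascontiguousarray(self.embedding_fn([query]), dtype=np.float32)
        distances, indices = self.index.search(query_vec, top_n)
        # Mimic chromadb's return shape: one inner list per query
        return {'documents': [[self.para[int(i)] for i in indices[0]]],
                'distances': [distances[0].tolist()]}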

posted on 2024-01-24 07:03 jsxyhelu
