AI的学习之路_7_会话

临时会话

LangChain提供了History功能,帮助模型在有记忆的情况下回答问题

  • 基于RunnableWithMessageHistory在原有链的基础上创建具有历史记录功能的新链
  • 基于InMemoryChatMessageHistory为历史记录提供内存存储(临时使用)

先看一下示例代码

from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory

conversation_chain = RunnableWithMessageHistory(
    some_chain, # the Runnable (usually a chain) to wrap with message history
    None,   # placeholder for the session-history lookup function (see get_history below) — passing None as-is would fail at runtime
    input_messages_key="input", # name of the user-input placeholder in the prompt template
    history_messages_key="chat_history" # name of the history placeholder in the prompt template
)

# In-memory store: maps each session ID to that session's history object
chat_history_store = {}

# The lookup function receives a session ID (a string) and must return an
# instance of a BaseChatMessageHistory subclass — the class dedicated to
# holding one session's message history.
# InMemoryChatMessageHistory is the built-in in-memory implementation.
def get_history(session_id):
    """Return the chat history for *session_id*, creating an empty one on first use."""
    history = chat_history_store.get(session_id)
    if history is None:
        history = InMemoryChatMessageHistory()
        chat_history_store[session_id] = history
    return history

写个代码运行一下

from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_community.chat_models.tongyi import ChatTongyi
from langchain_core.prompts import PromptTemplate

# Tongyi (Qwen) chat model used as the LLM backend
model = ChatTongyi(
    model = "qwen-max"
)

# NOTE(review): "promat" looks like a typo for "prompt" — kept as-is because
# base_chain below references this exact name.
promat = PromptTemplate.from_template(
    "你需要根据历史对话{history},来回答用户提出的{input}问题"
)

# Parses the chat model's message output into a plain string
str_parser = StrOutputParser()

# Debug helper: dump the rendered prompt mid-chain and forward it untouched.
def printf_prompt(full_prompt):
    separator = "=" * 20
    print(separator, full_prompt.to_string(), separator)
    return full_prompt

# Base chain: prompt -> debug printer -> model -> string parser
base_chain = promat | printf_prompt | model | str_parser

# Maps each session ID to its InMemoryChatMessageHistory instance
store = {}

# Resolve a session ID to its in-memory history, creating it on first use.
def get_history(session_id):
    if session_id in store:
        return store[session_id]
    fresh_history = InMemoryChatMessageHistory()
    store[session_id] = fresh_history
    return fresh_history

# Create an enhanced chain that automatically injects history messages
conversation_chain = RunnableWithMessageHistory(
    base_chain, # the original chain being wrapped
    get_history,    # resolves a session ID to its InMemoryChatMessageHistory
    input_messages_key = "input",   # template placeholder for the user input
    history_messages_key = "history"    # template placeholder for the history messages
)

if __name__ == '__main__':
    # Fixed shape required by RunnableWithMessageHistory: the session id
    # rides in the "configurable" section of the invoke config.
    session_config = {
        "configurable": {
            "session_id": "user_lykf"
        }
    }
    # Three turns against the same session — the model should carry the
    # running total across them.
    turns = [
        ("第一次提问:", "我有10块钱"),
        ("第二次提问:", "我花了3块"),
        ("第三次提问:", "我还有多少钱"),
    ]
    for label, question in turns:
        res = conversation_chain.invoke({"input": question}, session_config)
        print(label, res)

image

或者这样写

from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_community.chat_models.tongyi import ChatTongyi
from langchain_core.prompts import PromptTemplate, ChatPromptTemplate, MessagesPlaceholder

# Tongyi (Qwen) chat model used as the LLM backend
model = ChatTongyi(
    model = "qwen-max"
)

# Chat-style prompt: system instruction + history placeholder + current question
chat_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "你需要根据用户的会话历史进行对话"),
        MessagesPlaceholder("history"),
        ("human", "请回答如下问题: {input}")
    ]
)

# Earlier single-string PromptTemplate variant, kept for comparison:
# promat = PromptTemplate.from_template(
#     "你需要根据历史对话{history},来回答用户提出的{input}问题"
# )

# Parses the chat model's message output into a plain string
str_parser = StrOutputParser()

def printf_prompt(full_prompt):
    """Debug tap: print the fully rendered prompt, then pass it through unchanged."""
    bar = "=" * 20
    print(bar, full_prompt.to_string(), bar)
    return full_prompt

# Base chain: chat prompt -> debug printer -> model -> string parser
base_chain = chat_prompt | printf_prompt | model | str_parser

# Maps each session ID to its InMemoryChatMessageHistory instance
store = {}

# Fetch the history object for this session ID, lazily creating an empty one.
def get_history(session_id):
    history = store.get(session_id)
    if history is None:
        history = InMemoryChatMessageHistory()
        store[session_id] = history
    return history

# Create an enhanced chain that automatically injects history messages
conversation_chain = RunnableWithMessageHistory(
    base_chain, # the original chain being wrapped
    get_history,    # resolves a session ID to its InMemoryChatMessageHistory
    input_messages_key = "input",   # template placeholder for the user input
    history_messages_key = "history"    # template placeholder for the history messages
)

if __name__ == '__main__':
    # Fixed config shape: RunnableWithMessageHistory reads the session id
    # from the "configurable" section.
    session_config = {"configurable": {"session_id": "user_lykf"}}

    labels = ["第一次提问:", "第二次提问:", "第三次提问:"]
    questions = ["我有10块钱", "我花了3块", "我还有多少钱"]
    # Same session across all three turns, so the history accumulates.
    for label, question in zip(labels, questions):
        print(label, conversation_chain.invoke({"input": question}, session_config))

image
ChatPromptTemplate的效果要更好

其中,调用流程如下:
第一次提问:
image

第二次提问:
image

Memory长期会话记忆

基于Json格式和本地文件的会话存储功能

FileChatMessageHistory类实现,核心思路:

  • 基于文件存储会话记录,以session_id为文件名称,不同session_id有不同文件存储消息

继承BaseChatMessageHistory实现如下3个方法

  • add_message:同步模式,添加消息

  • messages:同步模式,获取消息

  • clear:同步模式,清除消息

看一下示例代码:

import json
from pathlib import Path
from typing import List, Optional

from langchain_core.chat_history import (
    BaseChatMessageHistory,
)
from langchain_core.messages import BaseMessage, messages_from_dict, messages_to_dict


class FileChatMessageHistory(BaseChatMessageHistory):
    """Chat message history that stores history in a local file.

    The file always holds one JSON list of message dicts for a single session.
    """

    def __init__(
        self,
        file_path: str,
        *,
        encoding: Optional[str] = None,
        ensure_ascii: bool = True,
    ) -> None:
        """Initialize the file path for the chat history.
        Args:
            file_path: The path to the local file to store the chat history.
            encoding: The encoding to use for file operations. Defaults to None.
            ensure_ascii: If True, escape non-ASCII in JSON. Defaults to True.
        """
        self.file_path = Path(file_path)# path to the backing JSON file (one file per session)
        self.encoding = encoding
        self.ensure_ascii = ensure_ascii

        # Seed a brand-new file with an empty JSON list so `messages` can
        # always parse it.
        if not self.file_path.exists():
            self.file_path.touch()
            self.file_path.write_text(
                json.dumps([], ensure_ascii=self.ensure_ascii), encoding=self.encoding
            )

    @property
    def messages(self) -> List[BaseMessage]:  # type: ignore[override]
        """Retrieve the messages from the local file"""
        items = json.loads(self.file_path.read_text(encoding=self.encoding))
        # messages_from_dict rebuilds BaseMessage subclasses from their dict form
        messages = messages_from_dict(items)
        return messages

    def add_message(self, message: BaseMessage) -> None:
        """Append the message to the record in the local file"""
        # Read-modify-write: load everything, append the new message, and
        # rewrite the whole file (simple, but O(n) per append).
        messages = messages_to_dict(self.messages)
        messages.append(messages_to_dict([message])[0])
        self.file_path.write_text(
            json.dumps(messages, ensure_ascii=self.ensure_ascii), encoding=self.encoding
        )

    def clear(self) -> None:
        """Clear session memory from the local file"""
        self.file_path.write_text(
            json.dumps([], ensure_ascii=self.ensure_ascii), encoding=self.encoding
        )

然后我们自己实现一个长期记忆的会话代码

import os, json
from typing import Sequence

from langchain_core.messages import messages_to_dict, messages_from_dict, BaseMessage
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_ollama.chat_models import ChatOllama
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableWithMessageHistory


class FileChatMessageHistory(BaseChatMessageHistory):
    """Chat history persisted as one JSON file per session.

    The file lives at <storage_path>/<session_id>.json and always contains a
    JSON list of message dicts (the messages_to_dict format).
    """

    def __init__(self, session_id, storage_path):
        self.session_id = session_id
        self.storage_path = storage_path
        self.file_path = os.path.join(self.storage_path, f"{self.session_id}.json")
        # Make sure the storage directory exists before any read/write happens.
        os.makedirs(self.storage_path, exist_ok=True)

    def _write(self, payload) -> None:
        # Single write path: UTF-8 with readable (non-escaped) characters.
        with open(self.file_path, "w", encoding="utf-8") as fp:
            json.dump(payload, fp, ensure_ascii=False, indent=2)

    def add_messages(self, messages: Sequence[BaseMessage]) -> None:
        """Append *messages* to whatever is already stored for this session."""
        combined = list(self.messages) + list(messages)
        # messages_to_dict yields the flat list-of-dicts shape the file expects.
        self._write(messages_to_dict(combined))

    @property
    def messages(self) -> list[BaseMessage]:
        """All stored messages, or an empty list when nothing was saved yet."""
        try:
            with open(self.file_path, "r", encoding="utf-8") as fp:
                return messages_from_dict(json.load(fp))
        except FileNotFoundError:
            return []

    def clear(self) -> None:
        """Erase every stored message for this session."""
        self._write([])


# Model initialization: local Ollama server hosting a quantized Qwen 2.5 model
model = ChatOllama(
    base_url="http://localhost:11434",
    model="qwen2.5:3b-instruct-q4_0"
)

# Prompt template: system instruction + history placeholder + current question
chat_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "你需要根据用户的会话历史进行对话"),
        MessagesPlaceholder("history"),
        ("human", "请回答如下问题: {input}")
    ]
)

# Output parser: turns the chat model's message into a plain string
str_parser = StrOutputParser()

# Debug tap: show the fully rendered prompt between delimiter lines, pass it on.
def printf_prompt(full_prompt):
    border = "=" * 20
    print(border)
    print(full_prompt.to_string())
    print(border)
    return full_prompt

# Resolve a session ID to a file-backed history stored under ./chat_history.
def get_history(session_id):
    storage_dir = "./chat_history"
    return FileChatMessageHistory(session_id, storage_dir)

# Build the base chain: prompt -> debug printer -> model -> string parser
base_chain = chat_prompt | printf_prompt | model | str_parser

# Wrap the base chain with message history: history is loaded before each call
# and the new user/AI messages are persisted afterwards via get_history
conversation_chain = RunnableWithMessageHistory(
    base_chain,
    get_history,
    input_messages_key="input",
    history_messages_key="history"
)

if __name__ == '__main__':
    # The session id rides in the "configurable" section of the invoke config.
    session_config = {
        "configurable": {
            "session_id": "user_lykf"
        }
    }
    # Two-turn smoke test against the same session.
    for label, question in [
        ("第一次提问:", "我有10块钱"),
        ("第二次提问:", "我花了3块"),
    ]:
        print(label, conversation_chain.invoke({"input": question}, session_config))
    # Third turn intentionally left disabled (run separately in the walkthrough):
    # res = conversation_chain.invoke({"input": "我还有多少钱"}, session_config)
    # print("第三次提问:", res)

2026-03-29-10-05-39-image
会创建如下文件夹和json文件
2026-03-29-10-06-05-image

[
  {
    "type": "human",
    "data": {
      "content": "我有10块钱",
      "additional_kwargs": {},
      "response_metadata": {},
      "type": "human",
      "name": null,
      "id": null
    }
  },
  {
    "type": "ai",
    "data": {
      "content": "您好,您目前手头上有10块钱。需要帮助吗?",
      "additional_kwargs": {},
      "response_metadata": {},
      "type": "ai",
      "name": null,
      "id": null,
      "tool_calls": [],
      "invalid_tool_calls": [],
      "usage_metadata": null
    }
  },
  {
    "type": "human",
    "data": {
      "content": "我花了3块",
      "additional_kwargs": {},
      "response_metadata": {},
      "type": "human",
      "name": null,
      "id": null
    }
  },
  {
    "type": "ai",
    "data": {
      "content": "好的,如果您已经花费了3元,那您现在手里还剩下7元。还需要其他帮助吗?",
      "additional_kwargs": {},
      "response_metadata": {},
      "type": "ai",
      "name": null,
      "id": null,
      "tool_calls": [],
      "invalid_tool_calls": [],
      "usage_metadata": null
    }
  }
]

然后运行下面的代码

    res = conversation_chain.invoke({"input": "我还有多少钱"}, session_config)
    print("第三次提问:", res)

2026-03-29-10-07-47-image
json就会变成下面这样

[
  {
    "type": "human",
    "data": {
      "content": "我有10块钱",
      "additional_kwargs": {},
      "response_metadata": {},
      "type": "human",
      "name": null,
      "id": null
    }
  },
  {
    "type": "ai",
    "data": {
      "content": "您好,您目前手头上有10块钱。需要帮助吗?",
      "additional_kwargs": {},
      "response_metadata": {},
      "type": "ai",
      "name": null,
      "id": null,
      "tool_calls": [],
      "invalid_tool_calls": [],
      "usage_metadata": null
    }
  },
  {
    "type": "human",
    "data": {
      "content": "我花了3块",
      "additional_kwargs": {},
      "response_metadata": {},
      "type": "human",
      "name": null,
      "id": null
    }
  },
  {
    "type": "ai",
    "data": {
      "content": "好的,如果您已经花费了3元,那您现在手里还剩下7元。还需要其他帮助吗?",
      "additional_kwargs": {},
      "response_metadata": {},
      "type": "ai",
      "name": null,
      "id": null,
      "tool_calls": [],
      "invalid_tool_calls": [],
      "usage_metadata": null
    }
  },
  {
    "type": "human",
    "data": {
      "content": "我还有多少钱",
      "additional_kwargs": {},
      "response_metadata": {},
      "type": "human",
      "name": null,
      "id": null
    }
  },
  {
    "type": "ai",
    "data": {
      "content": "您目前手头上有7块钱。",
      "additional_kwargs": {},
      "response_metadata": {},
      "type": "ai",
      "name": null,
      "id": null,
      "tool_calls": [],
      "invalid_tool_calls": [],
      "usage_metadata": null
    }
  }
]

这样便实现了长期的历史记忆对话

posted @ 2026-03-24 22:16  灵垚克府  阅读(2)  评论(0)    收藏  举报