MoneyPrinterTurbo扩展开发指南:添加新的LLM提供商 - 详解
摘要
MoneyPrinterTurbo支持多种大语言模型(LLM)提供商,但随着AI技术的快速发展,新的LLM服务不断涌现。本文将详细介绍如何为MoneyPrinterTurbo添加新的LLM提供商支持,包括代码结构分析、接口设计、实现步骤和测试方法,帮助开发者快速扩展项目功能。
正文
1. MoneyPrinterTurbo LLM架构概述
MoneyPrinterTurbo采用了插件化的设计模式来支持多种LLM提供商,这种架构使得添加新的LLM服务变得相对简单和标准化。
1.1 LLM服务架构图
1.2 核心代码结构
# app/services/llm.py (简化版)
import g4f
from openai import AzureOpenAI, OpenAI
import google.generativeai as genai
import dashscope
import qianfan
def _generate_response(prompt: str) -> str:
    """
    Core response-generation function (simplified skeleton).

    Dispatches the prompt to whichever LLM provider is selected in the
    application config and returns the generated text.

    Args:
        prompt (str): Prompt text to send to the model.

    Returns:
        str: The generated response text.
    """
    # Provider name is read from the app config; defaults to "openai".
    llm_provider = config.app.get("llm_provider", "openai")
    if llm_provider == "g4f":
        # G4F implementation
        pass
    elif llm_provider == "openai":
        # OpenAI implementation
        pass
    elif llm_provider == "azure":
        # Azure implementation
        pass
    # ... implementations for other providers
2. 添加新LLM提供商的准备工作
在开始实现之前,需要完成以下准备工作:
2.1 研究目标LLM提供商
以Claude AI为例,需要了解:
- API文档:https://docs.anthropic.com/claude/reference
- 认证方式:API密钥
- 请求格式:HTTP请求参数和格式
- 响应格式:返回数据结构
- Python SDK:是否有官方SDK
2.2 环境准备
# 1. 克隆项目代码
git clone https://github.com/harry0703/MoneyPrinterTurbo.git
cd MoneyPrinterTurbo
# 2. 创建虚拟环境
conda create -n MoneyPrinterTurbo-dev python=3.11
conda activate MoneyPrinterTurbo-dev
# 3. 安装依赖
pip install -r requirements.txt
# 4. 安装目标LLM提供商的SDK(以Claude为例)
pip install anthropic
3. 实现步骤详解
3.1 配置文件扩展
首先需要在配置文件中添加新提供商的相关配置项:
# config.example.toml
[app]
# ... 其他配置 ...
# Claude API配置
claude_api_key = ""
claude_base_url = "https://api.anthropic.com"
claude_model_name = "claude-3-haiku-20240307"
# ... 其他配置 ...
3.2 核心实现代码
# app/services/llm.py
import anthropic
from loguru import logger
def _generate_response(prompt: str) -> str:
    """
    Generate a response for the given prompt.

    Dispatches to the LLM provider configured under ``app.llm_provider``.
    Any error is logged and an empty string is returned, so callers never
    have to handle provider-specific exceptions.

    Args:
        prompt (str): Prompt text.

    Returns:
        str: Generated response text, or "" on failure.
    """
    try:
        content = ""
        llm_provider = config.app.get("llm_provider", "openai")
        logger.info(f"使用的LLM提供商: {llm_provider}")
        # Existing provider implementations...
        if llm_provider == "g4f":
            # ... existing code ...
            pass
        elif llm_provider == "claude":
            # New Claude (Anthropic) implementation.
            api_key = config.app.get("claude_api_key")
            model_name = config.app.get("claude_model_name", "claude-3-haiku-20240307")
            base_url = config.app.get("claude_base_url", "https://api.anthropic.com")
            # Fail fast with a clear message instead of an opaque HTTP 401.
            if not api_key:
                raise ValueError("Claude API密钥未配置")
            # Create the Claude client.
            client = anthropic.Anthropic(
                api_key=api_key,
                base_url=base_url
            )
            # Issue the API call.
            message = client.messages.create(
                model=model_name,
                max_tokens=1024,
                messages=[
                    {
                        "role": "user",
                        "content": prompt,
                    }
                ]
            )
            # Extract the text of the first content block.
            content = message.content[0].text
        else:
            # ... other existing provider implementations ...
            pass
        return content
    except Exception as e:
        logger.error(f"生成响应时出错: {e}")
        return ""
3.3 完整的Claude适配器实现
# app/services/llm/claude_adapter.py
import anthropic
from typing import Optional, List, Dict
from loguru import logger
class ClaudeAdapter:
    """
    Adapter for the Anthropic Claude LLM.

    Encapsulates client construction and the messages API so the rest of
    the service can generate text through a uniform string-in/string-out
    interface.
    """

    def __init__(self, api_key: str, model_name: str = "claude-3-haiku-20240307",
                 base_url: str = "https://api.anthropic.com"):
        """
        Initialize the Claude adapter.

        Args:
            api_key (str): Claude API key.
            model_name (str): Model identifier used for every request.
            base_url (str): API base URL (override for gateways/proxies).

        Raises:
            Exception: Re-raised if the underlying client cannot be created.
        """
        self.api_key = api_key
        self.model_name = model_name
        self.base_url = base_url
        self.client = None
        self._initialize_client()

    def _initialize_client(self):
        """Create the anthropic.Anthropic client; log and re-raise on failure."""
        try:
            self.client = anthropic.Anthropic(
                api_key=self.api_key,
                base_url=self.base_url
            )
            logger.info("Claude客户端初始化成功")
        except Exception as e:
            logger.error(f"Claude客户端初始化失败: {e}")
            raise

    @staticmethod
    def _extract_text(message) -> str:
        """Return the first content block's text, or "" when the reply is empty."""
        if message.content and len(message.content) > 0:
            return message.content[0].text
        logger.warning("Claude返回空内容")
        return ""

    def generate_text(self, prompt: str, max_tokens: int = 1024,
                      temperature: float = 0.7, **kwargs) -> Optional[str]:
        """
        Generate text for a single user prompt.

        Args:
            prompt (str): Prompt text.
            max_tokens (int): Maximum number of tokens to generate.
            temperature (float): Sampling temperature.
            **kwargs: Extra arguments forwarded to messages.create.

        Returns:
            Optional[str]: Generated text ("" for an empty reply), or None
            when the API call fails.
        """
        try:
            message = self.client.messages.create(
                model=self.model_name,
                max_tokens=max_tokens,
                temperature=temperature,
                messages=[
                    {
                        "role": "user",
                        "content": prompt,
                    }
                ],
                **kwargs
            )
            return self._extract_text(message)
        except anthropic.APIError as e:
            logger.error(f"Claude API错误: {e}")
            return None
        except Exception as e:
            logger.error(f"Claude生成文本时出错: {e}")
            return None

    def generate_with_system_prompt(self, system_prompt: str, user_prompt: str,
                                    max_tokens: int = 1024, temperature: float = 0.7) -> Optional[str]:
        """
        Generate text with a separate system prompt.

        Args:
            system_prompt (str): System instruction.
            user_prompt (str): User message.
            max_tokens (int): Maximum number of tokens to generate.
            temperature (float): Sampling temperature.

        Returns:
            Optional[str]: Generated text ("" for an empty reply), or None
            when the API call fails.
        """
        try:
            message = self.client.messages.create(
                model=self.model_name,
                max_tokens=max_tokens,
                temperature=temperature,
                system=system_prompt,
                messages=[
                    {
                        "role": "user",
                        "content": user_prompt,
                    }
                ]
            )
            return self._extract_text(message)
        # Same error handling as generate_text: distinguish API errors.
        except anthropic.APIError as e:
            logger.error(f"Claude API错误: {e}")
            return None
        except Exception as e:
            logger.error(f"Claude生成文本时出错: {e}")
            return None
# 在主LLM服务中集成
def _generate_response_with_claude(prompt: str) -> str:
    """
    Generate a response through the Claude provider.

    Reads the Claude settings from the application config, runs the
    prompt through a ClaudeAdapter, and returns the generated text.
    Any failure is logged and reported as an empty string.
    """
    try:
        key = config.app.get("claude_api_key")
        model = config.app.get("claude_model_name", "claude-3-haiku-20240307")
        url = config.app.get("claude_base_url", "https://api.anthropic.com")
        # An API key is mandatory; everything else has a sensible default.
        if not key:
            raise ValueError("Claude API密钥未配置")
        result = ClaudeAdapter(key, model, url).generate_text(prompt)
        return result if result else ""
    except Exception as e:
        logger.error(f"Claude生成响应时出错: {e}")
        return ""
3.4 集成到主服务
# app/services/llm.py (更新后的完整实现)
import json
import logging
import re
import requests
from typing import List
import g4f
from loguru import logger
from openai import AzureOpenAI, OpenAI
from openai.types.chat import ChatCompletion
# 导入新添加的适配器
from .claude_adapter import ClaudeAdapter
from app.config import config
_max_retries = 5
def _generate_response(prompt: str) -> str:
    """
    Generate a response for the given prompt.

    Dispatches to the provider named by ``app.llm_provider``. Providers
    with OpenAI-compatible APIs share the client code at the bottom of the
    ``else`` branch; gemini/qwen/ernie use their own SDKs and return early.

    Args:
        prompt (str): Prompt text.

    Returns:
        str: Generated response text, or "" on failure.
    """
    try:
        content = ""
        llm_provider = config.app.get("llm_provider", "openai")
        logger.info(f"使用的LLM提供商: {llm_provider}")
        if llm_provider == "g4f":
            # G4F free models; fall back to a default model name.
            model_name = config.app.get("g4f_model_name", "")
            if not model_name:
                model_name = "gpt-3.5-turbo-16k-0613"
            content = g4f.ChatCompletion.create(
                model=model_name,
                messages=[{"role": "user", "content": prompt}],
            )
        elif llm_provider == "claude":
            # Claude has its own adapter-based implementation.
            content = _generate_response_with_claude(prompt)
        else:
            api_version = ""  # only used by the Azure branch below
            if llm_provider == "moonshot":
                api_key = config.app.get("moonshot_api_key")
                model_name = config.app.get("moonshot_model_name")
                base_url = "https://api.moonshot.cn/v1"
            elif llm_provider == "ollama":
                # Ollama exposes an OpenAI-compatible API; the key is a placeholder.
                api_key = "ollama"
                model_name = config.app.get("ollama_model_name")
                base_url = config.app.get("ollama_base_url", "")
                if not base_url:
                    base_url = "http://localhost:11434/v1"
            elif llm_provider == "openai":
                api_key = config.app.get("openai_api_key")
                model_name = config.app.get("openai_model_name")
                base_url = config.app.get("openai_base_url", "")
                if not base_url:
                    base_url = "https://api.openai.com/v1"
            elif llm_provider == "oneapi":
                api_key = config.app.get("oneapi_api_key")
                model_name = config.app.get("oneapi_model_name")
                base_url = config.app.get("oneapi_base_url", "")
            elif llm_provider == "azure":
                api_key = config.app.get("azure_api_key")
                model_name = config.app.get("azure_model_name")
                base_url = config.app.get("azure_base_url", "")
                api_version = config.app.get("azure_api_version", "2024-02-15-preview")
            elif llm_provider == "gemini":
                # Gemini uses its own SDK and returns directly.
                import google.generativeai as genai
                api_key = config.app.get("gemini_api_key")
                model_name = config.app.get("gemini_model_name")
                genai.configure(api_key=api_key)
                model = genai.GenerativeModel(model_name)
                response = model.generate_content(prompt)
                return response.text
            elif llm_provider == "qwen":
                # Qwen (DashScope) uses its own SDK and returns directly.
                import dashscope
                api_key = config.app.get("qwen_api_key")
                model_name = config.app.get("qwen_model_name")
                dashscope.api_key = api_key
                response = dashscope.Generation.call(
                    model=model_name,
                    prompt=prompt,
                )
                if response.status_code == 200:
                    return response.output.text
                else:
                    logger.error(f"通义千问调用失败: {response}")
                    return ""
            elif llm_provider == "deepseek":
                api_key = config.app.get("deepseek_api_key")
                model_name = config.app.get("deepseek_model_name")
                base_url = config.app.get("deepseek_base_url", "https://api.deepseek.com")
            elif llm_provider == "ernie":
                # ERNIE (Qianfan) uses its own SDK and returns directly.
                import qianfan
                api_key = config.app.get("ernie_api_key")
                secret_key = config.app.get("ernie_secret_key")
                model_name = config.app.get("ernie_model_name")
                chat_comp = qianfan.ChatCompletion(ak=api_key, sk=secret_key)
                response = chat_comp.do(model=model_name, messages=[{"role": "user", "content": prompt}])
                if response.get("result"):
                    return response["result"]
                else:
                    logger.error(f"文心一言调用失败: {response}")
                    return ""
            elif llm_provider == "siliconflow":
                api_key = config.app.get("siliconflow_api_key")
                model_name = config.app.get("siliconflow_model_name")
                base_url = "https://api.siliconflow.cn/v1"
            elif llm_provider == "pollinations":
                api_key = config.app.get("pollinations_api_key")
                model_name = config.app.get("pollinations_model_name")
                base_url = config.app.get("pollinations_base_url", "https://pollinations.ai/api/v1")
            else:
                # Unknown provider: fall back to the OpenAI settings.
                api_key = config.app.get("openai_api_key")
                model_name = config.app.get("openai_model_name")
                base_url = config.app.get("openai_base_url", "")
                if not base_url:
                    base_url = "https://api.openai.com/v1"
            # Remaining providers all speak the OpenAI chat-completions protocol.
            if llm_provider == "azure":
                client = AzureOpenAI(
                    api_key=api_key,
                    api_version=api_version,
                    azure_endpoint=base_url,
                )
            else:
                client = OpenAI(
                    api_key=api_key,
                    base_url=base_url,
                )
            response = client.chat.completions.create(
                model=model_name,
                messages=[{"role": "user", "content": prompt}],
            )
            content = response.choices[0].message.content
        return content
    except Exception as e:
        logger.error(f"生成响应时出错: {e}")
        return ""
4. 配置管理扩展
4.1 配置验证
# app/config/config.py (配置验证扩展)
from typing import Dict, Any
class ConfigValidator:
    """Validates LLM-related application configuration."""

    @staticmethod
    def validate_llm_config(config: Dict[str, Any]) -> bool:
        """
        Check that the configured LLM provider has its required settings.

        Args:
            config (Dict[str, Any]): Application configuration mapping.

        Returns:
            bool: True when the configuration is usable, False otherwise.
        """
        provider = config.get("llm_provider", "openai")
        if provider == "claude":
            # Claude only strictly needs an API key; model and URL have defaults.
            for key in ("claude_api_key",):
                if not config.get(key):
                    logger.error(f"Claude配置缺失: {key}")
                    return False
        # ... validation for other providers ...
        return True
# 在配置加载时进行验证
def load_and_validate_config():
    """
    Load the application configuration and validate its LLM section.

    Returns:
        The loaded configuration object.

    Raises:
        ValueError: When the LLM configuration fails validation.
    """
    cfg = load_config()
    # Abort startup early on an unusable LLM configuration.
    if not ConfigValidator.validate_llm_config(cfg.app):
        raise ValueError("LLM配置验证失败")
    return cfg
4.2 动态配置支持
# app/config/dynamic_config.py
import os
from typing import Optional
class DynamicConfig:
    """Runtime (environment-aware) configuration helpers for Claude."""

    @staticmethod
    def get_claude_config() -> dict:
        """
        Resolve the effective Claude settings.

        Environment variables take precedence over values stored in the
        application config; hard-coded defaults apply last.
        """
        env = os.getenv
        return {
            "api_key": env("CLAUDE_API_KEY") or config.app.get("claude_api_key", ""),
            "model_name": env("CLAUDE_MODEL_NAME") or config.app.get("claude_model_name", "claude-3-haiku-20240307"),
            "base_url": env("CLAUDE_BASE_URL") or config.app.get("claude_base_url", "https://api.anthropic.com")
        }

    @staticmethod
    def update_claude_config(api_key: Optional[str] = None,
                             model_name: Optional[str] = None,
                             base_url: Optional[str] = None):
        """
        Update Claude settings in the in-memory application config.

        Args:
            api_key (Optional[str]): API key; left untouched when falsy.
            model_name (Optional[str]): Model name; left untouched when falsy.
            base_url (Optional[str]): Base URL; left untouched when falsy.
        """
        updates = {
            "claude_api_key": api_key,
            "claude_model_name": model_name,
            "claude_base_url": base_url,
        }
        # Only apply values that were actually provided.
        for setting, value in updates.items():
            if value:
                config.app[setting] = value
5. 测试实现
5.1 单元测试
# test/services/test_claude_adapter.py
import unittest
from unittest.mock import Mock, patch
import sys
import os
# 添加项目根目录到Python路径
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
from app.services.llm.claude_adapter import ClaudeAdapter
class TestClaudeAdapter(unittest.TestCase):
    """
    Unit tests for ClaudeAdapter with the Anthropic client patched out,
    so no network access or real API key is needed.
    """

    def setUp(self):
        """Shared fixture values used by every test."""
        self.api_key = "test-api-key"
        self.model_name = "claude-3-haiku-20240307"
        self.base_url = "https://api.anthropic.com"

    @patch('app.services.llm.claude_adapter.anthropic.Anthropic')
    def test_initialization(self, mock_anthropic):
        """The constructor should build a client with the given credentials."""
        # Create the adapter instance.
        adapter = ClaudeAdapter(self.api_key, self.model_name, self.base_url)
        # Verify the client was constructed with the expected arguments.
        mock_anthropic.assert_called_once_with(
            api_key=self.api_key,
            base_url=self.base_url
        )
        self.assertEqual(adapter.api_key, self.api_key)
        self.assertEqual(adapter.model_name, self.model_name)
        self.assertEqual(adapter.base_url, self.base_url)

    @patch('app.services.llm.claude_adapter.anthropic.Anthropic')
    def test_generate_text_success(self, mock_anthropic):
        """generate_text should return the text of the first content block."""
        # Fake a successful API response.
        mock_response = Mock()
        mock_response.content = [Mock(text="这是测试响应")]
        mock_client = Mock()
        mock_client.messages.create.return_value = mock_response
        mock_anthropic.return_value = mock_client
        # Create the adapter and exercise it.
        adapter = ClaudeAdapter(self.api_key, self.model_name, self.base_url)
        result = adapter.generate_text("测试提示")
        # Verify the result and that exactly one API call was made.
        self.assertEqual(result, "这是测试响应")
        mock_client.messages.create.assert_called_once()

    @patch('app.services.llm.claude_adapter.anthropic.Anthropic')
    def test_generate_text_api_error(self, mock_anthropic):
        """generate_text should report API failures as None, not raise."""
        # Make the API call raise.
        mock_client = Mock()
        mock_client.messages.create.side_effect = Exception("API错误")
        mock_anthropic.return_value = mock_client
        # Create the adapter and exercise it.
        adapter = ClaudeAdapter(self.api_key, self.model_name, self.base_url)
        result = adapter.generate_text("测试提示")
        # Errors are swallowed and surfaced as None.
        self.assertIsNone(result)

    @patch('app.services.llm.claude_adapter.anthropic.Anthropic')
    def test_generate_with_system_prompt(self, mock_anthropic):
        """generate_with_system_prompt should also return the reply text."""
        # Fake a successful API response.
        mock_response = Mock()
        mock_response.content = [Mock(text="带系统提示的响应")]
        mock_client = Mock()
        mock_client.messages.create.return_value = mock_response
        mock_anthropic.return_value = mock_client
        # Create the adapter and exercise it.
        adapter = ClaudeAdapter(self.api_key, self.model_name, self.base_url)
        result = adapter.generate_with_system_prompt(
            "你是一个助手",
            "你好"
        )
        # Verify the result.
        self.assertEqual(result, "带系统提示的响应")


if __name__ == '__main__':
    unittest.main()
5.2 集成测试
# test/integration/test_claude_integration.py
import sys  # was used below without being imported
import os
import unittest
from unittest.mock import Mock, patch  # Mock is used in the tests below

# Make the project root importable when this file is run directly.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))

from app.config import config  # the tests read and mutate config.app
from app.services.llm import _generate_response
class TestClaudeIntegration(unittest.TestCase):
    """
    Integration tests for the Claude path of _generate_response.

    NOTE(review): this module must import `Mock` from unittest.mock and the
    application `config` object at the top of the file — confirm the
    surrounding imports provide them.
    """

    def setUp(self):
        """Switch the app config to the Claude provider, saving originals."""
        # Save the original configuration so tearDown can restore it.
        self.original_provider = os.getenv('LLM_PROVIDER') or config.app.get('llm_provider')
        self.original_api_key = config.app.get('claude_api_key')
        # Apply the test configuration.
        config.app['llm_provider'] = 'claude'
        config.app['claude_api_key'] = 'test-key'

    def tearDown(self):
        """Restore the configuration captured in setUp."""
        # Restore only values that were actually set before the test ran.
        if self.original_provider:
            config.app['llm_provider'] = self.original_provider
        if self.original_api_key:
            config.app['claude_api_key'] = self.original_api_key

    @patch('app.services.llm.ClaudeAdapter')
    def test_claude_integration(self, mock_adapter):
        """_generate_response should route through ClaudeAdapter.generate_text."""
        # Stub the adapter so no real API call is made.
        mock_instance = Mock()
        mock_instance.generate_text.return_value = "集成测试响应"
        mock_adapter.return_value = mock_instance
        # Invoke the function under test.
        result = _generate_response("测试提示")
        # Verify the wiring and the returned text.
        mock_adapter.assert_called_once()
        mock_instance.generate_text.assert_called_once_with("测试提示")
        self.assertEqual(result, "集成测试响应")
6. 文档更新
6.1 README更新
## 支持的LLM提供商
MoneyPrinterTurbo支持以下大语言模型提供商:
- **openai**: OpenAI官方API
- **moonshot**: 月之暗面(推荐国内用户使用)
- **azure**: Azure OpenAI服务
- **qwen**: 通义千问
- **deepseek**: DeepSeek
- **gemini**: Google Gemini
- **ollama**: 本地Ollama模型
- **g4f**: G4F免费模型
- **oneapi**: OneAPI统一接口
- **ernie**: 文心一言
- **claude**: Anthropic Claude (新增)
- **siliconflow**: 硅基流动
- **pollinations**: Pollinations AI
### Claude配置示例
```toml
[app]
llm_provider = "claude"
claude_api_key = "your_claude_api_key"
claude_model_name = "claude-3-haiku-20240307"
claude_base_url = "https://api.anthropic.com"
```
6.2 配置文件模板更新
```toml
# config.example.toml
[app]
# ... 其他配置 ...
########## Claude API Key
# 获取API密钥: https://console.anthropic.com/settings/keys
claude_api_key = ""
# Claude API基础URL
claude_base_url = "https://api.anthropic.com"
# Claude模型名称
# 可选模型: claude-3-opus-20240229, claude-3-sonnet-20240229, claude-3-haiku-20240307
claude_model_name = "claude-3-haiku-20240307"
# ... 其他配置 ...
```
7. 部署和发布
7.1 依赖更新
# requirements.txt
# ... 其他依赖 ...
anthropic>=0.20.0
# ... 其他依赖 ...
7.2 Dockerfile更新
# Dockerfile
# ... 其他内容 ...
# 安装Python依赖
RUN pip install --no-cache-dir -r requirements.txt
# 如果需要特定版本的Claude SDK
RUN pip install anthropic>=0.20.0
# ... 其他内容 ...
8. 最佳实践和注意事项
8.1 错误处理最佳实践
# 错误处理示例
def robust_claude_call(prompt: str, max_retries: int = 3) -> Optional[str]:
    """
    Call Claude with retry and backoff around transient failures.

    NOTE(review): this snippet requires `import time` and `import anthropic`
    at module level. Also, ClaudeAdapter.generate_text catches API errors
    internally and returns None, so the RateLimitError/AuthenticationError
    handlers below would mainly fire for errors raised while constructing
    the adapter — confirm this is the intended behavior.

    Args:
        prompt (str): Prompt text.
        max_retries (int): Maximum number of attempts.

    Returns:
        Optional[str]: Generated text, or None after exhausting retries.
    """
    for attempt in range(max_retries):
        try:
            adapter = ClaudeAdapter(
                api_key=config.app.get("claude_api_key"),
                model_name=config.app.get("claude_model_name"),
                base_url=config.app.get("claude_base_url")
            )
            result = adapter.generate_text(prompt)
            return result
        except anthropic.RateLimitError:
            # Rate limited: exponential backoff (1s, 2s, 4s, ...).
            wait_time = 2 ** attempt
            logger.warning(f"Claude速率限制,等待{wait_time}秒后重试")
            time.sleep(wait_time)
        except anthropic.AuthenticationError:
            # Authentication errors are not retryable.
            logger.error("Claude认证失败,请检查API密钥")
            return None
        except Exception as e:
            # Other errors: retry with a short fixed delay, then give up.
            logger.error(f"Claude调用失败 (尝试 {attempt + 1}/{max_retries}): {e}")
            if attempt == max_retries - 1:
                return None
            time.sleep(1)
    return None
8.2 性能优化建议
# 性能优化示例
class OptimizedClaudeAdapter(ClaudeAdapter):
    """
    Claude adapter with a simple in-memory response cache.

    Identical prompts asked again within ``cache_ttl`` seconds are served
    from memory instead of triggering another API call.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Maps md5(prompt) -> (response_text, unix_timestamp).
        self._session_cache = {}

    def generate_text_with_cache(self, prompt: str, cache_ttl: int = 3600) -> Optional[str]:
        """
        Generate text, reusing a cached response when still fresh.

        Args:
            prompt (str): Prompt text (also used to derive the cache key).
            cache_ttl (int): Seconds a cached response stays valid.

        Returns:
            Optional[str]: Generated (or cached) text, None on failure.
        """
        # Local imports keep the snippet self-contained; the original code
        # referenced hashlib/time without importing them.
        import hashlib
        import time

        cache_key = hashlib.md5(prompt.encode()).hexdigest()
        # Check the cache first.
        cached = self._session_cache.get(cache_key)
        if cached is not None:
            cached_result, timestamp = cached
            if time.time() - timestamp < cache_ttl:
                logger.info("使用缓存的Claude响应")
                return cached_result
            # Stale entry: evict it so the cache does not grow without bound.
            del self._session_cache[cache_key]
        # Cache miss: call the API.
        result = self.generate_text(prompt)
        # Cache only successful, non-empty results.
        if result:
            self._session_cache[cache_key] = (result, time.time())
        return result
总结
通过本文的详细指导,开发者可以为MoneyPrinterTurbo添加新的LLM提供商支持。整个过程包括:
- 架构分析:理解现有LLM服务的架构设计
- 准备工作:研究目标提供商API和准备环境
- 实现步骤:从配置扩展到核心代码实现
- 测试验证:编写单元测试和集成测试
- 文档更新:完善相关文档和配置模板
- 部署发布:更新依赖和部署配置
添加新LLM提供商的关键要点:
- 保持接口一致性:遵循现有适配器的设计模式
- 完善的错误处理:处理各种可能的API错误
- 配置管理:合理管理API密钥和相关配置
- 测试覆盖:确保新功能的稳定性和可靠性
- 文档完善:为用户提供清晰的使用指导
通过这种方式,MoneyPrinterTurbo可以持续支持新兴的LLM服务,为用户提供更多选择和更好的体验。
实践建议
- 逐步实现:按照本文的步骤逐步实现,不要一次性完成所有工作
- 充分测试:在不同场景下测试新添加的LLM提供商
- 性能监控:监控新提供商的性能表现和成本
- 用户反馈:收集用户对新提供商的使用反馈
- 持续维护:关注提供商API的变化并及时更新适配器
参考资料
- MoneyPrinterTurbo GitHub项目: https://github.com/harry0703/MoneyPrinterTurbo
- Anthropic Claude API文档: https://docs.anthropic.com/claude/reference
- Claude Python SDK: https://github.com/anthropics/anthropic-sdk-python
- Python单元测试文档: https://docs.python.org/3/library/unittest.html
- 设计模式 - 适配器模式: https://refactoring.guru/design-patterns/adapter
浙公网安备 33010602011771号