LLM免费 API token provider
豆包推荐
zhipu
https://www.bigmodel.cn/pricing
Flash 免费系列全面覆盖语言模型、视觉理解、图像生成及视频生成等多种模型。可以准确理解各任务场景语言描述及指令,更精确的完成多模态理解类任务,或生成高质量的图片、视频等多模态内容。
| 模型 | 简介 | 单价 | Batch API 定价 |
| --- | --- | --- | --- |
| GLM-4-Flash | 语言模型 | 免费 | 免费 |
| GLM-4V-Flash | 图像理解 | 免费 | 不支持 |
| CogView-3-Flash | 图像生成 | 免费 | 不支持 |
| CogVideoX-Flash | 视频生成 | 免费 | 不支持 |
测试代码
https://python.langchain.com/docs/integrations/chat/zhipuai/
https://github.com/arcstep/langchain_zhipuai
# Demo: call Zhipu's free GLM-4V-Flash vision model via langchain_zhipu.
# Docs: https://python.langchain.com/docs/integrations/chat/zhipuai/
import os

from langchain_core.prompts import ChatPromptTemplate
from langchain_zhipu import ChatZhipuAI

# BUGFIX(review): the original line was `os.environ["ZHIPUAI_API_KEY"] = "“`
# — an ASCII opening quote closed by a fullwidth quote, i.e. a SyntaxError,
# and it invited pasting a real key into the notes. Fill the key in via the
# environment; the empty-string default keeps the snippet importable.
os.environ["ZHIPUAI_API_KEY"] = os.getenv("ZHIPUAI_API_KEY", "")

llm4v = ChatZhipuAI(model="glm-4v-flash")  # free vision model (see pricing note above)

prompt = ChatPromptTemplate.from_messages([
    ("human", [
        {"type": "text", "text": "图里有什么"},
        {
            "type": "image_url",
            "image_url": {
                "url": "https://img1.baidu.com/it/u=1369931113,3388870256&fm=253&app=138&size=w931&n=0&f=JPEG&fmt=auto?sec=1703696400&t=f3028c7a1dca43a080aeb8239f09cc2f"
            },
        },
    ]),
])

ret = (prompt | llm4v).invoke({})
print(ret)
siliconflow
https://siliconflow.cn/zh-cn/siliconcloud
https://siliconflow.cn/zh-cn/models
https://blog.csdn.net/u012899618/article/details/145620482#:~:text=%E5%9C%A8%E6%9C%AC%E6%96%87%E4%B8%AD%EF%BC%8C%E6%88%91%E4%BB%AC%E5%B0%86%E4%BB%8B%E7%BB%8D%E5%A6%82%E4%BD%95%E4%BD%BF%E7%94%A8%20LangChain%20%E6%A1%86%E6%9E%B6%E5%B0%86%E7%A1%85%E5%9F%BA%E6%B5%81%E5%8A%A8%EF%BC%88SiliconFlow%EF%BC%89%E7%9A%84%20API%20%E9%9B%86%E6%88%90%E5%88%B0%E5%BA%94%E7%94%A8%E4%B8%AD%EF%BC%8C%E5%AE%9E%E7%8E%B0%E4%B8%80%E4%B8%AA%E5%9F%BA%E4%BA%8E%20LLM%20%E7%9A%84%E5%AF%B9%E8%AF%9D%E7%B3%BB%E7%BB%9F%E3%80%82%20%E4%BB%80%E4%B9%88%E6%98%AF%E7%A1%85%E5%9F%BA%E6%B5%81%E5%8A%A8%EF%BC%88SiliconFlow%EF%BC%89%EF%BC%9F,%E8%AF%AD%E8%A8%80%E6%A8%A1%E5%9E%8B%20%EF%BC%88%E5%A6%82%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0%E6%A8%A1%E5%9E%8B%EF%BC%89%E6%8F%90%E4%BE%9B%E8%87%AA%E7%84%B6%E8%AF%AD%E8%A8%80%E5%A4%84%E7%90%86%E8%83%BD%E5%8A%9B%E7%9A%84%20API%20%E6%9C%8D%E5%8A%A1%E3%80%82%20%E5%AE%83%E6%94%AF%E6%8C%81%E5%A4%9A%E7%A7%8D%E7%B1%BB%E5%9E%8B%E7%9A%84%E6%A8%A1%E5%9E%8B%EF%BC%8C%E5%8F%AF%E4%BB%A5%E9%80%9A%E8%BF%87%20REST%20API%20%E6%8E%A5%E5%8F%A3%E4%B8%8E%E5%85%B6%E8%BF%9B%E8%A1%8C%E4%BA%A4%E4%BA%92%EF%BC%8C%E8%BF%9B%E8%A1%8C%E8%87%AA%E7%84%B6%E8%AF%AD%E8%A8%80%E7%94%9F%E6%88%90%E3%80%81%E9%97%AE%E7%AD%94%E3%80%81%E6%96%87%E6%9C%AC%E5%A4%84%E7%90%86%E7%AD%89%E4%BB%BB%E5%8A%A1%E3%80%82
"""Minimal custom LangChain LLM wrapper for the SiliconFlow chat API."""
import os

import requests
from langchain.llms.base import LLM
from langchain_community.llms.utils import enforce_stop_tokens

# API key comes from the environment; "sk-xxx" is only a placeholder fallback.
API_KEY = os.getenv("CUSTOM_API_KEY", "sk-xxx")
BASE_URL = "https://api.siliconflow.cn/v1/chat/completions"


class SiliconFlow(LLM):
    """LLM subclass that forwards each prompt to SiliconFlow's /chat/completions."""

    def __init__(self):
        super().__init__()

    @property
    def _llm_type(self) -> str:
        # Identifier LangChain uses to label this model type.
        return "siliconflow"

    def siliconflow_completions(self, model: str, prompt: str) -> str:
        """POST a single-turn, non-streaming chat request and return the reply text.

        Raises requests.HTTPError (via raise_for_status) on non-2xx responses.
        """
        payload = {
            "model": model,
            "messages": [{"role": "user", "content": prompt}],
            "stream": False,
        }
        headers = {
            "accept": "application/json",
            "content-type": "application/json",
            "authorization": f"Bearer {API_KEY}",
        }
        # BUGFIX(review): the original call had no timeout, so a hung
        # connection would block forever. 60s covers slow generations.
        response = requests.post(BASE_URL, json=payload, headers=headers, timeout=60)
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]

    def _call(self, prompt: str, stop: list = None, model: str = "default-model") -> str:
        """LangChain entry point: run the prompt, then truncate at stop tokens."""
        response = self.siliconflow_completions(model=model, prompt=prompt)
        if stop is not None:
            response = enforce_stop_tokens(response, stop)
        return response


if __name__ == "__main__":
    llm = SiliconFlow()
    response = llm._call(prompt="你是谁?", model="deepseek-ai/DeepSeek-V2.5")
    print(response)
https://python.langchain.com.cn/docs/modules/model_io/models/llms/how_to/custom_llm
from typing import Any, List, Mapping, Optional

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM


class CustomLLM(LLM):
    """Toy custom LLM that echoes back the first ``n`` characters of the prompt.

    Canonical example from the LangChain "custom LLM" how-to guide.
    """

    # number of prompt characters to echo back
    n: int

    @property
    def _llm_type(self) -> str:
        return "custom"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> str:
        # This toy model has no native stop-token support, so reject the arg
        # outright rather than silently ignoring it.
        if stop is not None:
            raise ValueError("stop kwargs are not permitted.")
        return prompt[: self.n]

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {"n": self.n}
https://blog.csdn.net/sinat_29950703/article/details/143770491
# Use SiliconFlow's OpenAI-compatible endpoint through LangChain's ChatOpenAI.
import os

from langchain.chat_models import ChatOpenAI

llm = ChatOpenAI(
    openai_api_base="https://api.siliconflow.cn/v1",  # SiliconFlow endpoint
    # SECURITY(review): the original note hard-coded a live-looking API key
    # (`sk-...`). Never commit secrets to notes/source — revoke that key and
    # read it from the environment instead.
    openai_api_key=os.getenv("SILICONFLOW_API_KEY", ""),
    model="Pro/THUDM/chatglm3-6b",  # model to enable
)
res = llm.predict("hello,你是谁?")
print(res)
Gemini
需要VPN
Groq
需要VPN
类似硅基流动