# Query the ZhipuAI (BigModel) OpenAI-compatible endpoint via the openai SDK.
import os

from openai import OpenAI

base_url = 'https://open.bigmodel.cn/api/paas/v4'
# SECURITY: never hard-code API keys in source — the previous key was committed
# and must be considered compromised/rotated. Read the key from the environment.
api_key = os.environ['ZHIPU_API_KEY']
model_name = 'glm-4.5-flash'

client = OpenAI(
    base_url=base_url,
    api_key=api_key,
)
completion = client.chat.completions.create(
    model=model_name,
    messages=[
        # {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "你是谁"}
    ],
)
print(f'-- completion : {completion.choices[0].message.content} ' )
# Same request routed through litellm's OpenAI-compatible provider shim.
import os

import litellm

response = litellm.completion(
    # `openai/` prefix tells litellm to route to an OpenAI-compatible endpoint.
    model="openai/glm-4.5-flash",
    # SECURITY: read the key from the environment — never hard-code secrets.
    api_key=os.environ['ZHIPU_API_KEY'],
    # API base of the custom OpenAI-compatible endpoint (ZhipuAI/BigModel).
    api_base="https://open.bigmodel.cn/api/paas/v4",
    messages=[
        {
            "role": "user",
            "content": "Hey, how's it going?",
        }
    ],
)
print(response.choices[0].message.content)