diff --git a/README.md b/README.md index 7791b46..6a4480f 100644 --- a/README.md +++ b/README.md @@ -11,10 +11,9 @@ ## ✨ Core Features ### 🤖 **Chat Completions** -- **Standard Chat**: Create chat completions with various models including `glm-4.7` +- **Standard Chat**: Create chat completions with various models including `glm-5.1` - **Streaming Support**: Real-time streaming responses for interactive applications - **Tool Calling**: Function calling capabilities for enhanced AI interactions -- **Character Role-Playing**: Support for character-based conversations with `charglm-3` model - **Multimodal Chat**: Image understanding capabilities with vision models ### 🧠 **Embeddings** @@ -105,7 +104,7 @@ client = ZhipuAiClient(api_key="your-api-key") # Create chat completion response = client.chat.completions.create( - model="glm-5", + model="glm-5.1", messages=[ {"role": "user", "content": "Hello, Z.ai!"} ] @@ -169,7 +168,7 @@ client = ZaiClient(api_key="your-api-key") # Create chat completion response = client.chat.completions.create( - model='glm-4.7', + model='glm-5.1', messages=[ {'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Tell me a story about AI.'}, @@ -192,7 +191,7 @@ client = ZaiClient(api_key="your-api-key") # Create chat completion response = client.chat.completions.create( - model='glm-4.7', + model='glm-5.1', messages=[ {'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'What is artificial intelligence?'}, @@ -228,7 +227,7 @@ client = ZaiClient(api_key="your-api-key") base64_image = encode_image('examples/test_multi_modal.jpeg') response = client.chat.completions.create( - model='glm-4.6v', + model='glm-5v-turbo', messages=[ { 'role': 'user', @@ -279,7 +278,7 @@ client = ZaiClient(api_key="your-api-key") try: response = client.chat.completions.create( - model="glm-5", + model="glm-5.1", messages=[ {"role": "user", "content": "Hello, Z.ai!"} ] diff --git a/README_CN.md 
b/README_CN.md index 4cbd0a5..1d953cd 100644 --- a/README_CN.md +++ b/README_CN.md @@ -11,10 +11,9 @@ ## ✨ 核心功能 ### 🤖 **对话补全** -- **标准对话**: 支持 `glm-4.7` 等多种模型的对话补全 +- **标准对话**: 支持 `glm-5.1` 等多种模型的对话补全 - **流式支持**: 实时流式响应,适用于交互式应用 - **工具调用**: 函数调用能力,增强 AI 交互体验 -- **角色扮演**: 支持基于 `charglm-3` 模型的角色对话 - **多模态对话**: 支持图像理解的视觉模型 ### 🧠 **向量嵌入** @@ -107,7 +106,7 @@ client = ZhipuAiClient(api_key="your-api-key") # Create chat completion response = client.chat.completions.create( - model="glm-5", + model="glm-5.1", messages=[ {"role": "user", "content": "Hello, Z.ai!"} ] @@ -175,7 +174,7 @@ client = ZaiClient(api_key="your-api-key") # 创建对话 response = client.chat.completions.create( - model='glm-4.6', + model='glm-5.1', messages=[ {'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Tell me a story about AI.'}, @@ -198,7 +197,7 @@ client = ZaiClient(api_key="your-api-key") # 创建对话 response = client.chat.completions.create( - model='glm-4.6', + model='glm-5.1', messages=[ {'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'What is artificial intelligence?'}, @@ -234,7 +233,7 @@ client = ZaiClient(api_key="your-api-key") base64_image = encode_image('examples/test_multi_modal.jpeg') response = client.chat.completions.create( - model='glm-4.6v', + model='glm-5v-turbo', messages=[ { 'role': 'user', @@ -286,7 +285,7 @@ client = ZaiClient(api_key="your-api-key") # 请填写您自己的APIKey try: response = client.chat.completions.create( - model="glm-5", + model="glm-5.1", messages=[ {"role": "user", "content": "你好, Z.ai !"} ] diff --git a/Release-Note.md b/Release-Note.md index cefed25..bb28a8e 100644 --- a/Release-Note.md +++ b/Release-Note.md @@ -104,7 +104,7 @@ This release brings significant improvements to the SDK structure, comprehensive #### 📖 **New Example Files** - **`examples/video_models_examples.py`**: Complete guide for all video generation models - **`examples/agent_examples.py`**: Agent invocation patterns 
and best practices -- **`examples/glm4_example.py`**: GLM-4 model usage in all modes (sync, async, streaming) +- **`examples/glm_example.py`**: GLM model usage in all modes (sync, async, streaming) - **`examples/web_search_example.py`**: Web search integration and configuration - **`examples/video_generator.py`**: Enhanced async video generation (updated) diff --git a/examples/basic_usage.py b/examples/basic_usage.py index cce0689..d774363 100644 --- a/examples/basic_usage.py +++ b/examples/basic_usage.py @@ -6,7 +6,7 @@ def completion(): # Create chat completion response = client.chat.completions.create( - model='glm-5', + model='glm-5.1', messages=[{'role': 'user', 'content': 'Hello, Z.ai!'}], temperature=1.0, ) @@ -19,7 +19,7 @@ def completion_with_stream(): # Create chat completion response = client.chat.completions.create( - model='glm-5', + model='glm-5.1', messages=[ {'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Tell me a story about AI.'}, @@ -38,7 +38,7 @@ def completion_with_websearch(): # Create chat completion response = client.chat.completions.create( - model='glm-5', + model='glm-5.1', messages=[ {'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'What is artificial intelligence?'}, @@ -66,7 +66,7 @@ def completion_with_mcp_server_url(): # Create chat completion with MCP server URL response = client.chat.completions.create( - model='glm-5', + model='glm-5.1', stream=False, messages=[{'role': 'user', 'content': 'Hello, please introduce GPT?'}], tools=[ @@ -95,7 +95,7 @@ def completion_with_mcp_server_label(): # Create chat completion with MCP server label response = client.chat.completions.create( - model='glm-5', + model='glm-5.1', stream=False, messages=[{'role': 'user', 'content': 'Hello, please introduce GPT?'}], tools=[ @@ -217,7 +217,7 @@ def ofZai(): client = ZaiClient() print(client.base_url) response = client.chat.completions.create( - model='glm-5', + 
model='glm-5.1', messages=[{'role': 'user', 'content': 'Hello, Z.ai!'}], temperature=0.7, ) @@ -227,7 +227,7 @@ def ofZhipu(): client = ZhipuAiClient() print(client.base_url) response = client.chat.completions.create( - model='glm-5', + model='glm-5.1', messages=[{'role': 'user', 'content': 'Hello, Z.ai!'}], temperature=0.7, ) diff --git a/examples/function_call_example.py b/examples/function_call_example.py index 8e661e3..ce83491 100644 --- a/examples/function_call_example.py +++ b/examples/function_call_example.py @@ -36,7 +36,7 @@ def parse_function_call(model_response, messages): "tool_call_id": tool_call.id }) response = client.chat.completions.create( - model="glm-5", # Specify the model name to use + model="glm-5.1", # Specify the model name to use messages=messages, tools=tools, ) @@ -99,7 +99,7 @@ def parse_function_call(model_response, messages): messages.append({"role": "user", "content": "Help me check the flights from Beijing to Guangzhou on January 23."}) response = client.chat.completions.create( - model="glm-5", # Specify the model name to use + model="glm-5.1", # Specify the model name to use messages=messages, tools=tools, ) @@ -110,7 +110,7 @@ def parse_function_call(model_response, messages): messages.append({"role": "user", "content": "What is the price of flight 8321?"}) response = client.chat.completions.create( - model="glm-5", # Specify the model name to use + model="glm-5.1", # Specify the model name to use messages=messages, tools=tools, ) diff --git a/examples/glm4_example.py b/examples/glm_example.py similarity index 97% rename from examples/glm4_example.py rename to examples/glm_example.py index cf16278..59f7b99 100644 --- a/examples/glm4_example.py +++ b/examples/glm_example.py @@ -23,7 +23,7 @@ def stream_web_search_example(): }] client = ZaiClient() response = client.chat.completions.create( - model="glm-4.7", + model="glm-5.1", messages=messages, tools=tools, stream=True @@ -35,7 +35,7 @@ def sync_example(): print("=== GLM-4 
Synchronous Example ===") client = ZaiClient() response = client.chat.completions.create( - model="glm-4.7", + model="glm-5.1", messages=[ {"role": "system", "content": "You are a helpful assistant who provides professional, accurate, and insightful advice."}, {"role": "user", "content": "I'm very interested in the planets of the solar system, especially Saturn. Please provide basic information about Saturn, including its size, composition, ring system, and any unique astronomical phenomena."}, @@ -47,7 +47,7 @@ def async_example(): print("=== GLM-4 Async Example ===") client = ZaiClient() response = client.chat.asyncCompletions.create( - model="glm-4.7", + model="glm-5.1", messages=[ { "role": "user", diff --git a/examples/stream_tools.py b/examples/stream_tools.py index 1906f8a..b821969 100644 --- a/examples/stream_tools.py +++ b/examples/stream_tools.py @@ -4,7 +4,7 @@ def main(): client = ZhipuAiClient() # create chat completion with tool calls and streaming response = client.chat.completions.create( - model="glm-5", + model="glm-5.1", messages=[ {"role": "user", "content": "How is the weather in Beijing and Shanghai? Please provide the answer in Celsius."}, ],