Skip to content

Commit 2116aed

Browse files
authored
chore: support GPT 5.5, update doc (#4029)
1 parent 5cf1c00 commit 2116aed

8 files changed

Lines changed: 388 additions & 322 deletions

File tree

.github/ISSUE_TEMPLATE/bug_report.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ body:
2626
attributes:
2727
label: What version of camel are you using?
2828
description: Run command `python3 -c 'print(__import__("camel").__version__)'` in your shell and paste the output here.
29-
placeholder: E.g., 0.2.91a2
29+
placeholder: E.g., 0.2.91a3
3030
validations:
3131
required: true
3232

camel/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414

1515
from camel.logger import disable_logging, enable_logging, set_log_level
1616

17-
__version__ = '0.2.91a2'
17+
__version__ = '0.2.91a3'
1818

1919
__all__ = [
2020
'__version__',

camel/types/enums.py

Lines changed: 16 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,11 @@ class ModelType(UnifiedModelType, Enum):
5252
GPT_5_1 = "gpt-5.1"
5353
GPT_5_2 = "gpt-5.2"
5454
GPT_5_4 = "gpt-5.4"
55+
GPT_5_4_MINI = "gpt-5.4-mini-2026-03-17"
56+
GPT_5_4_NANO = "gpt-5.4-nano-2026-03-17"
57+
GPT_5_4_PRO = "gpt-5.4-pro-2026-03-17"
58+
GPT_5_5 = "gpt-5.5"
59+
GPT_5_5_PRO = "gpt-5.5-pro-2026-04-23"
5560
GPT_5 = "gpt-5"
5661
GPT_5_MINI = "gpt-5-mini"
5762
GPT_5_NANO = "gpt-5-nano"
@@ -638,6 +643,11 @@ def is_openai(self) -> bool:
638643
ModelType.GPT_5_1,
639644
ModelType.GPT_5_2,
640645
ModelType.GPT_5_4,
646+
ModelType.GPT_5_4_MINI,
647+
ModelType.GPT_5_4_NANO,
648+
ModelType.GPT_5_4_PRO,
649+
ModelType.GPT_5_5,
650+
ModelType.GPT_5_5_PRO,
641651
}
642652

643653
@property
@@ -1652,10 +1662,10 @@ def token_limit(self) -> int:
16521662
elif self in {
16531663
ModelType.GPT_5_1,
16541664
ModelType.GPT_5_2,
1655-
ModelType.GPT_5_4,
16561665
ModelType.GPT_5_MINI,
16571666
ModelType.GPT_5_NANO,
1658-
ModelType.GPT_5,
1667+
ModelType.GPT_5_4_MINI,
1668+
ModelType.GPT_5_4_NANO,
16591669
}:
16601670
return 400_000
16611671
elif self in {
@@ -1691,11 +1701,12 @@ def token_limit(self) -> int:
16911701
ModelType.QWEN_3_CODER_PLUS,
16921702
ModelType.CLAUDE_SONNET_4_6,
16931703
ModelType.CLAUDE_OPUS_4_7,
1694-
}:
1695-
return 1_000_000
1696-
elif self in {
16971704
ModelType.DEEPSEEK_V4_FLASH,
16981705
ModelType.DEEPSEEK_V4_PRO,
1706+
ModelType.GPT_5_4,
1707+
ModelType.GPT_5_4_PRO,
1708+
ModelType.GPT_5_5,
1709+
ModelType.GPT_5_5_PRO,
16991710
}:
17001711
return 1_000_000
17011712
elif self in {

camel/types/unified_model_type.py

Lines changed: 17 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,8 @@
1919
if TYPE_CHECKING:
2020
from camel.types import ModelType
2121

22+
logger = logging.getLogger(__name__)
23+
2224

2325
class UnifiedModelType(str):
2426
r"""Class used for support both :obj:`ModelType` and :obj:`str` to be used
@@ -31,6 +33,7 @@ class UnifiedModelType(str):
3133

3234
_cache: ClassVar[Dict[str, "UnifiedModelType"]] = {}
3335
_lock: ClassVar[Lock] = Lock()
36+
_token_limit_warning_emitted: ClassVar[bool] = False
3437

3538
def __new__(cls, value: Union["ModelType", str]) -> "UnifiedModelType":
3639
if isinstance(value, Enum):
@@ -49,6 +52,19 @@ def __new__(cls, value: Union["ModelType", str]) -> "UnifiedModelType":
4952
def __init__(self, value: Union["ModelType", str]) -> None:
5053
pass
5154

55+
@classmethod
56+
def _warn_unknown_token_limit_once(cls, model_name: str) -> None:
57+
with cls._lock:
58+
if cls._token_limit_warning_emitted:
59+
return
60+
cls._token_limit_warning_emitted = True
61+
62+
logger.warning(
63+
"Unknown model '%s': context window size not defined. "
64+
"Defaulting to 999_999_999.",
65+
model_name,
66+
)
67+
5268
def __repr__(self) -> str:
5369
return super().__str__()
5470

@@ -67,11 +83,7 @@ def token_limit(self) -> int:
6783
For unknown model types not defined in ModelType enum, this returns
6884
a default value of 999_999_999 tokens.
6985
"""
70-
logging.warning(
71-
"Unknown model '%s': context window size not defined. "
72-
"Defaulting to 999_999_999.",
73-
str(self),
74-
)
86+
self._warn_unknown_token_limit_once(str(self))
7587
return 999_999_999
7688

7789
@property

docs/conf.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@
2727
project = 'CAMEL'
2828
copyright = '2024, CAMEL-AI.org'
2929
author = 'CAMEL-AI.org'
30-
release = '0.2.91a2'
30+
release = '0.2.91a3'
3131

3232
html_favicon = (
3333
'https://raw.githubusercontent.com/camel-ai/camel/master/misc/favicon.png'

docs/key_modules/models.md

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -39,24 +39,21 @@ CAMEL supports a wide range of models, including [OpenAI’s GPT series](https:/
3939

4040
| Model Provider | Model Type(s) |
4141
| :-------------- | :------------ |
42-
| **OpenAI** | gpt-4.5-preview<br/>gpt-4o, gpt-4o-mini<br/>o1, o1-preview, o1-mini<br/>o3-mini, o3-pro, o3<br/>o4-mini<br/>gpt-4.1, gpt-4.1-mini, gpt-4.1-nano<br/>gpt-5, gpt-5-mini, gpt-5-nano<br/>gpt-4-turbo, gpt-4, gpt-3.5-turbo |
43-
| **Azure OpenAI** | gpt-4o, gpt-4-turbo<br/>gpt-4, gpt-3.5-turbo |
42+
| **OpenAI** | gpt-4.5-preview<br/>gpt-4o, gpt-4o-mini<br/>o1, o1-preview, o1-mini<br/>o3-mini, o3-pro, o3<br/>o4-mini<br/>gpt-4.1, gpt-4.1-mini-2025-04-14, gpt-4.1-nano-2025-04-14<br/>gpt-5, gpt-5-mini, gpt-5-nano<br/>gpt-5.1, gpt-5.2<br/>gpt-5.4, gpt-5.4-mini-2026-03-17, gpt-5.4-nano-2026-03-17<br/>gpt-5.5, gpt-5.5-pro-2026-04-23<br/>gpt-4-turbo, gpt-4, gpt-3.5-turbo |
43+
| **Azure OpenAI** | gpt-4.5-preview<br/>gpt-4o, gpt-4o-mini<br/>o1, o1-preview, o1-mini<br/>o3-mini, o3-pro, o3<br/>o4-mini<br/>gpt-4.1, gpt-4.1-mini-2025-04-14, gpt-4.1-nano-2025-04-14<br/>gpt-5<br/>gpt-4-turbo, gpt-4, gpt-3.5-turbo |
4444
| **Mistral AI** | mistral-large-latest, pixtral-12b-2409<br/>ministral-8b-latest, ministral-3b-latest<br/>open-mistral-nemo, codestral-latest<br/>open-mistral-7b, open-mixtral-8x7b<br/>open-mixtral-8x22b, open-codestral-mamba<br/>mistral-small-2506, mistral-medium-2508<br/>magistral-small-1.2, magistral-medium-1.2 |
4545
| **Moonshot** | kimi-k2-0905-Preview, kimi-k2.5, kimi-k2-turbo-preview<br/>kimi-k2-thinking, kimi-k2-thinking-turbo<br/>moonshot-v1-8k, moonshot-v1-32k, moonshot-v1-128k |
46-
| **Anthropic** | claude-3-7-sonnet-latest<br/>claude-sonnet-4-5, claude-opus-4-5, claude-haiku-4-5<br/>claude-sonnet-4-20250514, claude-opus-4-20250514, claude-opus-4-1-20250805 |
47-
| **Gemini** | gemini-3.1-pro-preview, gemini-3.1-flash-lite-preview<br/>gemini-3-pro-preview, gemini-3-flash-preview<br/>gemini-2.5-pro, gemini-2.5-flash<br/>gemini-2.0-flash, gemini-2.0-flash-thinking-exp<br/>gemini-2.0-flash-lite |
48-
| **Lingyiwanwu** | yi-lightning, yi-large, yi-medium<br/>yi-large-turbo, yi-vision, yi-medium-200k<br/>yi-spark, yi-large-rag, yi-large-fc |
49-
| **Qwen** | qwen3-coder-plus, qwq-32b-preview, qwq-plus, qvq-72b-preview, qwen-max, qwen-plus, qwen-turbo, qwen-long<br/>qwen-plus-latest, qwen-plus-2025-04-28, qwen-turbo-latest, qwen-turbo-2025-04-28<br/>qwen-vl-max, qwen-vl-plus, qwen-vl-72b-instruct, qwen-math-plus, qwen-math-turbo, qwen-coder-turbo<br/>qwen2.5-coder-32b-instruct, qwen2.5-72b-instruct, qwen2.5-32b-instruct, qwen2.5-14b-instruct |
50-
| **DeepSeek** | deepseek-chat<br/>deepseek-reasoner |
51-
| **CometAPI** | **All models available on [CometAPI](https://api.cometapi.com/pricing)**<br/>Including: gpt-5-chat-latest, gpt-5, gpt-5-mini, gpt-5-nano<br/>claude-opus-4-1-20250805, claude-sonnet-4-20250514, claude-3-7-sonnet-latest<br/>gemini-2.5-pro, gemini-2.5-flash, grok-4-0709, grok-3<br/>deepseek-v3.1, deepseek-v3, deepseek-r1-0528, qwen3-30b-a3b |
52-
| **Nebius** | **All models available on [Nebius AI Studio](https://studio.nebius.com/)**<br/>Including: gpt-oss-120b, gpt-oss-20b, GLM-4.5<br/>DeepSeek V3 & R1, LLaMA, Mistral, and more |
53-
| **ZhipuAI** | glm-4.7, glm-4.7-flash, glm-4.7-flashx<br/>glm-4.6, glm-4.6v, glm-4.6v-flash<br/>glm-4, glm-4v, glm-4v-flash<br/>glm-4v-plus-0111, glm-4-plus, glm-4-air<br/>glm-4-air-0111, glm-4-airx, glm-4-long<br/>glm-4-flashx, glm-4-flashx-250414<br/>glm-4-flash, glm-4-flash-250414<br/>glm-4.5-air, glm-4.5-airx, glm-4.5-flash<br/>glm-4.1v-thinking-flash, glm-4.1v-thinking-flashx<br/>glm-zero-preview, glm-3-turbo |
46+
| **Anthropic** | claude-3-7-sonnet-latest<br/>claude-sonnet-4-5, claude-opus-4-5, claude-haiku-4-5<br/>claude-sonnet-4-6, claude-opus-4-6, claude-opus-4-7<br/>claude-sonnet-4-20250514, claude-opus-4-20250514, claude-opus-4-1-20250805 |
47+
| **Gemini** | gemini-3.1-pro-preview, gemini-3.1-flash-lite-preview<br/>gemini-3-pro-preview, gemini-3-flash-preview<br/>gemini-2.5-pro, gemini-2.5-flash<br/>gemini-2.0-flash, gemini-2.0-flash-exp<br/>gemini-2.0-flash-thinking-exp, gemini-2.0-pro-exp-02-05<br/>gemini-2.0-flash-lite, gemini-2.0-flash-lite-preview-02-05 |
48+
| **Qwen** | qwen3-coder-plus<br/>qwen-max, qwen-plus, qwen-turbo, qwen-long<br/>qwen-plus-latest, qwen-plus-2025-04-28, qwen-turbo-latest, qwen-turbo-2025-04-28<br/>qwen-vl-max, qwen-vl-plus, qwen2.5-vl-72b-instruct<br/>qwen-math-plus, qwen-math-turbo, qwen-coder-turbo<br/>qwen2.5-coder-32b-instruct, qwen2.5-72b-instruct, qwen2.5-32b-instruct, qwen2.5-14b-instruct<br/>qwq-32b-preview, qwq-plus, qvq-72b-preview |
49+
| **DeepSeek** | deepseek-v4-flash, deepseek-v4-pro<br/>deepseek-chat, deepseek-reasoner |
50+
| **ZhipuAI** | glm-5, glm-4.7, glm-4.7-flash, glm-4.7-flashx<br/>glm-4.6, glm-4.6v, glm-4.6v-flash, glm-4.6v-flashx<br/>glm-4.5v, glm-4.5-air, glm-4.5-airx, glm-4.5-flash<br/>glm-4, glm-4v, glm-4v-flash<br/>glm-4v-plus-0111, glm-4-plus, glm-4-air<br/>glm-4-air-0111, glm-4-airx, glm-4-long<br/>glm-4-flashx, glm-4-flashx-250414<br/>glm-4-flash, glm-4-flash-250414<br/>glm-4.1v-thinking-flash, glm-4.1v-thinking-flashx<br/>glm-zero-preview, glm-3-turbo |
5451
| **InternLM** | internlm3-latest, internlm3-8b-instruct<br/>internlm2.5-latest, internlm2-pro-chat |
5552
| **Reka** | reka-core, reka-flash, reka-edge |
5653
| **COHERE** | command-r-plus, command-r, command-light, command, command-nightly |
57-
| **ERNIE** | ernie-x1-turbo-32k, ernie-x1-32k, ernie-x1-32k-preview<br/>ernie-4.5-turbo-128k, ernie-4.5-turbo-32k<br/>deepseek-v3, deepseek-r1, qwen3-235b-a22b |
54+
| **ERNIE** | ernie-x1-turbo-32k, ernie-x1-32k, ernie-x1-32k-preview<br/>ernie-4.5-turbo-128k, ernie-4.5-turbo-32k<br/>ernie-5.0-thinking-latest, ernie-4.5-turbo-vl-latest<br/>deepseek-v3, deepseek-r1, qwen3-235b-a22b |
5855
| **MiniMax** | MiniMax-M2.7, MiniMax-M2.7-highspeed, MiniMax-M2.5<br/>MiniMax-M2.1, MiniMax-M2.1-lightning, MiniMax-M2 |
59-
| **AtlasCloud** | openai/gpt-oss-120b, zai-org/glm-4-7 |
56+
| **xAI** | Grok models via the xAI SDK |
6057

6158

6259
### API & Connector Platforms
@@ -67,9 +64,12 @@ CAMEL supports a wide range of models, including [OpenAI’s GPT series](https:/
6764
| **TOGETHER AI** | [supported models](https://docs.together.ai/docs/dedicated-models) |
6865
| **SambaNova** | [supported models](https://docs.sambanova.ai/cloud/docs/get-started/supported-models) |
6966
| **Ollama** | [supported models](https://ollama.com/library) |
67+
| **CometAPI** | [supported models](https://api.cometapi.com/pricing) |
68+
| **Nebius** | [supported models](https://studio.nebius.com/) |
7069
| **OpenRouter** | [supported models](https://openrouter.ai/models) |
7170
| **PPIO** | [supported models](https://ppio.com/model-api/console) |
7271
| **LiteLLM** | [supported models](https://docs.litellm.ai/docs/providers) |
72+
| **OpenAI Compatible** | custom OpenAI-compatible endpoints via `OPENAI_COMPATIBLE_MODEL` |
7373
| **LMStudio** | [supported models](https://lmstudio.ai/models) |
7474
| **vLLM** | [supported models](https://docs.vllm.ai/en/latest/models/supported_models.html) |
7575
| **SGLANG** | [supported models](https://docs.sglang.ai/supported_models/generative_models.html) |
@@ -85,8 +85,8 @@ CAMEL supports a wide range of models, including [OpenAI’s GPT series](https:/
8585
| **AMD** | dvue-aoai-001-gpt-4.1 |
8686
| **Volcano** | [supported models](https://console.volcengine.com/ark) |
8787
| **Qianfan** | [supported models](https://cloud.baidu.com/doc/qianfan/s/rmh4stp0j) |
88-
89-
88+
| **AIHubMix** | [supported models](https://aihubmix.com/) |
89+
| **AtlasCloud** | openai/gpt-oss-120b, zai-org/glm-4-7 |
9090

9191
## How to Use Models via API Calls
9292

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
44

55
[project]
66
name = "camel-ai"
7-
version = "0.2.91a2"
7+
version = "0.2.91a3"
88
description = "Communicative Agents for AI Society Study"
99
authors = [{ name = "CAMEL-AI.org" }]
1010
requires-python = ">=3.10,<3.15"

0 commit comments

Comments
 (0)