Skip to content
Closed
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
43 changes: 30 additions & 13 deletions camel/agents/chat_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,6 +107,24 @@
from camel.utils.context_utils import ContextUtility
from camel.utils.tool_result import ToolResult

def _is_rate_limit_error(error: Exception) -> bool:
    """Check if an error is a rate limit error from any LLM provider.

    Handles OpenAI, Anthropic, Google, and other providers that signal
    rate limiting via typed exceptions, HTTP 429 status codes, or
    recognizable error messages.

    Args:
        error: The exception raised by a model backend call.

    Returns:
        bool: True if the error indicates the provider is rate limiting
            the client, False otherwise.
    """
    if isinstance(error, RateLimitError):
        return True
    # Only check status_code (HTTP-specific), not generic 'code' which
    # may be an application error code unrelated to rate limiting.
    status_code = getattr(error, 'status_code', None)
    if status_code == 429:
        return True
    # Fallback: message heuristics for providers whose exceptions carry
    # neither a known type nor a status_code attribute.
    # 'resource exhausted' / 'quota exceeded' cover Google-style errors:
    # gRPC RESOURCE_EXHAUSTED maps to HTTP 429 but its messages do not
    # always contain the phrase 'rate limit'.
    error_msg = str(error).lower()
    rate_limit_phrases = (
        'rate limit',
        'too many requests',
        'resource exhausted',
        'quota exceeded',
    )
    return any(phrase in error_msg for phrase in rate_limit_phrases)

if TYPE_CHECKING:
from camel.terminators import ResponseTerminator

Expand Down Expand Up @@ -3592,7 +3610,12 @@ def _get_model_response(
)
if response:
break
except RateLimitError as e:
except Exception as e:
if not _is_rate_limit_error(e):
logger.error(
f"Model error: {self.model_backend.model_type}",
)
raise
last_error = e
if attempt < self.retry_attempts - 1:
delay = min(self.retry_delay * (2**attempt), 60.0)
Expand All @@ -3607,11 +3630,6 @@ def _get_model_response(
f"Rate limit exhausted after "
f"{self.retry_attempts} attempts"
)
except Exception:
logger.error(
f"Model error: {self.model_backend.model_type}",
)
raise
else:
# Loop completed without success
raise ModelProcessingError(
Expand Down Expand Up @@ -3654,7 +3672,12 @@ async def _aget_model_response(
)
if response:
break
except RateLimitError as e:
except Exception as e:
if not _is_rate_limit_error(e):
logger.error(
f"Model error: {self.model_backend.model_type}",
)
raise
last_error = e
if attempt < self.retry_attempts - 1:
delay = min(self.retry_delay * (2**attempt), 60.0)
Expand All @@ -3670,12 +3693,6 @@ async def _aget_model_response(
f"Rate limit exhausted after "
f"{self.retry_attempts} attempts"
)
except Exception:
logger.error(
f"Model error: {self.model_backend.model_type}",
exc_info=True,
)
raise
else:
# Loop completed without success
raise ModelProcessingError(
Expand Down
Loading