From eec6114f48ab287293c6e3dfcdcf36b1ede5bf36 Mon Sep 17 00:00:00 2001
From: DanielRyanSmith <56164590+DanielRyanSmith@users.noreply.github.com>
Date: Fri, 10 Apr 2026 17:49:20 +0000
Subject: [PATCH] fix: add required max_tokens argument to Anthropic client

The Anthropic SDK requires the `max_tokens` argument when creating
messages with Claude models. This commit adds a `DEFAULT_MAX_TOKENS`
class constant to `AnthropicClient` and includes it in the kwargs for
`generate_content`. Tests have been updated to reflect this change.
---
 tests/test_anthropic.py | 1 +
 wptgen/llm.py           | 3 +++
 2 files changed, 4 insertions(+)

diff --git a/tests/test_anthropic.py b/tests/test_anthropic.py
index c755707..6e4466c 100644
--- a/tests/test_anthropic.py
+++ b/tests/test_anthropic.py
@@ -87,6 +87,7 @@ def test_anthropic_generate_content(
     # Verify the internal SDK method was called with the correct model and prompt
     mock_instance.messages.create.assert_called_once_with(
         model="claude-3-7-sonnet-20250219",
+        max_tokens=AnthropicClient.DEFAULT_MAX_TOKENS,
         messages=[{"role": "user", "content": "Test prompt"}],
         system="Test instruction",
         temperature=0.7,
diff --git a/wptgen/llm.py b/wptgen/llm.py
index b9cb4d2..88fde5b 100644
--- a/wptgen/llm.py
+++ b/wptgen/llm.py
@@ -328,6 +328,8 @@ def prompt_exceeds_input_token_limit(
 class AnthropicClient(LLMClient):
     """Client for interacting with Anthropic's API."""
 
+    DEFAULT_MAX_TOKENS = 8192
+
     def __init__(
         self,
         api_key: str,
@@ -378,6 +380,7 @@ def generate_content(
         target_model = model or self.model
         kwargs: dict[str, Any] = {
             "model": target_model,
+            "max_tokens": self.DEFAULT_MAX_TOKENS,
             "messages": [{"role": "user", "content": prompt}],
         }