diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index 0e092b64cd..276405d736 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -92,6 +92,7 @@ from camel.types import ( ChatCompletion, ChatCompletionChunk, + ChatCompletionMessageFunctionToolCall, ModelPlatformType, ModelType, OpenAIBackendRole, @@ -2559,123 +2560,200 @@ def _try_format_message( except ValidationError: return False - def _check_tools_strict_compatibility(self) -> bool: - r"""Check if all tools are compatible with OpenAI strict mode. + @staticmethod + def _collect_tool_calls_from_completion( + tool_calls: List[ChatCompletionMessageFunctionToolCall], + accumulated_tool_calls: Dict[str, Any], + ) -> None: + r"""Convert tool calls from a ChatCompletion into the accumulated + tool-call dictionary format used by the streaming pipeline. - Returns: - bool: True if all tools are strict mode compatible, - False otherwise. + Args: + tool_calls (List[ChatCompletionMessageFunctionToolCall]): Tool + call objects from + ``completion.choices[0].message.tool_calls``. + accumulated_tool_calls (Dict[str, Any]): Mutable dict that will + be populated with the converted entries. """ - tool_schemas = self._get_full_tool_schemas() - for schema in tool_schemas: - if not schema.get("function", {}).get("strict", True): - return False - return True - - def _convert_response_format_to_prompt( - self, response_format: Type[BaseModel] - ) -> str: - r"""Convert a Pydantic response format to a prompt instruction. + for tc in tool_calls: + accumulated_tool_calls[tc.id] = { + 'id': tc.id, + 'function': { + 'name': tc.function.name, + 'arguments': tc.function.arguments, + }, + 'complete': True, + } + + def _record_and_build_display_message( + self, + final_content: str, + parsed_object: Optional[Union[BaseModel, Dict[str, Any]]], + final_reasoning: Optional[str], + response_format: Optional[Type[BaseModel]], + ) -> BaseMessage: + r"""Record the full message to memory and build a display message. + + In delta mode the display message has empty content because all + content was already yielded incrementally. In accumulate mode the + display message carries the full content. Args: - response_format (Type[BaseModel]): The Pydantic model class. + final_content (str): The full final content string. + parsed_object: The parsed object from the structured output + stream. + final_reasoning: The reasoning content, if any. + response_format: The (possibly modified) response format. Returns: - str: A prompt instruction requesting the specific format. + BaseMessage: The display message to yield to the caller.
""" - try: - # Get the JSON schema from the Pydantic model - schema = response_format.model_json_schema() + # Record full content to memory + record_msg = BaseMessage( + role_name=self.role_name, + role_type=self.role_type, + meta_dict={}, + content=final_content, + parsed=parsed_object, + reasoning_content=final_reasoning, + ) + if response_format: + self._try_format_message(record_msg, response_format) + self.record_message(record_msg) + + # Build display message (empty content in delta mode) + display_content = final_content if self.stream_accumulate else "" + display_reasoning = final_reasoning if self.stream_accumulate else None + display_msg = BaseMessage( + role_name=self.role_name, + role_type=self.role_type, + meta_dict={}, + content=display_content, + parsed=record_msg.parsed, + reasoning_content=display_reasoning, + ) + return display_msg - # Create a prompt based on the schema - format_instruction = ( - "\n\nPlease respond in the following JSON format:\n{\n" - ) + def _handle_structured_stream_tool_iteration( + self, + final_completion: Any, + accumulated_tool_calls: Dict[str, Any], + tool_call_records: List[ToolCallingRecord], + request_token_usage: Dict[str, int], + step_token_usage: Dict[str, int], + iteration_count: int, + content_accumulator: 'StreamContentAccumulator', + ) -> Tuple[ + bool, + Optional[Tuple[List[OpenAIMessage], int]], + Optional['ChatAgentResponse'], + ]: + r"""Handle post-tool-execution logic for structured output streaming. - properties = schema.get("properties", {}) - for field_name, field_info in properties.items(): - field_type = field_info.get("type", "string") - description = field_info.get("description", "") + Updates token usage, clears accumulated tool calls, and determines + whether the streaming loop should continue or terminate. - if field_type == "array": - format_instruction += ( - f' "{field_name}": ["array of values"]' - ) - elif field_type == "object": - format_instruction += f' "{field_name}": {{"object"}}' - elif field_type == "boolean": - format_instruction += f' "{field_name}": true' - elif field_type == "number": - format_instruction += f' "{field_name}": 0' - else: - format_instruction += f' "{field_name}": "string value"' + Args: + final_completion: The final completion object from the model. + accumulated_tool_calls: Mutable dict of accumulated tool calls, + cleared in-place before returning. + tool_call_records: List of executed tool call records. + request_token_usage: Per-request token usage dict to update. + step_token_usage: Cumulative step token usage dict to update. + iteration_count: Current loop iteration number. + content_accumulator: Accumulator used to reset streaming content + when continuing to the next iteration. - if description: - format_instruction += f' // {description}' + Returns: + A tuple of ``(should_continue, new_context, error_response)``: + - ``should_continue=True``: caller should continue the loop with + the returned ``(openai_messages, num_tokens)`` context. + - ``should_continue=False, error_response=None``: caller should + break the loop. + - ``should_continue=False, error_response!=None``: caller should + yield the error response and return. 
+ """ + if tool_call_records: + logger.info("Sending back result to model") - # Add comma if not the last item - if field_name != list(properties.keys())[-1]: - format_instruction += "," - format_instruction += "\n" + if final_completion.usage: + request_usage = safe_model_dump(final_completion.usage) + self._update_token_usage_tracker( + request_token_usage, request_usage + ) + self._update_token_usage_tracker(step_token_usage, request_usage) - format_instruction += "}" - return format_instruction + accumulated_tool_calls.clear() - except Exception as e: - logger.warning( - f"Failed to convert response_format to prompt: {e}. " - f"Using generic format instruction." - ) - return ( - "\n\nPlease respond in a structured JSON format " - "that matches the requested schema." - ) + if tool_call_records and ( + self.max_iteration is None or iteration_count < self.max_iteration + ): + try: + new_context = self.memory.get_context() + except RuntimeError as e: + return ( + False, + None, + self._step_terminate( + e.args[1], tool_call_records, "max_tokens_exceeded" + ), + ) + content_accumulator.reset_streaming_content() + return True, new_context, None + + return False, None, None - def _handle_response_format_with_non_strict_tools( + def _build_structured_completion_response( self, - input_message: Union[BaseMessage, str], - response_format: Optional[Type[BaseModel]] = None, - ) -> Tuple[Union[BaseMessage, str], Optional[Type[BaseModel]], bool]: - r"""Handle response format when tools are not strict mode compatible. + final_completion: Any, + final_content: str, + parsed_object: Optional[Union[BaseModel, Dict[str, Any]]], + final_reasoning: Optional[str], + response_format: Optional[Type[BaseModel]], + tool_call_records: List[ToolCallingRecord], + step_token_usage: Dict[str, int], + ) -> 'ChatAgentResponse': + r"""Build the final ChatAgentResponse for a structured output stream. Args: - input_message: The original input message. - response_format: The requested response format. + final_completion: The final completion object from the model. + final_content: The full final text content. + parsed_object: The parsed Pydantic object, if any. + final_reasoning: The reasoning content, if any. + response_format: The response format class used for parsing. + tool_call_records: List of executed tool call records. + step_token_usage: Cumulative step token usage (already updated + before this call) used for ``info["usage"]``. Returns: - Tuple: (modified_message, modified_response_format, - used_prompt_formatting) + ChatAgentResponse: The final response to yield to the caller. """ - if response_format is None: - return input_message, response_format, False - - # Check if tools are strict mode compatible - if self._check_tools_strict_compatibility(): - return input_message, response_format, False - - # Tools are not strict compatible, convert to prompt - logger.info( - "Non-strict tools detected. Converting response_format to " - "prompt-based formatting." 
+ final_message = self._record_and_build_display_message( + final_content, + parsed_object, + final_reasoning, + response_format, ) - - format_prompt = self._convert_response_format_to_prompt( - response_format + return ChatAgentResponse( + msgs=[final_message], + terminated=False, + info={ + "id": final_completion.id or "", + "usage": step_token_usage.copy(), + "finish_reasons": [ + choice.finish_reason or "stop" + for choice in final_completion.choices + ], + "num_tokens": self._get_token_count(final_content), + "tool_calls": tool_call_records, + "external_tool_requests": None, + "streaming": False, + "partial": False, + "stream_accumulate_mode": "accumulate" + if self.stream_accumulate + else "delta", + }, ) - # Modify the message to include format instruction - modified_message: Union[BaseMessage, str] - if isinstance(input_message, str): - modified_message = input_message + format_prompt - else: - modified_message = input_message.create_new_instance( - input_message.content + format_prompt - ) - - # Return None for response_format to avoid strict mode conflicts - # and True to indicate we used prompt formatting - return modified_message, None, True - def _is_called_from_registered_toolkit(self) -> bool: r"""Check if current step/astep call originates from a RegisteredAgentToolkit. @@ -2703,66 +2781,6 @@ def _is_called_from_registered_toolkit(self) -> bool: return False - def _apply_prompt_based_parsing( - self, - response: ModelResponse, - original_response_format: Type[BaseModel], - ) -> None: - r"""Apply manual parsing when using prompt-based formatting. - - Args: - response: The model response to parse. - original_response_format: The original response format class. - """ - for message in response.output_messages: - if message.content: - try: - # Try to extract JSON from the response content - import json - - from pydantic import ValidationError - - # Try to find JSON in the content - content = message.content.strip() - - # Try direct parsing first - try: - parsed_json = json.loads(content) - message.parsed = ( - original_response_format.model_validate( - parsed_json - ) - ) - continue - except (json.JSONDecodeError, ValidationError): - pass - - # Try to extract JSON from text - json_pattern = r'\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\}' - json_matches = re.findall(json_pattern, content, re.DOTALL) - - for json_str in json_matches: - try: - parsed_json = json.loads(json_str) - message.parsed = ( - original_response_format.model_validate( - parsed_json - ) - ) - # Update content to just the JSON for consistency - message.content = json.dumps(parsed_json) - break - except (json.JSONDecodeError, ValidationError): - continue - - if not message.parsed: - logger.warning( - f"Failed to parse JSON from response: {content}" - ) - - except Exception as e: - logger.warning(f"Error during prompt-based parsing: {e}") - def _format_response_if_needed( self, response: ModelResponse, @@ -2901,14 +2919,6 @@ def _step_impl( # use disable_tools = self._is_called_from_registered_toolkit() - # Handle response format compatibility with non-strict tools - original_response_format = response_format - input_message, response_format, used_prompt_formatting = ( - self._handle_response_format_with_non_strict_tools( - input_message, response_format - ) - ) - # Convert input message to BaseMessage if necessary if isinstance(input_message, str): input_message = BaseMessage.make_user_message( @@ -3091,12 +3101,6 @@ def _step_impl( self._format_response_if_needed(response, response_format) - # Apply manual parsing if we 
used prompt-based formatting - if used_prompt_formatting and original_response_format: - self._apply_prompt_based_parsing( - response, original_response_format - ) - # Only record final output if we haven't already recorded tool calls # for this response (to avoid duplicate assistant messages) if not recorded_tool_calls: @@ -3210,14 +3214,6 @@ async def _astep_non_streaming_task( # use disable_tools = self._is_called_from_registered_toolkit() - # Handle response format compatibility with non-strict tools - original_response_format = response_format - input_message, response_format, used_prompt_formatting = ( - self._handle_response_format_with_non_strict_tools( - input_message, response_format - ) - ) - if isinstance(input_message, str): input_message = BaseMessage.make_user_message( role_name="User", content=input_message @@ -3399,12 +3395,6 @@ async def _astep_non_streaming_task( await self._aformat_response_if_needed(response, response_format) - # Apply manual parsing if we used prompt-based formatting - if used_prompt_formatting and original_response_format: - self._apply_prompt_based_parsing( - response, original_response_format - ) - # Only record final output if we haven't already recorded tool calls # for this response (to avoid duplicate assistant messages) if not recorded_tool_calls: @@ -4299,13 +4289,6 @@ def _stream( content, tool calls, and other information as they become available. """ - # Handle response format compatibility with non-strict tools - input_message, response_format, _ = ( - self._handle_response_format_with_non_strict_tools( - input_message, response_format - ) - ) - # Convert input message to BaseMessage if necessary if isinstance(input_message, str): input_message = BaseMessage.make_user_message( @@ -4326,7 +4309,9 @@ def _stream( # Start streaming response yield from self._stream_response( - openai_messages, num_tokens, response_format + openai_messages, + num_tokens, + response_format, ) def _get_token_count(self, content: str) -> int: @@ -4507,64 +4492,78 @@ def _stream_response( # Get final completion and record final message try: final_completion = stream.get_final_completion() - final_content = ( - final_completion.choices[0].message.content or "" + + # Check if the model wants to call tools + final_choice = final_completion.choices[0] + final_tool_calls = getattr( + final_choice.message, 'tool_calls', None ) + if final_tool_calls: + self._collect_tool_calls_from_completion( + final_tool_calls, + accumulated_tool_calls, + ) + + # Execute tools + for status_response in ( + self + )._execute_tools_sync_with_status_accumulator( + accumulated_tool_calls, + tool_call_records, + ): + yield status_response + + should_continue, new_context, error_response = ( + self._handle_structured_stream_tool_iteration( + final_completion, + accumulated_tool_calls, + tool_call_records, + request_token_usage, + step_token_usage, + iteration_count, + content_accumulator, + ) + ) + if error_response: + yield error_response + return + self._emit_request_usage( + usage_dict=request_token_usage, + step_usage=step_token_usage.copy(), + request_index=iteration_count, + response_id=final_completion.id or "", + ) + if should_continue: + openai_messages, num_tokens = new_context # type: ignore[misc] + continue + else: + break + + final_content = final_choice.message.content or "" final_reasoning = ( content_accumulator.get_full_reasoning_content() or None ) - - final_message = BaseMessage( - role_name=self.role_name, - role_type=self.role_type, - meta_dict={}, - 
content=final_content, - parsed=cast( - "BaseModel | dict[str, Any] | None", + if final_completion.usage: + request_usage = safe_model_dump( + final_completion.usage + ) + self._update_token_usage_tracker( + request_token_usage, request_usage + ) + self._update_token_usage_tracker( + step_token_usage, request_usage + ) + final_response = ( + self._build_structured_completion_response( + final_completion, + final_content, parsed_object, - ), # type: ignore[arg-type] - reasoning_content=final_reasoning, - ) - - self.record_message(final_message) - - request_usage = ( - safe_model_dump(final_completion.usage) - if final_completion.usage - else {} - ) - self._update_token_usage_tracker( - request_token_usage, - request_usage, - ) - self._update_token_usage_tracker( - step_token_usage, - request_usage, - ) - - # Create final response with cumulative step usage. - final_response = ChatAgentResponse( - msgs=[final_message], - terminated=False, - info={ - "id": final_completion.id or "", - "usage": step_token_usage.copy(), - "finish_reasons": [ - choice.finish_reason or "stop" - for choice in final_completion.choices - ], - "num_tokens": self._get_token_count( - final_content - ), - "tool_calls": tool_call_records, - "external_tool_requests": None, - "streaming": False, - "partial": False, - "stream_accumulate_mode": "accumulate" - if self.stream_accumulate - else "delta", - }, + final_reasoning, + response_format, + tool_call_records, + step_token_usage, + ) ) self._emit_request_usage( usage_dict=request_token_usage, @@ -5344,7 +5343,9 @@ async def _astream( # Start async streaming response last_response = None async for response in self._astream_response( - openai_messages, num_tokens, response_format + openai_messages, + num_tokens, + response_format, ): last_response = response yield response @@ -5517,64 +5518,80 @@ async def _astream_response( # Get final completion and record final message try: final_completion = await stream.get_final_completion() - final_content = ( - final_completion.choices[0].message.content or "" + + # Check if the model wants to call tools + final_choice = final_completion.choices[0] + final_tool_calls = getattr( + final_choice.message, 'tool_calls', None ) + if final_tool_calls: + self._collect_tool_calls_from_completion( + final_tool_calls, + accumulated_tool_calls, + ) + + # Execute tools + async for status_response in ( + self + )._execute_tools_async_with_status_accumulator( + accumulated_tool_calls, + content_accumulator, + step_token_usage, + tool_call_records, + ): + yield status_response + + should_continue, new_context, error_response = ( + self._handle_structured_stream_tool_iteration( + final_completion, + accumulated_tool_calls, + tool_call_records, + request_token_usage, + step_token_usage, + iteration_count, + content_accumulator, + ) + ) + if error_response: + yield error_response + return + await self._aemit_request_usage( + usage_dict=request_token_usage, + step_usage=step_token_usage.copy(), + request_index=iteration_count, + response_id=final_completion.id or "", + ) + if should_continue: + openai_messages, num_tokens = new_context # type: ignore[misc] + continue + else: + break + + final_content = final_choice.message.content or "" final_reasoning = ( content_accumulator.get_full_reasoning_content() or None ) - - final_message = BaseMessage( - role_name=self.role_name, - role_type=self.role_type, - meta_dict={}, - content=final_content, - parsed=cast( - "BaseModel | dict[str, Any] | None", + if final_completion.usage: + request_usage = 
safe_model_dump( + final_completion.usage + ) + self._update_token_usage_tracker( + request_token_usage, request_usage + ) + self._update_token_usage_tracker( + step_token_usage, request_usage + ) + final_response = ( + self._build_structured_completion_response( + final_completion, + final_content, parsed_object, - ), # type: ignore[arg-type] - reasoning_content=final_reasoning, - ) - - self.record_message(final_message) - - request_usage = ( - safe_model_dump(final_completion.usage) - if final_completion.usage - else {} - ) - self._update_token_usage_tracker( - request_token_usage, - request_usage, - ) - self._update_token_usage_tracker( - step_token_usage, - request_usage, - ) - - # Create final response with cumulative step usage. - final_response = ChatAgentResponse( - msgs=[final_message], - terminated=False, - info={ - "id": final_completion.id or "", - "usage": step_token_usage.copy(), - "finish_reasons": [ - choice.finish_reason or "stop" - for choice in final_completion.choices - ], - "num_tokens": self._get_token_count( - final_content - ), - "tool_calls": tool_call_records, - "external_tool_requests": None, - "streaming": False, - "partial": False, - "stream_accumulate_mode": "accumulate" - if self.stream_accumulate - else "delta", - }, + final_reasoning, + response_format, + tool_call_records, + step_token_usage, + ) ) await self._aemit_request_usage( usage_dict=request_token_usage, diff --git a/camel/models/anthropic_model.py b/camel/models/anthropic_model.py index 533add9de8..d2d11d86d6 100644 --- a/camel/models/anthropic_model.py +++ b/camel/models/anthropic_model.py @@ -636,6 +636,37 @@ def _convert_anthropic_stream_to_openai_chunk( usage=usage, ) + @staticmethod + def _add_additional_properties_false(schema: Dict[str, Any]) -> None: + r"""Recursively add additionalProperties: false to all object types.""" + if schema.get("type") == "object": + schema["additionalProperties"] = False + for value in schema.values(): + if isinstance(value, dict): + AnthropicModel._add_additional_properties_false(value) + elif isinstance(value, list): + for item in value: + if isinstance(item, dict): + AnthropicModel._add_additional_properties_false(item) + + @staticmethod + def _build_output_config( + response_format: Type[BaseModel], + ) -> Dict[str, Any]: + r"""Convert a Pydantic model to Anthropic's output_config format.""" + schema = response_format.model_json_schema() + # Remove unsupported fields + schema.pop("$defs", None) + schema.pop("definitions", None) + # Anthropic requires additionalProperties: false on all object types + AnthropicModel._add_additional_properties_false(schema) + return { + "format": { + "type": "json_schema", + "schema": schema, + } + } + def _convert_openai_tools_to_anthropic( self, tools: Optional[List[Dict[str, Any]]] ) -> Optional[List[Dict[str, Any]]]: @@ -678,7 +709,7 @@ def _run( messages (List[OpenAIMessage]): Message list with the chat history in OpenAI API format. response_format (Optional[Type[BaseModel]]): The format of the - response. (Not supported by Anthropic API directly) + response. tools (Optional[List[Dict[str, Any]]]): The schema of the tools to use for the request. @@ -687,15 +718,6 @@ def _run( `ChatCompletion` in the non-stream mode, or `Stream[ChatCompletionChunk]` in the stream mode. """ - if response_format is not None: - warnings.warn( - "The 'response_format' parameter is not supported by the " - "Anthropic API and will be ignored. 
Consider using tools " - "for structured output instead.", - UserWarning, - stacklevel=2, - ) - # Update Langfuse trace with current agent session and metadata agent_session_id = get_current_agent_session_id() if agent_session_id: @@ -716,11 +738,19 @@ def _run( self._convert_openai_to_anthropic_messages(processed_messages) ) + if "max_tokens" not in self.model_config_dict: + warnings.warn( + "Anthropic `max_tokens` is not set; using the default value " + "16384. Set `max_tokens` explicitly to suppress this warning.", + UserWarning, + stacklevel=3, + ) + # Prepare request parameters request_params: Dict[str, Any] = { "model": str(self.model_type), "messages": anthropic_messages, - "max_tokens": self.model_config_dict.get("max_tokens", None), + "max_tokens": self.model_config_dict.get("max_tokens", 16384), } if system_message: @@ -764,6 +794,12 @@ def _run( if key in self.model_config_dict: request_params[key] = self.model_config_dict[key] + # Add structured output via output_config + if response_format is not None: + request_params["output_config"] = self._build_output_config( + response_format + ) + # Convert tools anthropic_tools = self._convert_openai_tools_to_anthropic(tools) if anthropic_tools: @@ -807,7 +843,7 @@ async def _arun( messages (List[OpenAIMessage]): Message list with the chat history in OpenAI API format. response_format (Optional[Type[BaseModel]]): The format of the - response. (Not supported by Anthropic API directly) + response. tools (Optional[List[Dict[str, Any]]]): The schema of the tools to use for the request. @@ -816,15 +852,6 @@ async def _arun( `ChatCompletion` in the non-stream mode, or `AsyncStream[ChatCompletionChunk]` in the stream mode. """ - if response_format is not None: - warnings.warn( - "The 'response_format' parameter is not supported by the " - "Anthropic API and will be ignored. Consider using tools " - "for structured output instead.", - UserWarning, - stacklevel=2, - ) - # Update Langfuse trace with current agent session and metadata agent_session_id = get_current_agent_session_id() if agent_session_id: @@ -845,11 +872,19 @@ async def _arun( self._convert_openai_to_anthropic_messages(processed_messages) ) + if "max_tokens" not in self.model_config_dict: + warnings.warn( + "Anthropic `max_tokens` is not set; using the default value " + "16384. 
Set `max_tokens` explicitly to suppress this warning.", + UserWarning, + stacklevel=3, + ) + # Prepare request parameters request_params: Dict[str, Any] = { "model": str(self.model_type), "messages": anthropic_messages, - "max_tokens": self.model_config_dict.get("max_tokens", None), + "max_tokens": self.model_config_dict.get("max_tokens", 16384), } if system_message: @@ -893,6 +928,12 @@ async def _arun( if key in self.model_config_dict: request_params[key] = self.model_config_dict[key] + # Add structured output via output_config + if response_format is not None: + request_params["output_config"] = self._build_output_config( + response_format + ) + # Convert tools anthropic_tools = self._convert_openai_tools_to_anthropic(tools) if anthropic_tools: diff --git a/camel/models/gemini_model.py b/camel/models/gemini_model.py index de356e6b7b..102ea2767a 100644 --- a/camel/models/gemini_model.py +++ b/camel/models/gemini_model.py @@ -27,6 +27,10 @@ ) from openai import AsyncStream, Stream +from openai.lib.streaming.chat import ( + AsyncChatCompletionStreamManager, + ChatCompletionStreamManager, +) if TYPE_CHECKING: from google.genai.client import Client as GenaiClient @@ -720,6 +724,47 @@ async def async_thought_preserving_generator(): return async_thought_preserving_generator() + @staticmethod + def _clean_gemini_tools( + tools: Optional[List[Dict[str, Any]]], + ) -> Optional[List[Dict[str, Any]]]: + r"""Clean tools for Gemini API compatibility. + + Removes unsupported fields like strict, anyOf, and restricts + enum/format to allowed types. + """ + if not tools: + return tools + import copy + + tools = copy.deepcopy(tools) + for tool in tools: + function_dict = tool.get('function', {}) + function_dict.pop("strict", None) + + if 'parameters' in function_dict: + params = function_dict['parameters'] + if 'properties' in params: + for prop_name, prop_value in params['properties'].items(): + if 'anyOf' in prop_value: + first_type = prop_value['anyOf'][0] + params['properties'][prop_name] = first_type + if 'description' in prop_value: + params['properties'][prop_name][ + 'description' + ] = prop_value['description'] + + if prop_value.get('type') != 'string': + prop_value.pop('enum', None) + + if prop_value.get('type') not in [ + 'string', + 'integer', + 'number', + ]: + prop_value.pop('format', None) + return tools + @observe() def _run( self, @@ -748,19 +793,18 @@ def _run( "response_format", None ) messages = self._process_messages(messages) + is_streaming = self.model_config_dict.get("stream", False) + if response_format: - if tools: - raise ValueError( - "Gemini does not support function calling with " - "response format." + tools = self._clean_gemini_tools(tools) + if is_streaming: + return self._request_stream_parse( # type: ignore[return-value] + messages, response_format, tools ) - result: Union[ChatCompletion, Stream[ChatCompletionChunk]] = ( - self._request_parse(messages, response_format) - ) + else: + return self._request_parse(messages, response_format, tools) else: - result = self._request_chat_completion(messages, tools) - - return result + return self._request_chat_completion(messages, tools) @observe() async def _arun( @@ -790,25 +834,89 @@ async def _arun( "response_format", None ) messages = self._process_messages(messages) + is_streaming = self.model_config_dict.get("stream", False) + if response_format: - if tools: - raise ValueError( - "Gemini does not support function calling with " - "response format." 
+ tools = self._clean_gemini_tools(tools) + if is_streaming: + return await self._arequest_stream_parse( # type: ignore[return-value] + messages, response_format, tools + ) + else: + return await self._arequest_parse( + messages, response_format, tools ) - result: Union[ - ChatCompletion, AsyncStream[ChatCompletionChunk] - ] = await self._arequest_parse(messages, response_format) else: - result = await self._arequest_chat_completion(messages, tools) + return await self._arequest_chat_completion(messages, tools) - return result + @staticmethod + def _build_gemini_response_format( + response_format: Type[BaseModel], + ) -> Dict[str, Any]: + r"""Convert a Pydantic model to Gemini-compatible response_format.""" + schema = response_format.model_json_schema() + # Remove $defs and other unsupported fields for Gemini + schema.pop("$defs", None) + schema.pop("definitions", None) + return { + "type": "json_schema", + "json_schema": { + "name": response_format.__name__, + "schema": schema, + }, + } + + def _request_stream_parse( + self, + messages: List[OpenAIMessage], + response_format: Type[BaseModel], + tools: Optional[List[Dict[str, Any]]] = None, + ) -> ChatCompletionStreamManager[BaseModel]: + r"""Gemini-specific streaming structured output. + + Uses regular streaming with response_format as JSON schema + instead of OpenAI's beta streaming API which is incompatible + with Gemini's tool call delta format. + """ + request_config = self._prepare_request_config(tools) + request_config["stream"] = True + request_config["response_format"] = self._build_gemini_response_format( + response_format + ) + + response = self._client.chat.completions.create( + messages=messages, + model=self.model_type, + **request_config, + ) + return self._preserve_thought_signatures(response) # type: ignore[return-value] + + async def _arequest_stream_parse( + self, + messages: List[OpenAIMessage], + response_format: Type[BaseModel], + tools: Optional[List[Dict[str, Any]]] = None, + ) -> AsyncChatCompletionStreamManager[BaseModel]: + r"""Gemini-specific async streaming structured output.""" + request_config = self._prepare_request_config(tools) + request_config["stream"] = True + request_config["response_format"] = self._build_gemini_response_format( + response_format + ) + + response = await self._async_client.chat.completions.create( + messages=messages, + model=self.model_type, + **request_config, + ) + return self._preserve_thought_signatures(response) # type: ignore[return-value] def _request_chat_completion( self, messages: List[OpenAIMessage], tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + tools = self._clean_gemini_tools(tools) request_config = self._prepare_request_config(tools) try: @@ -836,6 +944,7 @@ async def _arequest_chat_completion( messages: List[OpenAIMessage], tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: + tools = self._clean_gemini_tools(tools) request_config = self._prepare_request_config(tools) try: diff --git a/examples/agents/chatagent_stream_structured_output.py b/examples/agents/chatagent_stream_structured_output.py new file mode 100644 index 0000000000..0562533627 --- /dev/null +++ b/examples/agents/chatagent_stream_structured_output.py @@ -0,0 +1,125 @@ +# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= + +""" +Example: Streaming + Tool Calls + Structured Output + +Demonstrates using ChatAgent in streaming mode with both tools and +response_format (structured output) simultaneously, in both sync and async. +""" + +import asyncio + +from pydantic import BaseModel, Field + +from camel.agents import ChatAgent +from camel.models import ModelFactory +from camel.toolkits import MathToolkit +from camel.types import ModelPlatformType, ModelType + + +class Result(BaseModel): + sum_result: str = Field(description="Only the result of the addition") + product_result: str = Field( + description="Only the result of the multiplication" + ) + division_result: str = Field(description="Only the result of the division") + capital_result: str = Field( + description="Only the result of the capital search" + ) + + +USER_MESSAGE = ( + "Calculate: 1) 123.45 + 678.90 2) 100 * 3.14159 3) 1000 / 7, " + "also search what is the capital of Germany" +) + + +def create_agent() -> ChatAgent: + streaming_model = ModelFactory.create( + model_platform=ModelPlatformType.DEFAULT, + model_type=ModelType.DEFAULT, + model_config_dict={ + "stream": True, + "stream_options": {"include_usage": True}, + }, + ) + return ChatAgent( + system_message="You are a helpful assistant.", + model=streaming_model, + tools=MathToolkit().get_tools(), + stream_accumulate=False, # Delta mode + ) + + +def sync_example(): + """Sync streaming with tools + structured output.""" + print("=== Sync Example ===") + agent = create_agent() + + streaming_response = agent.step(USER_MESSAGE, response_format=Result) + + content_parts = [] + for chunk in streaming_response: + if chunk.msgs[0].content: + content_parts.append(chunk.msgs[0].content) + print(chunk.msgs[0].content, end="", flush=True) + + print(f"\nFull content: {''.join(content_parts)}") + + # Print tool call records + tool_calls = streaming_response.info.get("tool_calls", []) + if tool_calls: + print(f"\nTool calls made: {len(tool_calls)}") + for i, tc in enumerate(tool_calls, 1): + print(f" {i}. {tc.tool_name}({tc.args}) = {tc.result}") + + # Check parsed output + final_msg = streaming_response.msgs[0] + if final_msg.parsed: + print(f"\nParsed result: {final_msg.parsed}") + + +async def async_example(): + """Async streaming with tools + structured output.""" + print("\n=== Async Example ===") + agent = create_agent() + + content_parts = [] + async for chunk in await agent.astep(USER_MESSAGE, response_format=Result): + final_response = chunk + if chunk.msgs[0].content: + content_parts.append(chunk.msgs[0].content) + print(chunk.msgs[0].content, end="", flush=True) + + print() + full_content = "".join(content_parts) + print(f"\nFull content: {full_content}") + + # Print tool call records + tool_calls = final_response.info.get("tool_calls", []) + if tool_calls: + print(f"\nTool calls made: {len(tool_calls)}") + for i, tc in enumerate(tool_calls, 1): + print(f" {i}. 
{tc.tool_name}({tc.args}) = {tc.result}") + + # Check parsed output + final_msg = final_response.msgs[0] + if final_msg.parsed: + print(f"\nParsed result: {final_msg.parsed}") + + +if __name__ == "__main__": + sync_example() + asyncio.run(async_example()) diff --git a/pyproject.toml b/pyproject.toml index 569dd3cb2b..da5bba9b30 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -195,7 +195,7 @@ model_platforms = [ "litellm>=1.38.1,<1.80.12", "mistralai>=1.1.0,<2", "reka-api>=3.0.8,<4", - "anthropic>=0.47.0,<0.50.0", + "anthropic>=0.77.0", "cohere>=5.11.0,<6", "fish-audio-sdk>=1.0.0", "ibm-watsonx-ai>=1.3.11", @@ -239,7 +239,7 @@ owl = [ "python-dotenv>=1.0.0,<2", "transformers>=4,<5", "sentencepiece>=0.2,<0.3", - "anthropic>=0.47.0,<0.50.0", + "anthropic>=0.77.0", "datasets>=3,<4", "soundfile>=0.13,<0.14", "pydub>=0.25.1,<0.26", @@ -288,7 +288,7 @@ eigent = [ "mcp-simple-arxiv==0.2.2", "mcp-server-fetch==2025.1.17", "python-dotenv>=1.0.0,<2", - "anthropic>=0.47.0,<0.50.0", + "anthropic>=0.77.0", "datasets>=3,<4", "pydub>=0.25.1,<0.26", "ffmpeg-python>=0.2.0,<0.3", @@ -393,7 +393,7 @@ all = [ "litellm>=1.38.1,<1.80.12", "mistralai>=1.1.0,<2", "fish-audio-sdk>=1.0.0", - "anthropic>=0.47.0,<0.50.0", + "anthropic>=0.77.0", "reka-api>=3.0.8,<4", "redis>=5.0.6,<6", "azure-storage-blob>=12.21.0,<13", diff --git a/test/agents/test_chat_agent.py b/test/agents/test_chat_agent.py index 1d771a74d0..d272d8f355 100644 --- a/test/agents/test_chat_agent.py +++ b/test/agents/test_chat_agent.py @@ -2182,6 +2182,355 @@ async def mock_async_generator() -> ( assert tool_calls_found, "Tool calls should be found in responses" +@pytest.mark.model_backend +def test_stream_structured_output_with_tool_calls(): + r"""Test sync streaming when model triggers tool calls via the + ChatCompletionStreamManager path (structured output + tool call). + """ + + class TravelPlan(BaseModel): + destination: str = Field(description="Travel destination") + recommendation: str = Field(description="Travel recommendation") + + def get_weather(city: str) -> str: + r"""Get weather for a city. + + Args: + city (str): The city name. + + Returns: + str: Weather description. 
+ """ + return f"Sunny in {city}" + + model = ModelFactory.create( + model_platform=ModelPlatformType.OPENAI, + model_type=ModelType.GPT_5_MINI, + model_config_dict={"stream": True}, + ) + agent = ChatAgent( + system_message="You are a travel assistant.", + model=model, + tools=[FunctionTool(get_weather)], + stream_accumulate=False, + ) + + # --- Helper classes to simulate ChatCompletionStreamManager --- + class _MockEvent: + def __init__(self, type_, delta=None, parsed=None): + self.type = type_ + self.delta = delta + self.parsed = parsed + + class _SyncStreamInner: + def __init__(self, events, final_completion): + self._events = events + self._final_completion = final_completion + + def __iter__(self): + return iter(self._events) + + def get_final_completion(self): + return self._final_completion + + class _SyncStreamManager: + """Has __enter__ but NOT __iter__ — matches the elif branch.""" + + def __init__(self, events, final_completion): + self._inner = _SyncStreamInner(events, final_completion) + + def __enter__(self): + return self._inner + + def __exit__(self, *args): + return False + + # First call: model wants to call get_weather + tool_completion = ChatCompletion( + id="chatcmpl-tool", + choices=[ + Choice( + finish_reason="tool_calls", + index=0, + logprobs=None, + message=ChatCompletionMessage( + content="", + role="assistant", + function_call=None, + tool_calls=[ + ChatCompletionMessageFunctionToolCall( + id="call_weather_sync", + type="function", + function=Function( + name="get_weather", + arguments='{"city": "Paris"}', + ), + ) + ], + ), + ) + ], + created=123456789, + model="gpt-5-mini", + object="chat.completion", + usage=CompletionUsage( + completion_tokens=10, prompt_tokens=20, total_tokens=30 + ), + ) + first_manager = _SyncStreamManager( + events=[_MockEvent("content.done", parsed=None)], + final_completion=tool_completion, + ) + + # Second call: model returns structured travel plan + travel_plan = TravelPlan( + destination="Paris", + recommendation="Visit the Eiffel Tower!", + ) + final_completion = ChatCompletion( + id="chatcmpl-final", + choices=[ + Choice( + finish_reason="stop", + index=0, + logprobs=None, + message=ChatCompletionMessage( + content='{"destination":"Paris",' + '"recommendation":"Visit the Eiffel Tower!"}', + role="assistant", + function_call=None, + tool_calls=None, + ), + ) + ], + created=123456790, + model="gpt-5-mini", + object="chat.completion", + usage=CompletionUsage( + completion_tokens=20, prompt_tokens=40, total_tokens=60 + ), + ) + second_manager = _SyncStreamManager( + events=[ + _MockEvent("content.delta", delta='{"destination":"Paris"'), + _MockEvent("content.done", parsed=travel_plan), + ], + final_completion=final_completion, + ) + + call_count = 0 + + def mock_run(*args, **kwargs): + nonlocal call_count + result = first_manager if call_count == 0 else second_manager + call_count += 1 + return result + + agent.model_backend.run = mock_run + + user_msg = BaseMessage( + role_name="User", + role_type=RoleType.USER, + meta_dict=dict(), + content="Plan a trip to Paris", + ) + + responses = list(agent.step(user_msg, response_format=TravelPlan)) + + assert len(responses) > 0, "Should receive at least one response" + assert call_count == 2, "Model should be called twice (tool call + final)" + + tool_call_found = any( + tc.tool_name == "get_weather" and tc.result == "Sunny in Paris" + for response in responses + for tc in (response.info.get("tool_calls") or []) + ) + assert tool_call_found, "get_weather tool should have been called" + + 
final_response = responses[-1] + assert final_response.msg.parsed is not None + assert final_response.msg.parsed.destination == "Paris" + + +@pytest.mark.model_backend +@pytest.mark.asyncio +async def test_async_stream_structured_output_with_tool_calls(): + r"""Test async streaming when model triggers tool calls via the + AsyncChatCompletionStreamManager path (structured output + tool call). + """ + + class TravelPlan(BaseModel): + destination: str = Field(description="Travel destination") + recommendation: str = Field(description="Travel recommendation") + + def get_weather(city: str) -> str: + r"""Get weather for a city. + + Args: + city (str): The city name. + + Returns: + str: Weather description. + """ + return f"Sunny in {city}" + + model = ModelFactory.create( + model_platform=ModelPlatformType.OPENAI, + model_type=ModelType.GPT_5_MINI, + model_config_dict={"stream": True}, + ) + agent = ChatAgent( + system_message="You are a travel assistant.", + model=model, + tools=[FunctionTool(get_weather)], + stream_accumulate=False, + ) + + # --- Helper classes to simulate AsyncChatCompletionStreamManager --- + class _MockEvent: + def __init__(self, type_, delta=None, parsed=None): + self.type = type_ + self.delta = delta + self.parsed = parsed + + class _AsyncStreamInner: + def __init__(self, events, final_completion): + self._events = events + self._final_completion = final_completion + + def __aiter__(self): + return self._gen() + + async def _gen(self): + for event in self._events: + yield event + + async def get_final_completion(self): + return self._final_completion + + class _AsyncStreamManager: + """Has __aenter__ but NOT __aiter__ — matches the async elif branch.""" + + def __init__(self, events, final_completion): + self._inner = _AsyncStreamInner(events, final_completion) + + async def __aenter__(self): + return self._inner + + async def __aexit__(self, *args): + return False + + # First call: model wants to call get_weather + tool_completion = ChatCompletion( + id="chatcmpl-tool-async", + choices=[ + Choice( + finish_reason="tool_calls", + index=0, + logprobs=None, + message=ChatCompletionMessage( + content="", + role="assistant", + function_call=None, + tool_calls=[ + ChatCompletionMessageFunctionToolCall( + id="call_weather_async", + type="function", + function=Function( + name="get_weather", + arguments='{"city": "Tokyo"}', + ), + ) + ], + ), + ) + ], + created=123456789, + model="gpt-5-mini", + object="chat.completion", + usage=CompletionUsage( + completion_tokens=10, prompt_tokens=20, total_tokens=30 + ), + ) + first_manager = _AsyncStreamManager( + events=[_MockEvent("content.done", parsed=None)], + final_completion=tool_completion, + ) + + # Second call: model returns structured travel plan + travel_plan = TravelPlan( + destination="Tokyo", + recommendation="Visit Shibuya Crossing!", + ) + final_completion = ChatCompletion( + id="chatcmpl-final-async", + choices=[ + Choice( + finish_reason="stop", + index=0, + logprobs=None, + message=ChatCompletionMessage( + content='{"destination":"Tokyo",' + '"recommendation":"Visit Shibuya Crossing!"}', + role="assistant", + function_call=None, + tool_calls=None, + ), + ) + ], + created=123456790, + model="gpt-5-mini", + object="chat.completion", + usage=CompletionUsage( + completion_tokens=20, prompt_tokens=40, total_tokens=60 + ), + ) + second_manager = _AsyncStreamManager( + events=[ + _MockEvent("content.delta", delta='{"destination":"Tokyo"'), + _MockEvent("content.done", parsed=travel_plan), + ], + 
final_completion=final_completion, + ) + + call_count = 0 + + async def mock_arun(*args, **kwargs): + nonlocal call_count + result = first_manager if call_count == 0 else second_manager + call_count += 1 + return result + + agent.model_backend.arun = mock_arun + + user_msg = BaseMessage( + role_name="User", + role_type=RoleType.USER, + meta_dict=dict(), + content="Plan a trip to Tokyo", + ) + + responses = [] + async for response in await agent.astep( + user_msg, response_format=TravelPlan + ): + responses.append(response) + + assert len(responses) > 0, "Should receive at least one response" + assert call_count == 2, "Model should be called twice (tool call + final)" + + tool_call_found = any( + tc.tool_name == "get_weather" and tc.result == "Sunny in Tokyo" + for response in responses + for tc in (response.info.get("tool_calls") or []) + ) + assert tool_call_found, "get_weather tool should have been called" + + final_response = responses[-1] + assert final_response.msg.parsed is not None + assert final_response.msg.parsed.destination == "Tokyo" + + @pytest.mark.model_backend def test_chat_agent_stream_with_structured_output(): r"""Test streaming with structured output (response_format). diff --git a/uv.lock b/uv.lock index adc5a19092..4e1ef80f59 100644 --- a/uv.lock +++ b/uv.lock @@ -265,20 +265,21 @@ wheels = [ [[package]] name = "anthropic" -version = "0.49.0" +version = "0.84.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, { name = "distro" }, + { name = "docstring-parser" }, { name = "httpx" }, { name = "jiter" }, { name = "pydantic" }, { name = "sniffio" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/86/e3/a88c8494ce4d1a88252b9e053607e885f9b14d0a32273d47b727cbee4228/anthropic-0.49.0.tar.gz", hash = "sha256:c09e885b0f674b9119b4f296d8508907f6cff0009bc20d5cf6b35936c40b4398", size = 210016, upload-time = "2025-02-28T19:35:47.01Z" } +sdist = { url = "https://files.pythonhosted.org/packages/04/ea/0869d6df9ef83dcf393aeefc12dd81677d091c6ffc86f783e51cf44062f2/anthropic-0.84.0.tar.gz", hash = "sha256:72f5f90e5aebe62dca316cb013629cfa24996b0f5a4593b8c3d712bc03c43c37", size = 539457, upload-time = "2026-02-25T05:22:38.54Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/76/74/5d90ad14d55fbe3f9c474fdcb6e34b4bed99e3be8efac98734a5ddce88c1/anthropic-0.49.0-py3-none-any.whl", hash = "sha256:bbc17ad4e7094988d2fa86b87753ded8dce12498f4b85fe5810f208f454a8375", size = 243368, upload-time = "2025-02-28T19:35:44.963Z" }, + { url = "https://files.pythonhosted.org/packages/64/ca/218fa25002a332c0aa149ba18ffc0543175998b1f65de63f6d106689a345/anthropic-0.84.0-py3-none-any.whl", hash = "sha256:861c4c50f91ca45f942e091d83b60530ad6d4f98733bfe648065364da05d29e7", size = 455156, upload-time = "2026-02-25T05:22:40.468Z" }, ] [[package]] @@ -1297,10 +1298,10 @@ requires-dist = [ { name = "agentops", marker = "extra == 'dev-tools'", specifier = ">=0.3.21,<0.4" }, { name = "aiosqlite", marker = "extra == 'all'", specifier = ">=0.20.0,<0.21" }, { name = "aiosqlite", marker = "extra == 'data-tools'", specifier = ">=0.20.0,<0.21" }, - { name = "anthropic", marker = "extra == 'all'", specifier = ">=0.47.0,<0.50.0" }, - { name = "anthropic", marker = "extra == 'eigent'", specifier = ">=0.47.0,<0.50.0" }, - { name = "anthropic", marker = "extra == 'model-platforms'", specifier = ">=0.47.0,<0.50.0" }, - { name = "anthropic", marker = "extra == 'owl'", specifier = ">=0.47.0,<0.50.0" }, + { name = "anthropic", marker = "extra == 
'all'", specifier = ">=0.77.0" }, + { name = "anthropic", marker = "extra == 'eigent'", specifier = ">=0.77.0" }, + { name = "anthropic", marker = "extra == 'model-platforms'", specifier = ">=0.77.0" }, + { name = "anthropic", marker = "extra == 'owl'", specifier = ">=0.77.0" }, { name = "apify-client", marker = "extra == 'all'", specifier = ">=1.8.1,<2" }, { name = "apify-client", marker = "extra == 'web-tools'", specifier = ">=1.8.1,<2" }, { name = "arxiv", marker = "extra == 'all'", specifier = ">=2.1.3,<3" }, @@ -2675,16 +2676,16 @@ wheels = [ [[package]] name = "ddgs" -version = "9.11.2" +version = "9.11.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" }, { name = "lxml" }, { name = "primp" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/6b/80/eb387dd4291d1624e773f455fd1dfc54596e06469d680fe3b3f8c326ba1a/ddgs-9.11.2.tar.gz", hash = "sha256:b5f072149580773291fd3eb6e9f4de47fa9d910ebd5ef85845a37e59cfe24c40", size = 34722, upload-time = "2026-03-05T05:17:31.574Z" } +sdist = { url = "https://files.pythonhosted.org/packages/06/9e/d89f0c24d78812bad0b4150d9a432925aa756b4bfeb4ef4815fe6ff8f2a6/ddgs-9.11.3.tar.gz", hash = "sha256:6098c030d6806217260071d85e38d9b94b99fe326a3c40ebf5de25f620528ae2", size = 34776, upload-time = "2026-03-11T07:12:02.041Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/2c/fe/7591bfa694ee26fcf0dd8035811994e28a9699402d3861eea7754958c1bd/ddgs-9.11.2-py3-none-any.whl", hash = "sha256:0023a3633d271e72cdd1da757d3fcea2d996608da3f3c9da2cc0c0607b219c76", size = 43646, upload-time = "2026-03-05T05:17:30.469Z" }, + { url = "https://files.pythonhosted.org/packages/0b/9d/018d745128a9a33aff3e6b8f0260f7b970784d4b31573d36ee233b2e4db1/ddgs-9.11.3-py3-none-any.whl", hash = "sha256:596d656d00219b4748d839de1fa9a9c3eb5dd36db07365331f7526201115f18a", size = 43691, upload-time = "2026-03-11T07:12:00.21Z" }, ] [[package]] @@ -7474,7 +7475,7 @@ name = "pexpect" version = "4.9.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "ptyprocess" }, + { name = "ptyprocess", marker = "sys_platform != 'win32'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450, upload-time = "2023-11-25T09:07:26.339Z" } wheels = [ @@ -7678,40 +7679,40 @@ wheels = [ [[package]] name = "primp" -version = "1.1.2" +version = "1.1.3" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/03/35/80be154508529f753fb82cb81298bdeb33e90f39f9901d7cfa0f488a581f/primp-1.1.2.tar.gz", hash = "sha256:c4707ab374a77c0cbead3d9a65605919fa4997fa910ef06e37b65df42a1d4d04", size = 313908, upload-time = "2026-03-01T05:52:49.773Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/f7/13/dc9588356d983f988877ae065c842cdd6cf95073615b56b460cbe857f3dc/primp-1.1.2-cp310-abi3-macosx_10_12_x86_64.whl", hash = "sha256:181bb9a6d5544e0483592f693f33f5874a60726ea0da1f41685aa2267f084a4d", size = 4002669, upload-time = "2026-03-01T05:52:31.977Z" }, - { url = "https://files.pythonhosted.org/packages/70/af/6a6c26141583a5081bad69b9753c85df81b466939663742ef5bec35ee868/primp-1.1.2-cp310-abi3-macosx_11_0_arm64.whl", hash = "sha256:f362424ffa83e1de55a7573300a416fa71dc5516829526a9bf77dc0cfa42256b", size = 3743010, upload-time = "2026-03-01T05:52:38.452Z" }, - { url = 
"https://files.pythonhosted.org/packages/a9/99/03db937e031a02885d8c80d073d7424967d629721b5044dcb4e80b6cbdcf/primp-1.1.2-cp310-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:736820326eb1ed19c6b0e971f852316c049c36bdd251a03757056a74182796df", size = 3889905, upload-time = "2026-03-01T05:52:20.616Z" }, - { url = "https://files.pythonhosted.org/packages/15/3c/faecef36238f464e2dd52056420676eb2d541cd20ff478d3b967815079e3/primp-1.1.2-cp310-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed37d1bc89fa8cad8b60481c81ea7b3bd42dc757868009ad3bb0b1e74c17fd22", size = 3524521, upload-time = "2026-03-01T05:52:08.403Z" }, - { url = "https://files.pythonhosted.org/packages/7f/d5/8954e5b5b454139ff35063d5a143a1570f865b736cfd8a46cc7ce9575a5a/primp-1.1.2-cp310-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78e78355b1c495bc7e3d92121067760c7e7a1d419519542ed9dd88688ce43aab", size = 3738228, upload-time = "2026-03-01T05:52:05.127Z" }, - { url = "https://files.pythonhosted.org/packages/26/e7/dc93dbeddb7642e12f4575aaf2c9fda7234b241050a112a9baa288971b16/primp-1.1.2-cp310-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c4c560d018dad4e3a3f17b07f9f5d894941e3acbbb5b566f6b6baf42786012f", size = 4013704, upload-time = "2026-03-01T05:52:48.529Z" }, - { url = "https://files.pythonhosted.org/packages/dd/3d/2cc2e0cd310f585df05a7008fd6de4542d7c0bc61e62b6797f28a9ede28b/primp-1.1.2-cp310-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2494b52cf3056d3e41c0746a11cbeca7f2f882a92a09d87383646cd75e2f3d8c", size = 3920174, upload-time = "2026-03-01T05:52:06.635Z" }, - { url = "https://files.pythonhosted.org/packages/35/60/dc4572ba96911374b43b4f5d1f012706c3f27fd2c12dd3e158fcf74ac3dd/primp-1.1.2-cp310-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c424a46f48ccd8fd309215a15bc098b47198b8f779c43ed8d95b3f53a382ffa8", size = 4113822, upload-time = "2026-03-01T05:52:51.061Z" }, - { url = "https://files.pythonhosted.org/packages/ec/2e/90f5f8e138f8bc6652c5134aa59a746775623a820f92164c6690217e49d6/primp-1.1.2-cp310-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ba51cf19f17fd4bab4567d96b4cd7dcb6a4e0f0d4721819180b46af9794ae310", size = 4068028, upload-time = "2026-03-01T05:52:13.843Z" }, - { url = "https://files.pythonhosted.org/packages/d4/ea/753d8edcb85c3c36d5731fbd2b215528738d917ae9cf3dce651ae0f1c529/primp-1.1.2-cp310-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:77ebae43c6735328051beb08e7e2360b6cf79d50f6cef77629beba880c99222d", size = 3754469, upload-time = "2026-03-01T05:52:15.671Z" }, - { url = "https://files.pythonhosted.org/packages/ae/51/b417cd741bf8eacea86debad358a6dc5821e2849a22e2c91cff926bebbb2/primp-1.1.2-cp310-abi3-musllinux_1_2_i686.whl", hash = "sha256:5f3252d47e9d0f4a567990c79cd388be43353fc7c78efea2a6a5734e8a425598", size = 3859330, upload-time = "2026-03-01T05:52:46.979Z" }, - { url = "https://files.pythonhosted.org/packages/3e/20/19db933c878748e9a7b9ad4057e9caf7ad9c91fd27d2a2692ac629453a66/primp-1.1.2-cp310-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:9e094417825df9748e179a1104b2df4459c3dbd1eea994f05a136860b847f0e1", size = 4365491, upload-time = "2026-03-01T05:52:35.007Z" }, - { url = "https://files.pythonhosted.org/packages/fc/0f/48a57ee744cc6dc64fb7daff7bc04e9ec3cefd0594d008a775496dddaeb1/primp-1.1.2-cp310-abi3-win32.whl", hash = "sha256:bc67112b61a8dc1d40ddcc81ff5c47a1cb7b620954fee01a529e28bebb359e20", size = 3266998, upload-time = "2026-03-01T05:52:02.059Z" }, - { url = 
"https://files.pythonhosted.org/packages/9c/0a/119d497fb098c739142d4a47b062a8a9cc0b4b87aca65334150066d075a0/primp-1.1.2-cp310-abi3-win_amd64.whl", hash = "sha256:4509850301c669c04e124762e953946ed10fe9039f059ec40b818c085697d9a4", size = 3601691, upload-time = "2026-03-01T05:52:12.34Z" }, - { url = "https://files.pythonhosted.org/packages/95/1f/2b8f218aebb4f236d94ae148b4f5c0471b3d00316b0ef5d0b7c2222d8417/primp-1.1.2-cp310-abi3-win_arm64.whl", hash = "sha256:de5958dc7ce78ce107dd776056a58f9da7a7164a912e908cb9b66b84f87967f6", size = 3613756, upload-time = "2026-03-01T05:52:28.279Z" }, - { url = "https://files.pythonhosted.org/packages/40/38/f77c5af1fd53658e04ae52decfab71349af43bdfdb32ddd8a622f6251842/primp-1.1.2-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:c3bbece26e8312e3e0df2ec222b954f9ac9f279422ffbbf47a6cad31ef8736cd", size = 3992311, upload-time = "2026-03-01T05:52:43.497Z" }, - { url = "https://files.pythonhosted.org/packages/77/f6/2e4504cfdeec5d39063173205ca10a281a2681fd9999da37b442ac7e6662/primp-1.1.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:78acdf43b57d984170e986be5fcae0a1537a245fafda970e92056dae42cd9545", size = 3736438, upload-time = "2026-03-01T05:52:22.505Z" }, - { url = "https://files.pythonhosted.org/packages/d3/6c/fe10c51b79cd407d3a1e08a0bb8a35ae53d79ce4156543ea4df7262581ef/primp-1.1.2-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89a2641441732f81e1876db2e18490d3210a8302290e4844b7f04159e02033d4", size = 3878622, upload-time = "2026-03-01T05:52:33.458Z" }, - { url = "https://files.pythonhosted.org/packages/fb/86/5c68dc877af9baf4fba3e5d2615fe0aefbdd4e1337d3b678b66769b434c9/primp-1.1.2-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a1df66deacb539efbca5730d0fc3dea19cd83c33422fa05445bbddc17aef3f71", size = 3520112, upload-time = "2026-03-01T05:52:45.214Z" }, - { url = "https://files.pythonhosted.org/packages/fd/aa/f8798a1c0fabbc9254e29330df61b93bdb54130e9d5e5d8495eff99fc658/primp-1.1.2-cp314-cp314t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78ea1f56dd3ac52f2d5375a084c7f31ce6ad274811bdb5d17ecaca6b4ddb8b6d", size = 3740187, upload-time = "2026-03-01T05:52:26.052Z" }, - { url = "https://files.pythonhosted.org/packages/90/e4/ea08359b6fbcda7b3ffcc15b4c1e0bf4f89680db126ba96889e7f8e1fe04/primp-1.1.2-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c980527bd46c034ab9e06dca75b6237cea8d5b3fe1f5691904a2c35d92d143c", size = 4011825, upload-time = "2026-03-01T05:52:17.403Z" }, - { url = "https://files.pythonhosted.org/packages/01/4a/8cf516250cc97eab2d4c822478ab0037b9848bca844787196481b5691f25/primp-1.1.2-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0c0b4006a9a25c5f89a968f3bf67221fc19183890b8a1304873132d703697816", size = 3907535, upload-time = "2026-03-01T05:52:24.455Z" }, - { url = "https://files.pythonhosted.org/packages/90/00/e6fe4abf75012d05009abf22e9e1eb89b4bca06ad9f79c10876cebdf7271/primp-1.1.2-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2851bedc1598ed72f6a2016e391012744259c523dc5d27f2f02e3ae5ef020d4", size = 4108136, upload-time = "2026-03-01T05:52:42.007Z" }, - { url = "https://files.pythonhosted.org/packages/9d/8a/64cd76fee8b994f349c1a9c6541b4144dee64056dcaa8109bd352518b777/primp-1.1.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f7340e34023dda2660bd02cb92ac8ed441f13a1afdc00487581d8b8b473f890b", size = 4060289, upload-time = "2026-03-01T05:52:40.4Z" }, - { url = 
"https://files.pythonhosted.org/packages/dd/7c/fbea74676def2ce1d21a53e86cdbb3ef9c7a12b2febfdd3961a8466449a7/primp-1.1.2-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:618a027bb45ac44e9b6c35d5758547ce5e73607de4fb54b52bb9d0dc896f11fa", size = 3749499, upload-time = "2026-03-01T05:51:59.988Z" }, - { url = "https://files.pythonhosted.org/packages/12/7a/36fc46a385141063e2ae4fd24dda308e75da8c6409c425a56ffceb6e4f71/primp-1.1.2-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:37e30ce1435142dd010f2ee1dd909f1e6e3a8cd3e32c8e22f3bb6703bf618209", size = 3858861, upload-time = "2026-03-01T05:52:10.621Z" }, - { url = "https://files.pythonhosted.org/packages/65/bb/d0319dbd2e20fb4f54d8b3f536b89431a9d1442f00fa11a874dfbe9d2de7/primp-1.1.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9b5d335d28eae65543b20c75911d71c5f89882a4598efade47abe92389f6da7f", size = 4358677, upload-time = "2026-03-01T05:52:18.978Z" }, - { url = "https://files.pythonhosted.org/packages/57/89/ab887a516dc83dbae12ea5b338f60c46a56966a972fed65f8de5bf05a9c2/primp-1.1.2-cp314-cp314t-win32.whl", hash = "sha256:b938cc2d033ac56be90c617836a60fb468f33ab630d3eacab2b36651b7ce106e", size = 3258062, upload-time = "2026-03-01T05:52:36.741Z" }, - { url = "https://files.pythonhosted.org/packages/df/ca/e870d65162f6c68da6d25afa3e01202ac500c8ad1b682dfd03e8c45e4d4a/primp-1.1.2-cp314-cp314t-win_amd64.whl", hash = "sha256:6378d55bbe8b722e7b39b6c0df1e46a1b767d2e4e8a7c1e60d9f8ec238bf48c4", size = 3599631, upload-time = "2026-03-01T05:52:03.595Z" }, - { url = "https://files.pythonhosted.org/packages/4e/cb/61667c710293d8007416130c9ad69f60a956393b52e82557c84ae8286aa7/primp-1.1.2-cp314-cp314t-win_arm64.whl", hash = "sha256:2431104658b86e7cf9bedbadabe6d2c4705c1c10b54f17ad0094cc927577adea", size = 3610624, upload-time = "2026-03-01T05:52:30.19Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/c4/0e/62ed44af95c66fd6fa8ad49c8bde815f64c7e976772d6979730be2b7cd97/primp-1.1.3.tar.gz", hash = "sha256:56adc3b8a5048cbd5f926b21fdff839195f3a9181512ca33f56ddc66f4c95897", size = 311356, upload-time = "2026-03-11T06:42:51.763Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ad/6b/36794b5758a0dd1251e67b6ab3ea946e53fa69745e0ecc29facc072ddf5b/primp-1.1.3-cp310-abi3-macosx_10_12_x86_64.whl", hash = "sha256:24383cfc267f620769be102b7fa4b64c7d47105f86bd21d047f1e07709e83c6e", size = 4000660, upload-time = "2026-03-11T06:42:58.092Z" }, + { url = "https://files.pythonhosted.org/packages/98/18/ebbe318a926d158c57f9e9cf49bbea70e8f0bd7f87e7675ed68e0d6ab433/primp-1.1.3-cp310-abi3-macosx_11_0_arm64.whl", hash = "sha256:61bcb8c53b41e4bac43d04a1374b6ab7d8ded0f3517d32c5cdd5c30562756805", size = 3737318, upload-time = "2026-03-11T06:42:50.19Z" }, + { url = "https://files.pythonhosted.org/packages/a9/4c/430c9154284b53b771e6713a18dec4ad0159e4a501a20b222d67c730ced9/primp-1.1.3-cp310-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0c6b9388578ee9d903f30549a792c5f391fdeb9d36b508da2ffb8e13c764954", size = 3881005, upload-time = "2026-03-11T06:43:12.894Z" }, + { url = "https://files.pythonhosted.org/packages/93/34/2466ef66386a1b50e6aaf7832f9f603628407bb33342378faf4b38c4aee8/primp-1.1.3-cp310-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:09a8bfa870c92c81d76611846ec53b2520845e3ec5f4139f47604986bcf4bc25", size = 3514480, upload-time = "2026-03-11T06:43:06.058Z" }, + { url = 
"https://files.pythonhosted.org/packages/ff/42/ca7a71df6493dd6c1971c0cc3b20b8125e2547eb3bf88b4429715cb6ed81/primp-1.1.3-cp310-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ac372cb9959fff690b255fad91c5b3bc948c14065da9fc00ad80d139651515af", size = 3734658, upload-time = "2026-03-11T06:43:47.486Z" }, + { url = "https://files.pythonhosted.org/packages/bc/7c/0fb34db619e9935e11140929713c2c7b5323c1e8ba75cad6f0aade51c89d/primp-1.1.3-cp310-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3034672a007f04e12b8fe7814c97ea172e8b9c5d45bd7b00cf6e7334fdd4222a", size = 4011898, upload-time = "2026-03-11T06:43:41.121Z" }, + { url = "https://files.pythonhosted.org/packages/da/8b/afd1bd8b14f38d58c5ebd0d45fc6b74914956907aa4e981bb2e5231626d3/primp-1.1.3-cp310-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a07d5b7d7278dc63452a59f3bf851dc4d1f8ddc2aada7844cbdb68002256e2f4", size = 3910728, upload-time = "2026-03-11T06:43:01.819Z" }, + { url = "https://files.pythonhosted.org/packages/32/9e/1ec3a9678efcbb51e50d7b4886d9195f956c9fd7f4efcff13ccb152248b0/primp-1.1.3-cp310-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08eec2f58abbcc1060032a2af81dabacec87a580a364a75862039f7422ac82e6", size = 4114189, upload-time = "2026-03-11T06:42:47.639Z" }, + { url = "https://files.pythonhosted.org/packages/28/d9/76de611027c0688be188d5a833be45b1e36d9c0c98baefab27bf6336ab9d/primp-1.1.3-cp310-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9716d4cd36db2c175443fe1bbd54045a944fc9c49d01a385af8ada1fe9c948df", size = 4061973, upload-time = "2026-03-11T06:43:37.301Z" }, + { url = "https://files.pythonhosted.org/packages/37/3b/a30a5ea366705d0ece265b12ad089793d644bd5730b18201e3a0a7fa7b5f/primp-1.1.3-cp310-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:e19daca65dc6df369c33e711fa481ad2afe5d26c5bde926c069b3ab067c4fd45", size = 3747920, upload-time = "2026-03-11T06:43:10.403Z" }, + { url = "https://files.pythonhosted.org/packages/df/46/e3c323221c371cdfe6c2ed971f7a70e3b69f30b561977715c55230bd5fda/primp-1.1.3-cp310-abi3-musllinux_1_2_i686.whl", hash = "sha256:ee357537712aa486364b0194cf403c5f9eaaa1354e23e9ac8322a22003f31e6b", size = 3861184, upload-time = "2026-03-11T06:43:49.391Z" }, + { url = "https://files.pythonhosted.org/packages/8a/7f/babaf00753daad7d80061003d7ae1bdfca64ea94c181cdea8d25c8a7226a/primp-1.1.3-cp310-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:06c53e77ebf6ac00633bc09e7e5a6d1a994592729d399ca8f065451a2574b92e", size = 4364610, upload-time = "2026-03-11T06:42:56.223Z" }, + { url = "https://files.pythonhosted.org/packages/03/48/c7bca8045c681f5f60972c180d2a20582c7a0857b3b07b12e0a0ee062ac4/primp-1.1.3-cp310-abi3-win32.whl", hash = "sha256:4b1ea3693c118bf04a6e05286f0a73637cf6fe5c9fd77fa1e29a01f190adf512", size = 3265160, upload-time = "2026-03-11T06:43:43.774Z" }, + { url = "https://files.pythonhosted.org/packages/45/3e/4a4b8a0f6f15734cded91e85439e68912b2bb8eafe7132420c13c2db8340/primp-1.1.3-cp310-abi3-win_amd64.whl", hash = "sha256:5ea386a4c8c4d8c1021d17182f4ee24dbb6f17c107c4e9ee5500b6372cf08f32", size = 3603953, upload-time = "2026-03-11T06:43:33.144Z" }, + { url = "https://files.pythonhosted.org/packages/70/46/1baf13a7f5fbed6052deb3e4822c69441a8d0fd990fe2a50e4cec802130b/primp-1.1.3-cp310-abi3-win_arm64.whl", hash = "sha256:63c7b1a1ccbcd07213f438375df186f807cdc5214bc2debb055737db9b5078de", size = 3619917, upload-time = "2026-03-11T06:42:44.76Z" }, + { url = 
"https://files.pythonhosted.org/packages/be/0c/a73cbe13f075e7ceaa5172b44ebc6f423713c6b4efe168114993a1710b26/primp-1.1.3-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:4b3d52f3233134584ef527e7e52f1b371a964ade1df0461f8187100e41d7fa84", size = 3987141, upload-time = "2026-03-11T06:43:24.904Z" }, + { url = "https://files.pythonhosted.org/packages/49/56/b70d7991fb1e07af53706b1f69f78a0b440a7b4b2a2999c44ab44afef1e7/primp-1.1.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b3d947e2c1d15147e8f4736d027b9f3bef518d67da859ead1c54e028ff491bbb", size = 3735665, upload-time = "2026-03-11T06:43:29.347Z" }, + { url = "https://files.pythonhosted.org/packages/31/82/69efc663341c2bab55659ed221903a090e5c80255c2de2acc70f3726a3fc/primp-1.1.3-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3ed2fee7d4758f6bb873b19a6759f54e0bc453213dad5ba7e52de7582921079", size = 3873695, upload-time = "2026-03-11T06:43:15.396Z" }, + { url = "https://files.pythonhosted.org/packages/07/7e/6b360742019ef8fb4ea036a420eb21b0a58d380ca09c68b075fc103cc043/primp-1.1.3-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b5aa717f256af9e4391fb1c4dc946d99d04652b4c57dad20c3947e839ab26769", size = 3512644, upload-time = "2026-03-11T06:43:08.368Z" }, + { url = "https://files.pythonhosted.org/packages/03/46/51d2ada6d5b53b8496eddf2c80392deab13698987412d0234f88e72390c1/primp-1.1.3-cp314-cp314t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:17f37fcacd97540f68b06f2b468b111ca7f2b142c48370db7344b522274fc0d6", size = 3733114, upload-time = "2026-03-11T06:43:22.838Z" }, + { url = "https://files.pythonhosted.org/packages/45/f5/5f5f5f4bef7e247ec3543e2fbdb670d8db8753a7693baf9c8b9fcf52cd43/primp-1.1.3-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5f010d0b8ba111dd9a66f814c2cd56332e047c98f45d7714ffbf2b1cec5b073", size = 4005664, upload-time = "2026-03-11T06:43:20.824Z" }, + { url = "https://files.pythonhosted.org/packages/f2/bf/99cf4a5f179b3f13b0c2ba4d3ae8f8af19f0084308e76cb79a0cee03c31b/primp-1.1.3-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e1e431915e4a7094d589213fc14e955243d93751031d889f4b359fa8ed54298", size = 3895746, upload-time = "2026-03-11T06:43:35.376Z" }, + { url = "https://files.pythonhosted.org/packages/c3/75/4c625e1cab37585365b0856ca44f31ad598e92a847d23561f454b7f36fca/primp-1.1.3-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aaffa22dae2f193d899d9f68cca109ea5d16cdf4c901c20cec186de89e7d5db4", size = 4109815, upload-time = "2026-03-11T06:43:04.059Z" }, + { url = "https://files.pythonhosted.org/packages/49/72/6197ea78779d359f307be1acc64659896fc960ed91c0bdc6e6e698e423e6/primp-1.1.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f93bee50990884621ef482e8434e87f9fbb4eca6f4d47973c44c5d6393c35679", size = 4050839, upload-time = "2026-03-11T06:43:18.296Z" }, + { url = "https://files.pythonhosted.org/packages/a4/b2/cdd565b28bcf7ce555f4decdf89dafd16db8ed3ba8661890d3b9337abe45/primp-1.1.3-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:399dfb9ad01c3612c9e510a7034ac925af5524cade0961d8a019dedd90a46474", size = 3748397, upload-time = "2026-03-11T06:43:27.347Z" }, + { url = "https://files.pythonhosted.org/packages/62/6e/def3a90821b52589dbe1f57477c2c89bde7a5b26a7c166d7751930c06f98/primp-1.1.3-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:78ce595bbb9f339e83975efa9db2a81128842fad1a2fdafb78d72fcdc59590fc", size = 3861261, upload-time = "2026-03-11T06:43:39.292Z" }, + { url 
= "https://files.pythonhosted.org/packages/10/7d/3e610614d6a426502cfc6eccea21ef4557b39177d365df393c994945ca43/primp-1.1.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:7d709bdf520aa9401c0592b642730b3477c828629f01d2550977b77135b34e8d", size = 4358608, upload-time = "2026-03-11T06:43:45.606Z" }, + { url = "https://files.pythonhosted.org/packages/91/50/eb190cefe5eb05896825a5b3365d5650b9327161329cd1df4f7351b66ba9/primp-1.1.3-cp314-cp314t-win32.whl", hash = "sha256:6fe893eb87156dfb146dd666c7c8754670de82e38af0a27d82a47b7461ec2eea", size = 3259903, upload-time = "2026-03-11T06:42:59.922Z" }, + { url = "https://files.pythonhosted.org/packages/1f/a8/9e8534bc6d729a667f79b249fcdbf2230b0eb41214e277998cd6be900498/primp-1.1.3-cp314-cp314t-win_amd64.whl", hash = "sha256:ced76ef6669f31dc4af25e81e87914310645bcfc0892036bde084dafd6d00c3c", size = 3602569, upload-time = "2026-03-11T06:42:53.955Z" }, + { url = "https://files.pythonhosted.org/packages/9c/92/e18be996a01c7fd0e7dd7d198edefe42813cdfe1637bbbc80370ce656f62/primp-1.1.3-cp314-cp314t-win_arm64.whl", hash = "sha256:efadef0dfd10e733a254a949abf9ed05c668c28a68aa6513d811c0c6acd54cdb", size = 3611571, upload-time = "2026-03-11T06:43:31.249Z" }, ] [[package]] @@ -8469,7 +8470,7 @@ name = "pyobjc-framework-cocoa" version = "12.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "pyobjc-core", marker = "python_full_version >= '3.13' or sys_platform == 'darwin'" }, + { name = "pyobjc-core", marker = "(python_full_version >= '3.13' and sys_platform != 'win32') or sys_platform == 'darwin'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/02/a3/16ca9a15e77c061a9250afbae2eae26f2e1579eb8ca9462ae2d2c71e1169/pyobjc_framework_cocoa-12.1.tar.gz", hash = "sha256:5556c87db95711b985d5efdaaf01c917ddd41d148b1e52a0c66b1a2e2c5c1640", size = 2772191, upload-time = "2025-11-14T10:13:02.069Z" } wheels = [ @@ -8487,8 +8488,8 @@ name = "pyobjc-framework-quartz" version = "12.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "pyobjc-core", marker = "python_full_version >= '3.13' or sys_platform == 'darwin'" }, - { name = "pyobjc-framework-cocoa", marker = "python_full_version >= '3.13' or sys_platform == 'darwin'" }, + { name = "pyobjc-core", marker = "(python_full_version >= '3.13' and sys_platform != 'win32') or sys_platform == 'darwin'" }, + { name = "pyobjc-framework-cocoa", marker = "(python_full_version >= '3.13' and sys_platform != 'win32') or sys_platform == 'darwin'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/94/18/cc59f3d4355c9456fc945eae7fe8797003c4da99212dd531ad1b0de8a0c6/pyobjc_framework_quartz-12.1.tar.gz", hash = "sha256:27f782f3513ac88ec9b6c82d9767eef95a5cf4175ce88a1e5a65875fee799608", size = 3159099, upload-time = "2025-11-14T10:21:24.31Z" } wheels = [