Skip to content

Commit e9f9db5

Browse files
fix(langchain): Set agent name as gen_ai.agent.name for chat and tool spans (#5877)
Set `lc_agent_name` as the `gen_ai.agent.name` attribute in `on_chat_model_start()` and in `on_tool_start()`. Remove propagation of the `run_name` LangChain attribute that was previously used to set the agent name in the hooks.
1 parent: e882029 · commit: e9f9db5

File tree

2 files changed

+54
-92
lines changed

2 files changed

+54
-92
lines changed

sentry_sdk/integrations/langchain.py

Lines changed: 48 additions & 92 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
import contextvars
21
import itertools
32
import sys
43
import json
@@ -162,44 +161,6 @@ def _transform_langchain_message_content(content: "Any") -> "Any":
162161
return content
163162

164163

165-
# Contextvar to track agent names in a stack for re-entrant agent support
166-
_agent_stack: "contextvars.ContextVar[Optional[List[Optional[str]]]]" = (
167-
contextvars.ContextVar("langchain_agent_stack", default=None)
168-
)
169-
170-
171-
def _push_agent(agent_name: "Optional[str]") -> None:
172-
"""Push an agent name onto the stack."""
173-
stack = _agent_stack.get()
174-
if stack is None:
175-
stack = []
176-
else:
177-
# Copy the list to maintain contextvar isolation across async contexts
178-
stack = stack.copy()
179-
stack.append(agent_name)
180-
_agent_stack.set(stack)
181-
182-
183-
def _pop_agent() -> "Optional[str]":
184-
"""Pop an agent name from the stack and return it."""
185-
stack = _agent_stack.get()
186-
if stack:
187-
# Copy the list to maintain contextvar isolation across async contexts
188-
stack = stack.copy()
189-
agent_name = stack.pop()
190-
_agent_stack.set(stack)
191-
return agent_name
192-
return None
193-
194-
195-
def _get_current_agent() -> "Optional[str]":
196-
"""Get the current agent name (top of stack) without removing it."""
197-
stack = _agent_stack.get()
198-
if stack:
199-
return stack[-1]
200-
return None
201-
202-
203164
def _get_system_instructions(messages: "List[List[BaseMessage]]") -> "List[str]":
204165
system_instructions = []
205166

@@ -465,9 +426,11 @@ def on_chat_model_start(
465426
if ai_system:
466427
span.set_data(SPANDATA.GEN_AI_SYSTEM, ai_system)
467428

468-
agent_name = _get_current_agent()
469-
if agent_name:
470-
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)
429+
agent_metadata = kwargs.get("metadata")
430+
if isinstance(agent_metadata, dict) and "lc_agent_name" in agent_metadata:
431+
span.set_data(
432+
SPANDATA.GEN_AI_AGENT_NAME, agent_metadata["lc_agent_name"]
433+
)
471434

472435
for key, attribute in DATA_FIELDS.items():
473436
if key in all_params and all_params[key] is not None:
@@ -665,9 +628,11 @@ def on_tool_start(
665628
if tool_description is not None:
666629
span.set_data(SPANDATA.GEN_AI_TOOL_DESCRIPTION, tool_description)
667630

668-
agent_name = _get_current_agent()
669-
if agent_name:
670-
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)
631+
agent_metadata = kwargs.get("metadata")
632+
if isinstance(agent_metadata, dict) and "lc_agent_name" in agent_metadata:
633+
span.set_data(
634+
SPANDATA.GEN_AI_AGENT_NAME, agent_metadata["lc_agent_name"]
635+
)
671636

672637
if should_send_default_pii() and self.include_prompts:
673638
set_data_normalized(
@@ -987,58 +952,53 @@ def new_invoke(self: "Any", *args: "Any", **kwargs: "Any") -> "Any":
987952
if integration is None:
988953
return f(self, *args, **kwargs)
989954

990-
agent_name, tools = _get_request_data(self, args, kwargs)
955+
run_name, tools = _get_request_data(self, args, kwargs)
991956
start_span_function = get_start_span_function()
992957

993958
with start_span_function(
994959
op=OP.GEN_AI_INVOKE_AGENT,
995-
name=f"invoke_agent {agent_name}" if agent_name else "invoke_agent",
960+
name=f"invoke_agent {run_name}" if run_name else "invoke_agent",
996961
origin=LangchainIntegration.origin,
997962
) as span:
998-
_push_agent(agent_name)
999-
try:
1000-
if agent_name:
1001-
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)
963+
if run_name:
964+
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, run_name)
1002965

1003-
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
1004-
span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, False)
966+
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
967+
span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, False)
1005968

1006-
_set_tools_on_span(span, tools)
969+
_set_tools_on_span(span, tools)
1007970

1008-
# Run the agent
1009-
result = f(self, *args, **kwargs)
971+
# Run the agent
972+
result = f(self, *args, **kwargs)
1010973

1011-
input = result.get("input")
1012-
if (
1013-
input is not None
1014-
and should_send_default_pii()
1015-
and integration.include_prompts
1016-
):
1017-
normalized_messages = normalize_message_roles([input])
1018-
scope = sentry_sdk.get_current_scope()
1019-
messages_data = truncate_and_annotate_messages(
1020-
normalized_messages, span, scope
974+
input = result.get("input")
975+
if (
976+
input is not None
977+
and should_send_default_pii()
978+
and integration.include_prompts
979+
):
980+
normalized_messages = normalize_message_roles([input])
981+
scope = sentry_sdk.get_current_scope()
982+
messages_data = truncate_and_annotate_messages(
983+
normalized_messages, span, scope
984+
)
985+
if messages_data is not None:
986+
set_data_normalized(
987+
span,
988+
SPANDATA.GEN_AI_REQUEST_MESSAGES,
989+
messages_data,
990+
unpack=False,
1021991
)
1022-
if messages_data is not None:
1023-
set_data_normalized(
1024-
span,
1025-
SPANDATA.GEN_AI_REQUEST_MESSAGES,
1026-
messages_data,
1027-
unpack=False,
1028-
)
1029992

1030-
output = result.get("output")
1031-
if (
1032-
output is not None
1033-
and should_send_default_pii()
1034-
and integration.include_prompts
1035-
):
1036-
set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)
993+
output = result.get("output")
994+
if (
995+
output is not None
996+
and should_send_default_pii()
997+
and integration.include_prompts
998+
):
999+
set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)
10371000

1038-
return result
1039-
finally:
1040-
# Ensure agent is popped even if an exception occurs
1041-
_pop_agent()
1001+
return result
10421002

10431003
return new_invoke
10441004

@@ -1050,20 +1010,18 @@ def new_stream(self: "Any", *args: "Any", **kwargs: "Any") -> "Any":
10501010
if integration is None:
10511011
return f(self, *args, **kwargs)
10521012

1053-
agent_name, tools = _get_request_data(self, args, kwargs)
1013+
run_name, tools = _get_request_data(self, args, kwargs)
10541014
start_span_function = get_start_span_function()
10551015

10561016
span = start_span_function(
10571017
op=OP.GEN_AI_INVOKE_AGENT,
1058-
name=f"invoke_agent {agent_name}" if agent_name else "invoke_agent",
1018+
name=f"invoke_agent {run_name}" if run_name else "invoke_agent",
10591019
origin=LangchainIntegration.origin,
10601020
)
10611021
span.__enter__()
10621022

1063-
_push_agent(agent_name)
1064-
1065-
if agent_name:
1066-
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)
1023+
if run_name:
1024+
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, run_name)
10671025

10681026
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
10691027
span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
@@ -1117,7 +1075,6 @@ def new_iterator() -> "Iterator[Any]":
11171075
raise
11181076
finally:
11191077
# Ensure cleanup happens even if iterator is abandoned or fails
1120-
_pop_agent()
11211078
span.__exit__(*exc_info)
11221079

11231080
async def new_iterator_async() -> "AsyncIterator[Any]":
@@ -1143,7 +1100,6 @@ async def new_iterator_async() -> "AsyncIterator[Any]":
11431100
raise
11441101
finally:
11451102
# Ensure cleanup happens even if iterator is abandoned or fails
1146-
_pop_agent()
11471103
span.__exit__(*exc_info)
11481104

11491105
if str(type(result)) == "<class 'async_generator'>":

tests/integrations/langchain/test_langchain.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -259,6 +259,8 @@ def test_langchain_create_agent(
259259
assert chat_spans[0]["origin"] == "auto.ai.langchain"
260260

261261
assert chat_spans[0]["data"]["gen_ai.system"] == "openai-chat"
262+
assert chat_spans[0]["data"]["gen_ai.agent.name"] == "word_length_agent"
263+
262264
assert chat_spans[0]["data"]["gen_ai.usage.input_tokens"] == 10
263265
assert chat_spans[0]["data"]["gen_ai.usage.output_tokens"] == 20
264266
assert chat_spans[0]["data"]["gen_ai.usage.total_tokens"] == 30
@@ -415,6 +417,10 @@ def test_tool_execution_span(
415417
assert chat_spans[1]["origin"] == "auto.ai.langchain"
416418
assert tool_exec_span["origin"] == "auto.ai.langchain"
417419

420+
assert chat_spans[0]["data"]["gen_ai.agent.name"] == "word_length_agent"
421+
assert chat_spans[1]["data"]["gen_ai.agent.name"] == "word_length_agent"
422+
assert tool_exec_span["data"]["gen_ai.agent.name"] == "word_length_agent"
423+
418424
assert chat_spans[0]["data"]["gen_ai.usage.input_tokens"] == 142
419425
assert chat_spans[0]["data"]["gen_ai.usage.output_tokens"] == 50
420426
assert chat_spans[0]["data"]["gen_ai.usage.total_tokens"] == 192

0 commit comments

Comments (0)