Skip to content

Commit 3559ee9

Browse files
feat(langchain): Record run_name in on_chat_model_start (#5924)
Record the LangChain run config value `run_name` as the `gen_ai.function_id` span attribute in `on_chat_model_start`.
1 parent e9f9db5 commit 3559ee9

File tree

6 files changed

+424
-74
lines changed

6 files changed

+424
-74
lines changed

sentry_sdk/consts.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -549,6 +549,12 @@ class SPANDATA:
549549
Example: "Hello!"
550550
"""
551551

552+
GEN_AI_FUNCTION_ID = "gen_ai.function_id"
553+
"""
554+
Framework-specific tracing identifier for a function or other unit of execution in a generative AI system.
555+
Example: "my-awesome-function"
556+
"""
557+
552558
GEN_AI_OPERATION_NAME = "gen_ai.operation.name"
553559
"""
554560
The name of the operation being performed.

sentry_sdk/integrations/langchain.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -432,6 +432,13 @@ def on_chat_model_start(
432432
SPANDATA.GEN_AI_AGENT_NAME, agent_metadata["lc_agent_name"]
433433
)
434434

435+
run_name = kwargs.get("name")
436+
if run_name:
437+
span.set_data(
438+
SPANDATA.GEN_AI_FUNCTION_ID,
439+
run_name,
440+
)
441+
435442
for key, attribute in DATA_FIELDS.items():
436443
if key in all_params and all_params[key] is not None:
437444
set_data_normalized(span, attribute, all_params[key], unpack=False)

tests/conftest.py

Lines changed: 25 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -1265,26 +1265,31 @@ def streaming_chat_completions_model_response():
12651265

12661266
@pytest.fixture
12671267
def nonstreaming_chat_completions_model_response():
1268-
return openai.types.chat.ChatCompletion(
1269-
id="chatcmpl-test",
1270-
choices=[
1271-
openai.types.chat.chat_completion.Choice(
1272-
index=0,
1273-
finish_reason="stop",
1274-
message=openai.types.chat.ChatCompletionMessage(
1275-
role="assistant", content="Test response"
1276-
),
1277-
)
1278-
],
1279-
created=1234567890,
1280-
model="gpt-3.5-turbo",
1281-
object="chat.completion",
1282-
usage=openai.types.CompletionUsage(
1283-
prompt_tokens=10,
1284-
completion_tokens=20,
1285-
total_tokens=30,
1286-
),
1287-
)
1268+
def inner(
1269+
response_id: str,
1270+
response_model: str,
1271+
message_content: str,
1272+
created: int,
1273+
usage: openai.types.CompletionUsage,
1274+
):
1275+
return openai.types.chat.ChatCompletion(
1276+
id=response_id,
1277+
choices=[
1278+
openai.types.chat.chat_completion.Choice(
1279+
index=0,
1280+
finish_reason="stop",
1281+
message=openai.types.chat.ChatCompletionMessage(
1282+
role="assistant", content=message_content
1283+
),
1284+
)
1285+
],
1286+
created=created,
1287+
model=response_model,
1288+
object="chat.completion",
1289+
usage=usage,
1290+
)
1291+
1292+
return inner
12881293

12891294

12901295
@pytest.fixture

tests/integrations/langchain/test_langchain.py

Lines changed: 63 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,7 @@
6767
)
6868

6969
LANGCHAIN_VERSION = package_version("langchain")
70+
LANGCHAIN_OPENAI_VERSION = package_version("langchain-openai")
7071

7172

7273
@tool
@@ -170,6 +171,68 @@ def test_langchain_text_completion(
170171
assert llm_span["data"]["gen_ai.usage.output_tokens"] == 15
171172

172173

174+
def test_langchain_chat_with_run_name(
175+
sentry_init,
176+
capture_events,
177+
get_model_response,
178+
nonstreaming_chat_completions_model_response,
179+
):
180+
sentry_init(
181+
integrations=[
182+
LangchainIntegration(
183+
include_prompts=True,
184+
)
185+
],
186+
traces_sample_rate=1.0,
187+
send_default_pii=True,
188+
)
189+
events = capture_events()
190+
191+
request_headers = {}
192+
# Changed in https://github.com/langchain-ai/langchain/pull/32655
193+
if LANGCHAIN_OPENAI_VERSION >= (0, 3, 32):
194+
request_headers["X-Stainless-Raw-Response"] = "True"
195+
196+
model_response = get_model_response(
197+
nonstreaming_chat_completions_model_response(
198+
response_id="chat-id",
199+
response_model="response-model-id",
200+
message_content="the model response",
201+
created=10000000,
202+
usage=CompletionUsage(
203+
prompt_tokens=20,
204+
completion_tokens=10,
205+
total_tokens=30,
206+
),
207+
),
208+
serialize_pydantic=True,
209+
request_headers=request_headers,
210+
)
211+
212+
llm = ChatOpenAI(
213+
model_name="gpt-3.5-turbo",
214+
temperature=0,
215+
openai_api_key="badkey",
216+
)
217+
218+
with patch.object(
219+
llm.client._client._client,
220+
"send",
221+
return_value=model_response,
222+
) as _:
223+
with start_transaction():
224+
llm.invoke(
225+
"How many letters in the word eudca",
226+
config={"run_name": "my-snazzy-pipeline"},
227+
)
228+
229+
tx = events[0]
230+
231+
chat_spans = list(x for x in tx["spans"] if x["op"] == "gen_ai.chat")
232+
assert len(chat_spans) == 1
233+
assert chat_spans[0]["data"][SPANDATA.GEN_AI_FUNCTION_ID] == "my-snazzy-pipeline"
234+
235+
173236
@pytest.mark.skipif(
174237
LANGCHAIN_VERSION < (1,),
175238
reason="LangChain 1.0+ required (ONE AGENT refactor)",

0 commit comments

Comments (0)