Skip to content

Commit 4b6c63e

Browse files
test(langchain): Replace mocks with httpx types (#5724)
Replace mocks with `httpx` types to avoid test failures when library internals change.
1 parent 62d2a98 commit 4b6c63e

File tree

2 files changed

+192
-83
lines changed

2 files changed

+192
-83
lines changed

tests/conftest.py

Lines changed: 6 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -1018,10 +1018,14 @@ async def inner(values):
10181018

10191019
@pytest.fixture
10201020
def server_side_event_chunks():
1021-
def inner(events):
1021+
def inner(events, include_event_type=True):
10221022
for event in events:
10231023
payload = event.model_dump()
1024-
chunk = f"event: {payload['type']}\ndata: {json.dumps(payload)}\n\n"
1024+
chunk = (
1025+
f"event: {payload['type']}\ndata: {json.dumps(payload)}\n\n"
1026+
if include_event_type
1027+
else f"data: {json.dumps(payload)}\n\n"
1028+
)
10251029
yield chunk.encode("utf-8")
10261030

10271031
return inner

tests/integrations/langchain/test_langchain.py

Lines changed: 186 additions & 81 deletions
Original file line number | Diff line number | Diff line change
@@ -40,6 +40,19 @@
4040
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
4141

4242

43+
from openai.types.chat.chat_completion_chunk import (
44+
ChatCompletionChunk,
45+
Choice,
46+
ChoiceDelta,
47+
ChoiceDeltaToolCall,
48+
ChoiceDeltaToolCallFunction,
49+
)
50+
51+
from openai.types.completion_usage import (
52+
CompletionUsage,
53+
)
54+
55+
4356
@tool
4457
def get_word_length(word: str) -> int:
4558
"""Returns the length of a word."""
@@ -67,12 +80,12 @@ def _llm_type(self) -> str:
6780

6881

6982
@pytest.mark.parametrize(
70-
"send_default_pii, include_prompts, use_unknown_llm_type",
83+
"send_default_pii, include_prompts",
7184
[
72-
(True, True, False),
73-
(True, False, False),
74-
(False, True, False),
75-
(False, False, True),
85+
(True, True),
86+
(True, False),
87+
(False, True),
88+
(False, False),
7689
],
7790
)
7891
@pytest.mark.parametrize(
@@ -92,13 +105,11 @@ def test_langchain_agent(
92105
capture_events,
93106
send_default_pii,
94107
include_prompts,
95-
use_unknown_llm_type,
96108
system_instructions_content,
97109
request,
110+
get_model_response,
111+
server_side_event_chunks,
98112
):
99-
global llm_type
100-
llm_type = "acme-llm" if use_unknown_llm_type else "openai-chat"
101-
102113
sentry_init(
103114
integrations=[
104115
LangchainIntegration(
@@ -120,87 +131,173 @@ def test_langchain_agent(
120131
MessagesPlaceholder(variable_name="agent_scratchpad"),
121132
]
122133
)
123-
global stream_result_mock
124-
stream_result_mock = Mock(
125-
side_effect=[
134+
135+
tool_response = get_model_response(
136+
server_side_event_chunks(
126137
[
127-
ChatGenerationChunk(
128-
type="ChatGenerationChunk",
129-
message=AIMessageChunk(
130-
content="",
131-
additional_kwargs={
132-
"tool_calls": [
133-
{
134-
"index": 0,
135-
"id": "call_BbeyNhCKa6kYLYzrD40NGm3b",
136-
"function": {
137-
"arguments": "",
138-
"name": "get_word_length",
139-
},
140-
"type": "function",
141-
}
142-
]
143-
},
144-
),
138+
ChatCompletionChunk(
139+
id="chatcmpl-turn-1",
140+
object="chat.completion.chunk",
141+
created=10000000,
142+
model="gpt-3.5-turbo",
143+
choices=[
144+
Choice(
145+
index=0,
146+
delta=ChoiceDelta(role="assistant"),
147+
finish_reason=None,
148+
),
149+
],
145150
),
146-
ChatGenerationChunk(
147-
type="ChatGenerationChunk",
148-
message=AIMessageChunk(
149-
content="",
150-
additional_kwargs={
151-
"tool_calls": [
152-
{
153-
"index": 0,
154-
"id": None,
155-
"function": {
156-
"arguments": '{"word": "eudca"}',
157-
"name": None,
158-
},
159-
"type": None,
160-
}
161-
]
162-
},
163-
),
151+
ChatCompletionChunk(
152+
id="chatcmpl-turn-1",
153+
object="chat.completion.chunk",
154+
created=10000000,
155+
model="gpt-3.5-turbo",
156+
choices=[
157+
Choice(
158+
index=0,
159+
delta=ChoiceDelta(
160+
tool_calls=[
161+
ChoiceDeltaToolCall(
162+
index=0,
163+
id="call_BbeyNhCKa6kYLYzrD40NGm3b",
164+
type="function",
165+
function=ChoiceDeltaToolCallFunction(
166+
name="get_word_length",
167+
arguments="",
168+
),
169+
),
170+
],
171+
),
172+
finish_reason=None,
173+
),
174+
],
164175
),
165-
ChatGenerationChunk(
166-
type="ChatGenerationChunk",
167-
message=AIMessageChunk(
168-
content="5",
169-
usage_metadata={
170-
"input_tokens": 142,
171-
"output_tokens": 50,
172-
"total_tokens": 192,
173-
"input_token_details": {"audio": 0, "cache_read": 0},
174-
"output_token_details": {"audio": 0, "reasoning": 0},
175-
},
176+
ChatCompletionChunk(
177+
id="chatcmpl-turn-1",
178+
object="chat.completion.chunk",
179+
created=10000000,
180+
model="gpt-3.5-turbo",
181+
choices=[
182+
Choice(
183+
index=0,
184+
delta=ChoiceDelta(
185+
tool_calls=[
186+
ChoiceDeltaToolCall(
187+
index=0,
188+
function=ChoiceDeltaToolCallFunction(
189+
arguments='{"word": "eudca"}',
190+
),
191+
),
192+
],
193+
),
194+
finish_reason=None,
195+
),
196+
],
197+
),
198+
ChatCompletionChunk(
199+
id="chatcmpl-turn-1",
200+
object="chat.completion.chunk",
201+
created=10000000,
202+
model="gpt-3.5-turbo",
203+
choices=[
204+
Choice(
205+
index=0,
206+
delta=ChoiceDelta(content="5"),
207+
finish_reason=None,
208+
),
209+
],
210+
),
211+
ChatCompletionChunk(
212+
id="chatcmpl-turn-1",
213+
object="chat.completion.chunk",
214+
created=10000000,
215+
model="gpt-3.5-turbo",
216+
choices=[
217+
Choice(
218+
index=0,
219+
delta=ChoiceDelta(),
220+
finish_reason="function_call",
221+
),
222+
],
223+
),
224+
ChatCompletionChunk(
225+
id="chatcmpl-turn-1",
226+
object="chat.completion.chunk",
227+
created=10000000,
228+
model="gpt-3.5-turbo",
229+
choices=[],
230+
usage=CompletionUsage(
231+
prompt_tokens=142,
232+
completion_tokens=50,
233+
total_tokens=192,
176234
),
177-
generation_info={"finish_reason": "function_call"},
178235
),
179236
],
237+
include_event_type=False,
238+
)
239+
)
240+
241+
final_response = get_model_response(
242+
server_side_event_chunks(
180243
[
181-
ChatGenerationChunk(
182-
text="The word eudca has 5 letters.",
183-
type="ChatGenerationChunk",
184-
message=AIMessageChunk(
185-
content="The word eudca has 5 letters.",
186-
usage_metadata={
187-
"input_tokens": 89,
188-
"output_tokens": 28,
189-
"total_tokens": 117,
190-
"input_token_details": {"audio": 0, "cache_read": 0},
191-
"output_token_details": {"audio": 0, "reasoning": 0},
192-
},
193-
),
244+
ChatCompletionChunk(
245+
id="chatcmpl-turn-2",
246+
object="chat.completion.chunk",
247+
created=10000000,
248+
model="gpt-3.5-turbo",
249+
choices=[
250+
Choice(
251+
index=0,
252+
delta=ChoiceDelta(role="assistant"),
253+
finish_reason=None,
254+
),
255+
],
194256
),
195-
ChatGenerationChunk(
196-
type="ChatGenerationChunk",
197-
generation_info={"finish_reason": "stop"},
198-
message=AIMessageChunk(content=""),
257+
ChatCompletionChunk(
258+
id="chatcmpl-turn-2",
259+
object="chat.completion.chunk",
260+
created=10000000,
261+
model="gpt-3.5-turbo",
262+
choices=[
263+
Choice(
264+
index=0,
265+
delta=ChoiceDelta(content="The word eudca has 5 letters."),
266+
finish_reason=None,
267+
),
268+
],
269+
),
270+
ChatCompletionChunk(
271+
id="chatcmpl-turn-2",
272+
object="chat.completion.chunk",
273+
created=10000000,
274+
model="gpt-3.5-turbo",
275+
choices=[
276+
Choice(
277+
index=0,
278+
delta=ChoiceDelta(),
279+
finish_reason="stop",
280+
),
281+
],
282+
),
283+
ChatCompletionChunk(
284+
id="chatcmpl-turn-2",
285+
object="chat.completion.chunk",
286+
created=10000000,
287+
model="gpt-3.5-turbo",
288+
choices=[],
289+
usage=CompletionUsage(
290+
prompt_tokens=89,
291+
completion_tokens=28,
292+
total_tokens=117,
293+
),
199294
),
200295
],
201-
]
296+
include_event_type=False,
297+
)
202298
)
203-
llm = MockOpenAI(
299+
300+
llm = ChatOpenAI(
204301
model_name="gpt-3.5-turbo",
205302
temperature=0,
206303
openai_api_key="badkey",
@@ -209,8 +306,13 @@ def test_langchain_agent(
209306

210307
agent_executor = AgentExecutor(agent=agent, tools=[get_word_length], verbose=True)
211308

212-
with start_transaction():
213-
list(agent_executor.stream({"input": "How many letters in the word eudca"}))
309+
with patch.object(
310+
llm.client._client._client,
311+
"send",
312+
side_effect=[tool_response, final_response],
313+
) as _:
314+
with start_transaction():
315+
list(agent_executor.stream({"input": "How many letters in the word eudca"}))
214316

215317
tx = events[0]
216318
assert tx["type"] == "transaction"
@@ -321,6 +423,9 @@ def test_langchain_agent(
321423

322424

323425
def test_langchain_error(sentry_init, capture_events):
426+
global llm_type
427+
llm_type = "acme-llm"
428+
324429
sentry_init(
325430
integrations=[LangchainIntegration(include_prompts=True)],
326431
traces_sample_rate=1.0,

0 commit comments

Comments (0)