Skip to content

Commit 92df109

Browse files
authored
chore(langchain): add end to end test for strict mode in provider strategy (#34289)
1 parent d27fb0c commit 92df109

File tree

3 files changed

+52
-1
lines changed

3 files changed

+52
-1
lines changed
Binary file not shown.
3.85 KB
Binary file not shown.

libs/langchain_v1/tests/unit_tests/agents/test_response_format_integration.py

Lines changed: 52 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -44,13 +44,15 @@ def bytes_encoder(obj):
4444
"""
4545

4646
import os
47+
from typing import Any
48+
from unittest.mock import patch
4749

4850
import pytest
4951
from langchain_core.messages import HumanMessage
5052
from pydantic import BaseModel, Field
5153

5254
from langchain.agents import create_agent
53-
from langchain.agents.structured_output import ToolStrategy
55+
from langchain.agents.structured_output import ProviderStrategy, ToolStrategy
5456

5557

5658
class WeatherBaseModel(BaseModel):
@@ -140,3 +142,52 @@ def test_inference_to_tool_output(use_responses_api: bool) -> None:
140142
"ai", # structured response
141143
"tool", # artificial tool message
142144
]
145+
146+
147+
@pytest.mark.requires("langchain_openai")
@pytest.mark.vcr
@pytest.mark.parametrize("use_responses_api", [False, True])
def test_strict_mode(use_responses_api: bool) -> None:
    """End-to-end check that ``ProviderStrategy(strict=True)`` propagates the
    ``strict`` flag into the outgoing OpenAI request payload, for both the
    Chat Completions and the Responses APIs.
    """
    from langchain_openai import ChatOpenAI

    init_kwargs: dict[str, Any] = {
        "model": "gpt-5",
        "use_responses_api": use_responses_api,
    }
    # Fall back to a dummy key when none is configured (VCR cassette replay).
    if os.environ.get("OPENAI_API_KEY") is None:
        init_kwargs["api_key"] = "foo"

    model = ChatOpenAI(**init_kwargs)

    # Wrap _get_request_payload so every outgoing payload is recorded for
    # later inspection of the `strict` flag.
    real_get_payload = model._get_request_payload
    captured: list[dict[str, Any]] = []

    def _spy(*args: Any, **kwargs: Any) -> dict[str, Any]:
        payload = real_get_payload(*args, **kwargs)
        captured.append(payload)
        return payload

    with patch.object(model, "_get_request_payload", side_effect=_spy):
        agent = create_agent(
            model,
            tools=[get_weather],
            response_format=ProviderStrategy(WeatherBaseModel, strict=True),
        )
        response = agent.invoke(
            {"messages": [HumanMessage("What's the weather in Boston?")]}
        )

    # Two round trips were made: the tool-calling turn and the structured-
    # output turn; the structured-output payload is the last one captured.
    assert len(captured) == 2
    last = captured[-1]
    if use_responses_api:
        assert last["text"]["format"]["strict"]
    else:
        assert last["response_format"]["json_schema"]["strict"]

    structured = response["structured_response"]
    assert isinstance(structured, WeatherBaseModel)
    assert structured.temperature == 75.0
    assert structured.condition.lower() == "sunny"

    messages = response["messages"]
    assert len(messages) == 4
    expected_types = [
        "human",  # "What's the weather?"
        "ai",  # tool-calling turn
        "tool",  # "The weather is sunny and 75°F."
        "ai",  # structured response
    ]
    assert [m.type for m in messages] == expected_types

0 commit comments

Comments
 (0)