Skip to content

Commit b6f42ac

Browse files
committed
Backwards compatible init
1 parent e86555a commit b6f42ac

File tree

2 files changed

+67
-0
lines changed

2 files changed

+67
-0
lines changed

src/guardrails/_base_client.py

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -107,6 +107,41 @@ class GuardrailsResponse:
107107
_llm_response: OpenAIResponseType # Private: OpenAI response object
108108
guardrail_results: GuardrailResults
109109

110+
def __init__(
111+
self,
112+
guardrail_results: GuardrailResults,
113+
_llm_response: OpenAIResponseType | None = None,
114+
llm_response: OpenAIResponseType | None = None,
115+
) -> None:
116+
"""Initialize GuardrailsResponse with backward-compatible parameter names.
117+
118+
Accepts both _llm_response (new) and llm_response (deprecated) parameter names
119+
to maintain backward compatibility with existing code.
120+
121+
Args:
122+
guardrail_results: The guardrail results.
123+
_llm_response: The underlying OpenAI response (preferred parameter name).
124+
llm_response: The underlying OpenAI response (deprecated parameter name).
125+
126+
Raises:
127+
TypeError: If neither or both llm_response parameters are provided.
128+
"""
129+
# Handle backward compatibility: accept both parameter names
130+
if _llm_response is not None and llm_response is not None:
131+
msg = "Cannot specify both 'llm_response' and '_llm_response'"
132+
raise TypeError(msg)
133+
134+
if _llm_response is None and llm_response is None:
135+
msg = "Must specify either 'llm_response' or '_llm_response'"
136+
raise TypeError(msg)
137+
138+
# Use whichever was provided
139+
response_obj = _llm_response if _llm_response is not None else llm_response
140+
141+
# Set fields on frozen dataclass using object.__setattr__
142+
object.__setattr__(self, "_llm_response", response_obj)
143+
object.__setattr__(self, "guardrail_results", guardrail_results)
144+
110145
@property
111146
def llm_response(self) -> OpenAIResponseType:
112147
"""Access the underlying OpenAI response (deprecated).

tests/unit/test_response_flattening.py

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -356,6 +356,38 @@ def test_separate_instances_warn_independently() -> None:
356356
assert len(deprecation_warnings) == 2 # noqa: S101
357357

358358

359+
def test_init_backward_compatibility_with_llm_response_param() -> None:
    """Verify __init__ accepts both the new and the deprecated parameter name."""
    completion = _create_mock_chat_completion()
    results = _create_mock_guardrail_results()

    # The deprecated and preferred spellings must both construct a working response.
    for kwargs in ({"llm_response": completion}, {"_llm_response": completion}):
        built = GuardrailsResponse(guardrail_results=results, **kwargs)
        assert built.id == "chatcmpl-123"  # noqa: S101

    # Supplying both spellings at once is rejected.
    with pytest.raises(TypeError, match="Cannot specify both"):
        GuardrailsResponse(
            llm_response=completion,
            _llm_response=completion,
            guardrail_results=results,
        )

    # Supplying neither spelling is rejected as well.
    with pytest.raises(TypeError, match="Must specify either"):
        GuardrailsResponse(guardrail_results=results)
389+
390+
359391
def test_dir_includes_delegated_attributes() -> None:
360392
"""Test that dir() includes attributes from the underlying llm_response."""
361393
mock_llm_response = _create_mock_chat_completion()

0 commit comments

Comments
 (0)