
Commit 6e46824

test_streaming_response
1 parent 515ae96

3 files changed: +19 additions, -8 deletions

tests/llm_translation/test_openai.py

Lines changed: 1 addition & 1 deletion
@@ -337,7 +337,7 @@ def test_openai_max_retries_0(mock_get_openai_client):
     assert mock_get_openai_client.call_args.kwargs["max_retries"] == 0


-@pytest.mark.parametrize("model", ["o1", "o1-mini", "o3-mini"])
+@pytest.mark.parametrize("model", ["o1", "o3-mini"])
 def test_o1_parallel_tool_calls(model):
     litellm.completion(
         model=model,
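
For context, pytest.mark.parametrize expands one test function into a separate test case per listed value, so dropping "o1-mini" here simply removes that case from the run matrix. A minimal, self-contained sketch of the mechanism (the test name and assertion below are illustrative, not part of this commit):

import pytest

@pytest.mark.parametrize("model", ["o1", "o3-mini"])
def test_model_matrix(model):
    # pytest runs this body once per value, as
    # test_model_matrix[o1] and test_model_matrix[o3-mini]
    assert isinstance(model, str)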

tests/llm_translation/test_openai_o1.py

Lines changed: 1 addition & 1 deletion
@@ -210,7 +210,7 @@ def test_o3_reasoning_effort():
     assert resp.choices[0].message.content is not None


-@pytest.mark.parametrize("model", ["o1-mini", "o1", "o3-mini"])
+@pytest.mark.parametrize("model", ["o1", "o3-mini"])
 def test_streaming_response(model):
     """Test that streaming response is returned correctly"""
     from litellm import completion
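
The body of test_streaming_response is not shown in this hunk. For context, a minimal sketch of how a litellm streaming completion is typically consumed (the model name and prompt are illustrative, and a valid OPENAI_API_KEY is assumed in the environment):

from litellm import completion

response = completion(
    model="o1",
    messages=[{"role": "user", "content": "Say hello"}],
    stream=True,  # yield incremental chunks instead of one final response
)
for chunk in response:
    # each chunk carries a delta; content may be None on some chunks
    content = chunk.choices[0].delta.content
    if content:
        print(content, end="")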

tests/local_testing/test_openai_moderations_hook.py

Lines changed: 17 additions & 6 deletions
@@ -28,13 +28,15 @@


 @pytest.mark.asyncio
-async def test_openai_moderation_error_raising():
+async def test_openai_moderation_error_raising(monkeypatch):
     """
     Tests to see OpenAI Moderation raises an error for a flagged response
     """
-
+    from unittest.mock import AsyncMock, MagicMock
+    from litellm.types.llms.openai import OpenAIModerationResponse
+
+    litellm.openai_moderations_model_name = "text-moderation-latest"
     openai_mod = _ENTERPRISE_OpenAI_Moderation()
-    litellm.openai_moderations_model_name = "omni-moderation-latest"
     _api_key = "sk-12345"
     _api_key = hash_token("sk-12345")
     user_api_key_dict = UserAPIKeyAuth(api_key=_api_key)
@@ -45,15 +47,24 @@ async def test_openai_moderation_error_raising():
     llm_router = litellm.Router(
         model_list=[
             {
-                "model_name": "omni-moderation-latest",
+                "model_name": "text-moderation-latest",
                 "litellm_params": {
-                    "model": "omni-moderation-latest",
-                    "api_key": os.environ["OPENAI_API_KEY"],
+                    "model": "text-moderation-latest",
+                    "api_key": os.environ.get("OPENAI_API_KEY", "fake-key"),
                 },
             }
         ]
     )

+    # Mock the amoderation call to return a flagged response
+    mock_response = MagicMock(spec=OpenAIModerationResponse)
+    mock_response.results = [MagicMock(flagged=True)]
+
+    async def mock_amoderation(*args, **kwargs):
+        return mock_response
+
+    llm_router.amoderation = mock_amoderation
+
     setattr(litellm.proxy.proxy_server, "llm_router", llm_router)

     try:
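
The added block replaces the router's amoderation coroutine with a stub, so the moderation hook sees a flagged result without a live API call. A standalone sketch of that pattern (the Router class below is a stand-in for illustration, not litellm's):

import asyncio
from unittest.mock import MagicMock

class Router:
    async def amoderation(self, model: str, input: str):
        raise RuntimeError("would call the real API")

async def main():
    router = Router()

    # Canned response shaped like a moderation result
    flagged = MagicMock()
    flagged.results = [MagicMock(flagged=True)]

    async def fake_amoderation(*args, **kwargs):
        return flagged

    # Patch the method on the instance so no network call is made
    router.amoderation = fake_amoderation

    resp = await router.amoderation(model="text-moderation-latest", input="hi")
    assert resp.results[0].flagged is True

asyncio.run(main())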
