@@ -28,13 +28,15 @@
 
 
 @pytest.mark.asyncio
-async def test_openai_moderation_error_raising():
+async def test_openai_moderation_error_raising(monkeypatch):
     """
     Tests to see OpenAI Moderation raises an error for a flagged response
     """
-
+    from unittest.mock import AsyncMock, MagicMock
+    from litellm.types.llms.openai import OpenAIModerationResponse
+
+    litellm.openai_moderations_model_name = "text-moderation-latest"
     openai_mod = _ENTERPRISE_OpenAI_Moderation()
-    litellm.openai_moderations_model_name = "omni-moderation-latest"
     _api_key = "sk-12345"
     _api_key = hash_token("sk-12345")
     user_api_key_dict = UserAPIKeyAuth(api_key=_api_key)
@@ -45,15 +47,24 @@ async def test_openai_moderation_error_raising():
     llm_router = litellm.Router(
         model_list=[
             {
-                "model_name": "omni-moderation-latest",
+                "model_name": "text-moderation-latest",
                 "litellm_params": {
-                    "model": "omni-moderation-latest",
-                    "api_key": os.environ["OPENAI_API_KEY"],
+                    "model": "text-moderation-latest",
+                    "api_key": os.environ.get("OPENAI_API_KEY", "fake-key"),
                 },
             }
         ]
     )
 
+    # Mock the amoderation call to return a flagged response
+    mock_response = MagicMock(spec=OpenAIModerationResponse)
+    mock_response.results = [MagicMock(flagged=True)]
+
+    async def mock_amoderation(*args, **kwargs):
+        return mock_response
+
+    llm_router.amoderation = mock_amoderation
+
     setattr(litellm.proxy.proxy_server, "llm_router", llm_router)
 
     try:
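
Note on the stubbing pattern: assigning any coroutine function to llm_router.amoderation shadows the real method on that instance, so the test never reaches the network. The diff imports AsyncMock but stubs with a plain async def; below is a minimal, self-contained sketch of the equivalent AsyncMock-based stub. FakeRouter is a stand-in for litellm.Router, and the response shape only assumes a .results list whose items carry a .flagged attribute, mirroring the mock in the diff.

import asyncio
from unittest.mock import AsyncMock, MagicMock


class FakeRouter:
    # Stand-in for litellm.Router; the real amoderation would call the API.
    async def amoderation(self, model, input):
        raise RuntimeError("test must not reach the real moderation endpoint")


async def main():
    router = FakeRouter()

    # Same shape as in the test: .results[0].flagged is True.
    mock_response = MagicMock()
    mock_response.results = [MagicMock(flagged=True)]

    # Equivalent to the hand-written `async def mock_amoderation` above:
    # awaiting an AsyncMock returns its return_value.
    router.amoderation = AsyncMock(return_value=mock_response)

    response = await router.amoderation(model="text-moderation-latest", input="hi")
    assert response.results[0].flagged is True
    # Unlike a plain coroutine stub, AsyncMock also records the call:
    router.amoderation.assert_awaited_once()


asyncio.run(main())

A plain async def is the lighter choice when the test only needs a canned return value; AsyncMock additionally tracks awaits for assertions. Similarly, spec=OpenAIModerationResponse in the test makes attribute typos on the mock raise AttributeError instead of silently passing.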