|
14 | 14 | import pytest |
15 | 15 |
|
16 | 16 | from litellm.proxy.auth.user_api_key_auth import get_api_key |
| 17 | +from litellm.proxy.auth.route_checks import RouteChecks |
17 | 18 |
|
18 | 19 |
|
19 | 20 | def test_get_api_key(): |
@@ -56,3 +57,130 @@ def test_get_api_key_with_custom_litellm_key_header( |
56 | 57 | route="", |
57 | 58 | request=MagicMock(), |
58 | 59 | ) == (api_key, passed_in_key) |
| 60 | + |
| 61 | + |
def test_route_checks_is_llm_api_route():
    """Test RouteChecks.is_llm_api_route() correctly identifies LLM API routes including passthrough endpoints.

    Positive cases: OpenAI, Anthropic, provider-passthrough, MCP,
    placeholder (threads/assistants/files/batches) and Azure-deployment
    style routes must all be recognized.
    Negative cases: management/health routes and invalid (non-string or
    empty) inputs must be rejected.
    """
    # --- Routes that MUST be identified as LLM API routes ---------------
    openai_routes = [
        "/v1/chat/completions",
        "/chat/completions",
        "/v1/completions",
        "/completions",
        "/v1/embeddings",
        "/embeddings",
        "/v1/images/generations",
        "/images/generations",
        "/v1/audio/transcriptions",
        "/audio/transcriptions",
        "/v1/audio/speech",
        "/audio/speech",
        "/v1/moderations",
        "/moderations",
        "/v1/models",
        "/models",
        "/v1/rerank",
        "/rerank",
        "/v1/realtime",
        "/realtime",
    ]

    anthropic_routes = [
        "/v1/messages",
        "/v1/messages/count_tokens",
    ]

    # Passthrough routes are the key improvement over the old route checking.
    passthrough_routes = [
        "/bedrock/v1/chat/completions",
        "/vertex-ai/v1/chat/completions",
        "/vertex_ai/v1/chat/completions",
        "/cohere/v1/chat/completions",
        "/gemini/v1/chat/completions",
        "/anthropic/v1/messages",
        "/langfuse/v1/chat/completions",
        "/azure/v1/chat/completions",
        "/openai/v1/chat/completions",
        "/assemblyai/v1/transcript",
        "/eu.assemblyai/v1/transcript",
        "/vllm/v1/chat/completions",
        "/mistral/v1/chat/completions",
    ]

    mcp_routes = [
        "/mcp",
        "/mcp/",
        "/mcp/test",
    ]

    # Routes whose patterns contain placeholders (object ids substituted in).
    placeholder_routes = [
        "/v1/threads/thread_49EIN5QF32s4mH20M7GFKdlZ",
        "/threads/thread_49EIN5QF32s4mH20M7GFKdlZ",
        "/v1/assistants/assistant_123",
        "/assistants/assistant_123",
        "/v1/files/file_123",
        "/files/file_123",
        "/v1/batches/batch_123",
        "/batches/batch_123",
    ]

    # Azure OpenAI deployment-style routes.
    azure_routes = [
        "/openai/deployments/gpt-4/chat/completions",
        "/openai/deployments/gpt-3.5-turbo/completions",
        "/engines/gpt-4/chat/completions",
        "/engines/gpt-3.5-turbo/completions",
    ]

    # Single consolidated loop instead of six copy-pasted identical loops.
    all_llm_routes = (
        openai_routes
        + anthropic_routes
        + passthrough_routes
        + mcp_routes
        + placeholder_routes
        + azure_routes
    )
    for route in all_llm_routes:
        assert RouteChecks.is_llm_api_route(route=route), f"Route {route} should be identified as LLM API route"

    # --- Routes that must NOT be identified as LLM API routes -----------
    non_llm_routes = [
        "/health",
        "/metrics",
        "/key/list",
        "/team/list",
        "/user/list",
        "/config",
        "/routes",
        "/",
        "/admin/settings",
        "/logs",
        "/debug",
        "/test",
    ]

    for route in non_llm_routes:
        assert not RouteChecks.is_llm_api_route(route=route), f"Route {route} should NOT be identified as LLM API route"

    # --- Invalid (non-string or empty) inputs must return False ---------
    invalid_inputs = [
        None,
        123,
        [],
        {},
        "",
    ]

    for invalid_input in invalid_inputs:
        assert not RouteChecks.is_llm_api_route(route=invalid_input), f"Invalid input {invalid_input} should return False"
0 commit comments