
Commit 75bcfbb

[Feat] New model vertex_ai/deepseek-ai/deepseek-r1-0528-maas (#13594)
* add vertex_ai/deepseek-ai/deepseek-r1-0528-maas
* fix init
* test_model_info_for_vertex_ai_deepseek_model
1 parent ffd165c commit 75bcfbb


4 files changed: +49 −1 lines changed


litellm/__init__.py

Lines changed: 6 additions & 1 deletion
@@ -486,6 +486,7 @@ def identify(event_details):
 vertex_embedding_models: List = []
 vertex_anthropic_models: List = []
 vertex_llama3_models: List = []
+vertex_deepseek_models: List = []
 vertex_ai_ai21_models: List = []
 vertex_mistral_models: List = []
 ai21_models: List = []
@@ -618,6 +619,9 @@ def add_known_models():
         elif value.get("litellm_provider") == "vertex_ai-llama_models":
             key = key.replace("vertex_ai/", "")
             vertex_llama3_models.append(key)
+        elif value.get("litellm_provider") == "vertex_ai-deepseek_models":
+            key = key.replace("vertex_ai/", "")
+            vertex_deepseek_models.append(key)
         elif value.get("litellm_provider") == "vertex_ai-mistral_models":
             key = key.replace("vertex_ai/", "")
             vertex_mistral_models.append(key)
@@ -850,7 +854,8 @@ def add_known_models():
     + vertex_text_models
     + vertex_anthropic_models
     + vertex_vision_models
-    + vertex_language_models,
+    + vertex_language_models
+    + vertex_deepseek_models,
     "ai21": ai21_models,
     "bedrock": bedrock_models + bedrock_converse_models,
     "petals": petals_models,

litellm/model_prices_and_context_window_backup.json

Lines changed: 15 additions & 0 deletions
@@ -9686,6 +9686,21 @@
         "supports_assistant_prefill": true,
         "supports_tool_choice": true
     },
+    "vertex_ai/deepseek-ai/deepseek-r1-0528-maas": {
+        "max_tokens": 8192,
+        "max_input_tokens": 65336,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 1.35e-06,
+        "output_cost_per_token": 5.4e-06,
+        "litellm_provider": "vertex_ai-deepseek_models",
+        "mode": "chat",
+        "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing#partner-models",
+        "supports_function_calling": true,
+        "supports_assistant_prefill": true,
+        "supports_reasoning": true,
+        "supports_tool_choice": true,
+        "supports_prompt_caching": true
+    },
     "vertex_ai/meta/llama3-405b-instruct-maas": {
         "max_tokens": 32000,
         "max_input_tokens": 32000,

model_prices_and_context_window.json

Lines changed: 15 additions & 0 deletions
@@ -9686,6 +9686,21 @@
         "supports_assistant_prefill": true,
         "supports_tool_choice": true
     },
+    "vertex_ai/deepseek-ai/deepseek-r1-0528-maas": {
+        "max_tokens": 8192,
+        "max_input_tokens": 65336,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 1.35e-06,
+        "output_cost_per_token": 5.4e-06,
+        "litellm_provider": "vertex_ai-deepseek_models",
+        "mode": "chat",
+        "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing#partner-models",
+        "supports_function_calling": true,
+        "supports_assistant_prefill": true,
+        "supports_reasoning": true,
+        "supports_tool_choice": true,
+        "supports_prompt_caching": true
+    },
     "vertex_ai/meta/llama3-405b-instruct-maas": {
         "max_tokens": 32000,
         "max_input_tokens": 32000,

tests/test_litellm/test_utils.py

Lines changed: 13 additions & 0 deletions
@@ -2416,3 +2416,16 @@ def mock_import(name, *args, **kwargs):
 if __name__ == "__main__":
     # Allow running this test file directly for debugging
     pytest.main([__file__, "-v"])
+
+
+def test_model_info_for_vertex_ai_deepseek_model():
+    model_info = litellm.get_model_info(
+        model="vertex_ai/deepseek-ai/deepseek-r1-0528-maas"
+    )
+    assert model_info is not None
+    assert model_info["litellm_provider"] == "vertex_ai-deepseek_models"
+    assert model_info["mode"] == "chat"
+
+    assert model_info["input_cost_per_token"] is not None
+    assert model_info["output_cost_per_token"] is not None
+    print("vertex deepseek model info", model_info)
