@@ -92,19 +92,10 @@ const DeepInfraFormatChat = (chat, model) => {
 export const DeepInfraProvider = {
     name: "DeepInfra",
     models: [
-        { model: "meta-llama/Llama-3.3-70B-Instruct-Turbo", alias: "meta-llama-3.3-70b", stream: true },
-        { model: "meta-llama/Llama-3.2-90B-Vision-Instruct", alias: "meta-llama-3.2-90b-vision", stream: true },
-        { model: "meta-llama/Llama-3.2-11B-Vision-Instruct", alias: "meta-llama-3.2-11b-vision", stream: true },
-        { model: "meta-llama/Meta-Llama-3.1-405B-Instruct", alias: "meta-llama-3.1-405b", stream: true },
-        { model: "meta-llama/Meta-Llama-3.1-70B-Instruct", alias: "meta-llama-3.1-70b", stream: true },
-        { model: "meta-llama/Meta-Llama-3.1-8B-Instruct", alias: "meta-llama-3.1-8b", stream: true },
-        { model: "nvidia/Llama-3.1-Nemotron-70B-Instruct", alias: "llama-3.1-nemotron-70b", stream: true },
-        { model: "microsoft/WizardLM-2-8x22B", alias: "wizardlm-2-8x22b", stream: true },
-        { model: "deepseek-ai/DeepSeek-V3", alias: "deepseek-v3", stream: true },
-        { model: "deepseek-ai/DeepSeek-R1-Turbo", alias: "deepseek-r1", stream: true },
-        { model: "Qwen/Qwen2.5-72B-Instruct", alias: "qwen2.5-72b", stream: true },
-        { model: "Qwen/Qwen2.5-Coder-32B-Instruct", alias: "qwen2.5-coder-32b", stream: true },
-        { model: "Qwen/QwQ-32B-Preview", alias: "qwq-32b-preview", stream: true },
+        { model: "Qwen/Qwen3-235B-A22B", alias: "qwen3-235b-a22b", stream: true },
+        { model: "meta-llama/Llama-4-Maverick-17B-128E-Instruct-Turbo", alias: "llama-4-maverick", stream: true },
+        { model: "deepseek-ai/DeepSeek-V3-0324", alias: "deepseek-v3", stream: true },
+        { model: "deepseek-ai/DeepSeek-R1-0528", alias: "deepseek-r1", stream: true },
     ],
     model_aliases: {
         "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct",