
Commit 0743409

Merge pull request #17290 from BerriAI/litellm_fix_create_batch_Header
Respect custom llm provider in header
2 parents b949ec9 + f7380a5

File tree

1 file changed: +4 -2 lines changed

litellm/proxy/batches_endpoints/endpoints.py

Lines changed: 4 additions & 2 deletions
@@ -31,7 +31,6 @@
     get_original_file_id,
     prepare_data_with_credentials,
 )
-
 from litellm.proxy.utils import handle_exception_on_proxy, is_known_model
 from litellm.types.llms.openai import LiteLLMBatchCreateRequest
 
@@ -112,7 +111,10 @@ async def create_batch( # noqa: PLR0915
     is_router_model = is_known_model(model=router_model, llm_router=llm_router)
 
     custom_llm_provider = (
-        provider or data.pop("custom_llm_provider", None) or "openai"
+        provider
+        or data.pop("custom_llm_provider", None)
+        or get_custom_llm_provider_from_request_headers(request=request)
+        or "openai"
     )
     _create_batch_data = LiteLLMBatchCreateRequest(**data)
     input_file_id = _create_batch_data.get("input_file_id", None)
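
What changed: `create_batch` now resolves the batch's provider through a four-step fallback chain instead of three, consulting the request headers before falling back to the default. Below is a minimal sketch of that chain, assuming a FastAPI `Request` and a hypothetical `custom-llm-provider` header name; the real `get_custom_llm_provider_from_request_headers` in litellm may read a different header, and `resolve_custom_llm_provider` is an illustrative wrapper, not a function from the codebase.

```python
# Sketch of the provider-resolution chain from this diff. The header name
# and the helper body are assumptions; only the fallback order is taken
# from the commit.
from typing import Optional

from fastapi import Request


def get_custom_llm_provider_from_request_headers(
    request: Optional[Request],
) -> Optional[str]:
    """Return the provider named in the request headers, if present."""
    if request is None:
        return None
    # Starlette/FastAPI header lookup is case-insensitive.
    return request.headers.get("custom-llm-provider")  # assumed header name


def resolve_custom_llm_provider(
    provider: Optional[str],
    data: dict,
    request: Optional[Request],
) -> str:
    # Mirrors the diff: explicit argument, then the request-body field,
    # then (new in this commit) the request header, then the default.
    return (
        provider
        or data.pop("custom_llm_provider", None)
        or get_custom_llm_provider_from_request_headers(request=request)
        or "openai"
    )
```

Because the chain uses `or`, any falsy source (a `None` or an empty string) falls through to the next one, so a request that sends no header still resolves to `"openai"` exactly as before this change.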
