# Logging level for this module, overridable via the LOGGER_LEVEL env var.
LOGGER_LEVEL = os.getenv("LOGGER_LEVEL", "INFO")

# OpenAI reasoning models only support temperature=1 (and accept a
# `reasoning_effort` parameter). Membership in this set switches the
# request-building path for those models.
# Set literal instead of set([...]): clearer and avoids building a throwaway list.
OPENAI_REASONING_MODEL_SET = {
    "o1",
    "o3",
    "o3-mini",
    "o4-mini",
    "gpt-5",
    "gpt-5-2025-08-07",
}
# Module-level logger, configured from the LOGGER_LEVEL environment setting.
logger = bootstrap_logger(level=LOGGER_LEVEL)
@@ -29,13 +29,15 @@ def _create_client(self, config: DictConfig):
2929 """Create configured OpenAI client"""
3030 if self .async_client :
3131 return AsyncOpenAI (
32- api_key = config .env .openai_api_key ,
33- base_url = config .env .openai_base_url ,
32+ api_key = self .cfg .llm .openai_api_key ,
33+ base_url = self .cfg .llm .openai_base_url ,
34+ timeout = 1800 ,
3435 )
3536 else :
3637 return OpenAI (
37- api_key = config .env .openai_api_key ,
38- base_url = config .env .openai_base_url ,
38+ api_key = self .cfg .llm .openai_api_key ,
39+ base_url = self .cfg .llm .openai_base_url ,
40+ timeout = 1800 ,
3941 )
4042
4143 @retry (wait = wait_fixed (10 ), stop = stop_after_attempt (5 ))
@@ -58,6 +60,7 @@ async def _create_message(
5860 or self .model_name .startswith ("o4" )
5961 or self .model_name .startswith ("gpt-4.1" )
6062 or self .model_name .startswith ("gpt-4o" )
63+ or self .model_name .startswith ("gpt-5" )
6164 )
6265 logger .debug (f" Calling LLM ({ 'async' if self .async_client else 'sync' } )" )
6366 # put the system prompt in the first message since OpenAI API does not support system prompt in
@@ -88,21 +91,28 @@ async def _create_message(
8891 tool_list = await self .convert_tool_definition_to_tool_call (tools_definitions )
8992
9093 try :
91- # Set temperature=1 for reasoning models
92- temperature = (
93- 1.0
94- if self .model_name in OPENAI_REASONING_MODEL_SET
95- else self .temperature
96- )
97-
98- params = {
99- "model" : self .model_name ,
100- "temperature" : temperature ,
101- "max_completion_tokens" : self .max_tokens ,
102- "messages" : messages_copy ,
103- "tools" : tool_list ,
104- "stream" : False ,
105- }
94+ # Set temperature and reasoning_effort for reasoning models
95+ if self .model_name in OPENAI_REASONING_MODEL_SET :
96+ temperature = 1.0
97+ params = {
98+ "model" : self .model_name ,
99+ "temperature" : temperature ,
100+ "max_completion_tokens" : self .max_tokens ,
101+ "messages" : messages_copy ,
102+ "reasoning_effort" : self .reasoning_effort ,
103+ "tools" : tool_list ,
104+ "stream" : False ,
105+ }
106+ else :
107+ temperature = self .temperature
108+ params = {
109+ "model" : self .model_name ,
110+ "temperature" : temperature ,
111+ "max_completion_tokens" : self .max_tokens ,
112+ "messages" : messages_copy ,
113+ "tools" : tool_list ,
114+ "stream" : False ,
115+ }
106116
107117 if self .top_p != 1.0 :
108118 params ["top_p" ] = self .top_p
0 commit comments