-
Notifications
You must be signed in to change notification settings - Fork 864
Open
Description
Passing `think=True` to `chat()` or `AsyncClient.chat()` doesn't appear to change the output or behavior when using the `qwen3:8b` model.
Environment:
- ollama-python: 0.5.4
- ollama version: 0.11.8
My test code:
import asyncio
from ollama import AsyncClient, ChatResponse, chat
async def async_run_chat(think_flag: bool) -> None:
    """Stream a chat completion, printing thinking tokens and then the answer.

    Why the original looked broken: with ``think=True`` the model's
    reasoning is delivered in ``message.thinking``, NOT in
    ``message.content`` — reading only ``content`` makes the ``think``
    flag appear to be a no-op.
    """
    client = AsyncClient(host="http://localhost:11434")  # change host if remote
    prompt = "Give me a concise explanation of why the sky appears blue."
    print(f"\n🔎 Starting chat with think={think_flag}\n")
    async for chunk in await client.chat(
        model="qwen3:8b",  # or any installed model
        messages=[{"role": "user", "content": prompt}],
        stream=True,
        think=think_flag,  # ✅ direct parameter, no options dict
    ):
        message = chunk.get("message", {})
        # Reasoning stream arrives separately from the final answer.
        thinking_piece = message.get("thinking", "")
        if thinking_piece:
            print(thinking_piece, end="", flush=True)
        text_piece = message.get("content", "")
        if text_piece:
            print(text_piece, end="", flush=True)
    print("\n✅ Done!\n")
def stream_run_chat(think_flag: bool) -> None:
    """Stream a chat completion synchronously, showing thinking output.

    With ``think=True`` the reasoning arrives in ``message.thinking``,
    not ``message.content`` — both fields must be read to see any effect
    of the ``think`` parameter.
    """
    stream = chat(
        model='qwen3:8b',
        messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
        stream=True,
        think=think_flag
    )
    for chunk in stream:
        message = chunk['message']
        # Print reasoning tokens (only present when think is enabled).
        thinking = message.get('thinking', '')
        if thinking:
            print(thinking, end='', flush=True)
        # Use .get to tolerate chunks that carry only thinking, no content.
        print(message.get('content', ''), end='', flush=True)
def non_stream_run_chat(think_flag: bool) -> None:
    """Run a non-streaming chat completion and print thinking plus answer.

    The non-streaming response carries the reasoning in
    ``response.message.thinking``; printing only
    ``response['message']['content']`` hides the effect of ``think=True``.
    """
    response: ChatResponse = chat(
        model='qwen3:8b',
        messages=[
            {
                'role': 'user',
                'content': 'Why is the sky blue?',
            },
        ],
        think=think_flag
    )
    message = response['message']
    # Surface the model's reasoning when think is enabled.
    thinking = message.get('thinking', '')
    if thinking:
        print('--- thinking ---')
        print(thinking)
        print('--- answer ---')
    print(message['content'])
if __name__ == "__main__":
# asyncio.run(async_run_chat(think_flag=False))
# stream_run_chat(think_flag=False)
non_stream_run_chat(think_flag=False)
Metadata
Metadata
Assignees
Labels
No labels