Skip to content

Commit f2bb35f

Browse files
author
maksimov maksim
committed
Merge branch 'benchmark_simpleqa' of https://github.com/vamplabAI/sgr-deep-research into benchmark_simpleqa
2 parents 4a165c5 + 7317f26 commit f2bb35f

File tree

2 files changed

+104
-1
lines changed

2 files changed

+104
-1
lines changed

docs/examples/simple_shine_cli.py

Lines changed: 103 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,103 @@
"""Very basic demo for processing research queries and clarification requests
from agent.

Usage:
    pip install rich openai
    python -m docs.examples.simple_shine_cli
"""

import json

from openai import OpenAI
from rich.console import Console
from rich.prompt import Prompt

# Shared console for all output in this demo.
console = Console()
# Points at a locally running SGR agent exposing an OpenAI-compatible API;
# the key is unused by the local server, hence the dummy value.
client = OpenAI(base_url="http://localhost:8010/v1", api_key="dummy")
19+
def safe_get_delta(chunk):
    """Return the first choice's delta from a streaming chunk, or ``None``.

    Defensively handles chunks without a ``choices`` attribute, with an
    empty choices list, with a ``None`` first choice, or with a first
    choice lacking a ``delta`` attribute.
    """
    choices = getattr(chunk, "choices", None)
    if not choices:
        return None
    first = choices[0]
    if first is None:
        return None
    return getattr(first, "delta", None)
def stream_response_until_tool_call_or_end(model, messages):
    """Stream a chat completion, echoing content to the console in real time.

    Stops early as soon as any tool call appears in the stream. If the tool
    call is ``clarificationtool``, its ``questions`` list is extracted from
    the JSON arguments.

    Returns a tuple ``(full_content, clarification_questions, agent_id)``
    where ``clarification_questions`` is ``None`` when no clarification was
    requested, and ``agent_id`` is the ``sgr_agent_*`` model name reported
    by the server (or ``None`` if it never appeared).
    """
    stream = client.chat.completions.create(
        model=model,
        messages=messages,
        stream=True,
        temperature=0,
    )

    agent_id = None
    collected = []

    for chunk in stream:
        # The server advertises the per-session agent id via the model field.
        model_name = getattr(chunk, "model", None)
        if model_name and model_name.startswith("sgr_agent_"):
            agent_id = model_name

        delta = safe_get_delta(chunk)
        if delta is None:
            continue

        tool_calls = getattr(delta, "tool_calls", None)
        if tool_calls:
            questions = None
            for call in tool_calls:
                if call.function and call.function.name == "clarificationtool":
                    try:
                        payload = json.loads(call.function.arguments)
                        questions = payload.get("questions", [])
                    except Exception as e:
                        # NOTE(review): assumes the arguments arrive as one
                        # complete JSON string in a single delta — confirm
                        # against the server's streaming behavior.
                        console.print(f"[red]Error parsing clarification: {e}[/red]")
            # Stop streaming as soon as a tool call is detected.
            return "".join(collected), questions, agent_id

        text = getattr(delta, "content", None)
        if text:
            collected.append(text)
            console.print(text, end="", style="white")

    return "".join(collected), None, agent_id
# --- Interactive driver: ask for a request, then loop, answering any
# clarification questions the agent raises until a final answer streams out.
console.print("\n[bold green]Research Assistant v1.0[/bold green]", style="bold white")
initial_request = Prompt.ask("[bold yellow]Enter your research request[/bold yellow]")
console.print(f"\nStarting research: [bold]{initial_request}[/bold]")

current_model = "sgr_agent"
messages = [{"role": "user", "content": initial_request}]
agent_id = None

while True:
    console.print()

    full_content, clarification_questions, returned_agent_id = stream_response_until_tool_call_or_end(
        model=current_model, messages=messages
    )

    # Once the server assigns a session-specific agent id, keep using it as
    # the model name so follow-up turns reach the same agent.
    if returned_agent_id:
        agent_id = returned_agent_id
        current_model = agent_id

    # No clarification requested: the research is finished.
    if clarification_questions is None:
        console.print()
        break

    console.print()
    console.print("\n[bold red]Clarification needed:[/bold red]")
    for i, question in enumerate(clarification_questions, start=1):
        console.print(f"[bold]{i}.[/bold] {question}", style="yellow")

    clarification = Prompt.ask("[bold grey]Enter your clarification[/bold grey]")
    console.print(f"\n[bold green]Providing clarification:[/bold green] [italic]{clarification}[/italic]")

    messages.append({"role": "user", "content": clarification})

console.print("\n[bold green] Report will be prepared in appropriate directory![/bold green]")

sgr_deep_research/core/agents/tools_agent.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,7 @@ async def _prepare_tools(self) -> list[ChatCompletionFunctionToolParam]:
6868
tools -= {
6969
WebSearchTool,
7070
}
71-
return [pydantic_function_tool(tool, name=tool.tool_name) for tool in tools]
71+
return [pydantic_function_tool(tool, name=tool.tool_name, description="") for tool in tools]
7272

7373
async def _reasoning_phase(self) -> None:
7474
"""No explicit reasoning phase, reasoning is done internally by LLM."""

0 commit comments

Comments
 (0)