Skip to content

Commit 50b0609

Browse files
Merge pull request #55 from vamplabAI/agents-config-definitions
Agents from Config
2 parents f05af78 + b19523e commit 50b0609

File tree

70 files changed

+2289
-7340
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

70 files changed

+2289
-7340
lines changed

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22

33
# БЕЗОПАСНОСТЬ - API КЛЮЧИ И КОНФИДЕНЦИАЛЬНЫЕ ДАННЫЕ
44
config.yaml
5+
agents.yaml
56
*.env
67
.env*
78
venv/

agents.yaml.example

Lines changed: 130 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,130 @@
1+
# Example Custom Agents Configuration
2+
# =====================================
3+
# This file demonstrates how to define custom agents for SGR Deep Research
4+
5+
# Notes:
6+
# ------
7+
# 1. Agent names must be unique, or they will be overridden
8+
# 2. All tools must be registered in the tool registry
9+
# 3. LLM, Search, Prompts, Execution, MCP settings are optional and inherit from global config
10+
# 4. Agents override global settings by providing their own values
11+
12+
agents:
13+
# Example 1: Simple custom research agent with overrides
14+
custom_research_agent:
15+
base_class: "sgr_deep_research.core.SGRAgent"
16+
# Optional: Override LLM settings for this agent
17+
llm:
18+
model: "gpt-4o"
19+
temperature: 0.3
20+
max_tokens: 16000
21+
# api_key: "your-custom-api-key" # Optional: use different API key
22+
# base_url: "https://api.openai.com/v1" # Optional: use different endpoint
23+
# proxy: "http://127.0.0.1:8080" # Optional: use proxy
24+
25+
# Optional: Override search settings
26+
search:
27+
max_results: 15
28+
max_pages: 8
29+
content_limit: 2000
30+
31+
# Optional: Execution configuration
32+
execution:
33+
max_steps: 8
34+
max_iterations: 15
35+
max_clarifications: 5
36+
max_searches: 6
37+
mcp_context_limit: 20000
38+
logs_dir: "logs/custom_agent"
39+
reports_dir: "reports/custom_agent"
40+
41+
# Optional: MCP configuration
42+
mcp:
43+
mcpServers:
44+
deepwiki:
45+
url: "https://mcp.deepwiki.com/mcp"
46+
47+
# Tools this agent can use (must be registered in tool registry)
48+
tools:
49+
- "WebSearchTool"
50+
- "ExtractPageContentTool"
51+
- "CreateReportTool"
52+
- "ClarificationTool"
53+
- "GeneratePlanTool"
54+
- "AdaptPlanTool"
55+
- "FinalAnswerTool"
56+
57+
# Example 2: Minimal agent with defaults
58+
simple_agent:
59+
base_class: "SGRToolCallingResearchAgent"
60+
61+
# Only override what's needed
62+
llm:
63+
model: "gpt-4o-mini"
64+
65+
tools:
66+
- "WebSearchTool"
67+
- "FinalAnswerTool"
68+
69+
# Example 3: Fast research agent optimized for speed
70+
fast_research_agent:
71+
base_class: "SGRToolCallingAgent"
72+
73+
llm:
74+
model: "gpt-4o-mini"
75+
temperature: 0.1
76+
max_tokens: 4000
77+
78+
execution:
79+
max_steps: 4
80+
max_iterations: 8
81+
max_clarifications: 2
82+
max_searches: 3
83+
84+
tools:
85+
- "WebSearchTool"
86+
- "CreateReportTool"
87+
- "FinalAnswerTool"
88+
- "ReasoningTool"
89+
90+
# Example 4: Specialized technical analyst with custom prompts
91+
technical_analyst:
92+
base_class: "SGRAgent"
93+
94+
llm:
95+
model: "gpt-4o"
96+
temperature: 0.2
97+
98+
prompts:
99+
system_prompt: "You are a highly specialized technical analyst."
100+
101+
execution:
102+
max_steps: 10
103+
max_iterations: 20
104+
max_clarifications: 3
105+
max_searches: 8
106+
107+
tools:
108+
- "WebSearchTool"
109+
- "ExtractPageContentTool"
110+
- "CreateReportTool"
111+
- "ClarificationTool"
112+
- "FinalAnswerTool"
113+
114+
# Example 5: Agent using inline prompts instead of files
115+
inline_prompt_agent:
116+
base_class: "SGRResearchAgent"
117+
118+
prompts:
119+
system_prompt_str: |
120+
You are a helpful research assistant.
121+
Your goal is to provide accurate and concise information.
122+
initial_user_request_str: |
123+
User request: {user_request}
124+
Please analyze and respond.
125+
clarification_response_str: |
126+
I need clarification on: {clarification_needed}
127+
128+
tools:
129+
- "WebSearchTool"
130+
- "FinalAnswerTool"

benchmark/run_simpleqa_bench.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
grading_answer,
1515
save_result,
1616
)
17-
from sgr_deep_research.settings import get_config
17+
from sgr_deep_research.core.agent_config import GlobalConfig
1818

1919
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
2020
config_path = os.path.join(project_root, "config.yaml")
@@ -29,8 +29,8 @@
2929

3030

3131
async def benchmark_agent(question, answer, model_config) -> Dict[str, Any]:
32-
system_conf = get_config()
33-
agent = BenchmarkAgent(task=question, max_iterations=system_conf.execution.max_steps)
32+
system_conf = GlobalConfig()
33+
agent = BenchmarkAgent(task=question, max_iterations=system_conf.execution.max_iterations)
3434

3535
try:
3636
await agent.execute()

config.yaml.example

Lines changed: 57 additions & 53 deletions
Original file line numberDiff line numberDiff line change
@@ -1,58 +1,62 @@
1-
# SGR Research Agent - Configuration Template
2-
# Production-ready configuration for Schema-Guided Reasoning
3-
# Copy this file to config.yaml and fill in your API keys
4-
5-
# OpenAI API Configuration
6-
openai:
7-
api_key: "your-openai-api-key-here" # Required: Your OpenAI API key
8-
base_url: "https://api.openai.com/v1" # Optional: Alternative URL (e.g., for proxy LiteLLM/vLLM)
9-
model: "gpt-4o-mini" # Model to use
10-
max_tokens: 8000 # Maximum number of tokens
11-
temperature: 0.4 # Generation temperature (0.0-1.0)
12-
proxy: "" # Example: "socks5://127.0.0.1:1081" or "http://127.0.0.1:8080" or leave empty for no proxy
13-
14-
# Tavily Search Configuration
15-
tavily:
16-
api_key: "your-tavily-api-key-here" # Required: Your Tavily API key
17-
api_base_url: "https://api.tavily.com" # Tavily API base URL
18-
19-
# Search Settings
1+
# SGR Deep Research Agent - Configuration Template
2+
# Copy this file to config.yaml and fill in your data
3+
4+
# LLM Configuration
5+
llm:
6+
api_key: "your-openai-api-key-here" # Your OpenAI API key
7+
base_url: "https://api.openai.com/v1" # API base URL
8+
model: "gpt-4o-mini" # Model name
9+
max_tokens: 8000 # Max output tokens
10+
temperature: 0.4 # Temperature (0.0-1.0)
11+
# proxy: "socks5://127.0.0.1:1081" # Optional proxy (socks5:// or http://)
12+
13+
# Search Configuration (Tavily)
2014
search:
21-
max_results: 10 # Maximum number of search results
22-
23-
# Scraping Settings
24-
scraping:
25-
enabled: false # Enable full text scraping of found pages
26-
max_pages: 5 # Maximum pages to scrape per search
27-
content_limit: 1500 # Character limit for full content per source
15+
tavily_api_key: "your-tavily-api-key-here" # Tavily API key (get at tavily.com)
16+
tavily_api_base_url: "https://api.tavily.com" # Tavily API URL
17+
max_results: 10 # Max search results
18+
max_pages: 5 # Max pages to scrape
19+
content_limit: 1500 # Content char limit per source
2820

2921
# Execution Settings
3022
execution:
31-
max_steps: 6 # Maximum number of execution steps
32-
reports_dir: "reports" # Directory for saving reports
33-
logs_dir: "logs" # Directory for saving reports
34-
35-
# Prompts Settings
36-
prompts:
37-
prompts_dir: "prompts" # Directory with prompts
38-
system_prompt_file: "system_prompt.txt" # System prompt file
39-
40-
# Logging Settings
41-
logging:
42-
config_file: "logging_config.yaml" # Logging configuration file path
43-
23+
max_steps: 6 # Max execution steps
24+
max_clarifications: 3 # Max clarification requests
25+
max_iterations: 10 # Max iterations per step
26+
max_searches: 4 # Max search operations
27+
mcp_context_limit: 15000 # Max context length from MCP server response
28+
logs_dir: "logs" # Directory for saving agent execution logs
29+
reports_dir: "reports" # Directory for saving agent reports
30+
31+
# Prompts Configuration
32+
# prompts:
33+
# # Option 1: Use file paths (absolute or relative to project root)
34+
# system_prompt_file: "path/to/your/system_prompt.txt"
35+
# initial_user_request_file: "path/to/your/initial_user_request.txt"
36+
# clarification_response_file: "path/to/your/clarification_response.txt"
37+
38+
# # Option 2: Provide prompts directly as strings
39+
# system_prompt_str: "Your custom system prompt here..."
40+
# initial_user_request_str: "Your custom initial request template..."
41+
# clarification_response_str: "Your custom clarification template..."
42+
43+
# Note: If both file and string are provided, string takes precedence
44+
45+
# MCP (Model Context Protocol) Configuration
4446
mcp:
45-
46-
# Limit on the result of MCP tool invocation.
47-
# A balanced constant value: not too large to avoid filling the entire context window with potentially unimportant data,
48-
# yet not too small to ensure critical information from a single MCP fits through
49-
context_limit: 15000
50-
51-
# https://gofastmcp.com/clients/transports#mcp-json-configuration-transport
52-
transport_config:
53-
mcpServers:
54-
deepwiki:
55-
url: "https://mcp.deepwiki.com/mcp"
56-
57-
context7:
58-
url: "https://mcp.context7.com/mcp"
47+
mcpServers:
48+
deepwiki:
49+
url: "https://mcp.deepwiki.com/mcp"
50+
51+
# Add more MCP servers here:
52+
# your_server:
53+
# url: "https://your-mcp-server.com/mcp"
54+
# headers:
55+
# Authorization: "Bearer your-token"
56+
57+
58+
# Note: The 'agents' field is optional and can be loaded from either:
59+
# - This config.yaml file
60+
# - Any separate file by GlobalConfig.definitions_from_yaml method
61+
# See examples in agents.yaml.example for agent configuration options
62+
agents: {}

docs/WIKI.md

Lines changed: 14 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,12 @@ curl -LsSf https://astral.sh/uv/install.sh | sh
5757
```bash
5858
# 1. Setup configuration
5959
cp config.yaml.example config.yaml
60-
# Edit config.yaml with your API keys
60+
# Edit config.yaml with your configuration
61+
62+
# Optional:
63+
touch agents.yaml
64+
# Add your agent definitions based on config.yaml and agents.yaml.example
65+
6166

6267
# 2. Change to src directory and install dependencies
6368
uv sync
@@ -71,7 +76,11 @@ uv run python sgr_deep_research
7176
```bash
7277
# 1. Setup configuration
7378
cp config.yaml.example config.yaml
74-
# Edit config.yaml with your API keys
79+
# Edit config.yaml with your configuration
80+
81+
# Optional:
82+
touch agents.yaml
83+
# Add your agent definitions based on config.yaml and agents.yaml.example
7584

7685
# 2. Go to the services folder
7786
cd services
@@ -511,9 +520,9 @@ execution:
511520

512521
# Prompts Settings
513522
prompts:
514-
prompts_dir: "prompts" # Directory with prompts
515-
tool_function_prompt_file: "tool_function_prompt.txt" # Tool function prompt file
516-
system_prompt_file: "system_prompt.txt" # System prompt file
523+
system_prompt_file: "prompts/system_prompt.txt" # Path to system prompt file
524+
initial_user_request_file: "prompts/initial_user_request.txt" # Path to initial user request file
525+
clarification_response_file: "prompts/clarification_response.txt" # Path to clarification response file
517526
```
518527
519528
### Server Configuration

logging_config.yaml

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -10,18 +10,20 @@ formatters:
1010
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s'
1111

1212
handlers:
13-
console:
14-
class: logging.StreamHandler
15-
level: INFO
16-
formatter: standard
17-
stream: ext://sys.stdout
1813

1914
console_error:
2015
class: logging.StreamHandler
2116
level: ERROR
2217
formatter: standard
2318
stream: ext://sys.stderr
2419

20+
21+
console:
22+
class: logging.StreamHandler
23+
level: INFO
24+
formatter: standard
25+
stream: ext://sys.stdout
26+
2527
file:
2628
class: logging.handlers.RotatingFileHandler
2729
level: DEBUG

pyproject.toml

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,6 @@ dependencies = [
4646
"youtube-transcript-api>=0.6.0",
4747
# Configuration and utilities - конфигурация и утилиты
4848
"PyYAML>=6.0",
49-
"envyaml>=1.10.0",
5049
"python-dateutil>=2.8.0",
5150
"pydantic-settings>=2.10.1",
5251
"fastapi>=0.116.1",

services/api_service/requirements.txt

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -59,8 +59,6 @@ email-validator==2.3.0
5959
# via
6060
# jambo
6161
# pydantic
62-
envyaml==1.10.211231
63-
# via sgr-deep-research (pyproject.toml)
6462
exceptiongroup==1.3.0
6563
# via fastmcp
6664
fastapi==0.119.0
@@ -188,7 +186,6 @@ pytz==2025.2
188186
pyyaml==6.0.3
189187
# via
190188
# sgr-deep-research (pyproject.toml)
191-
# envyaml
192189
# jsonschema-path
193190
referencing==0.36.2
194191
# via
@@ -247,6 +244,8 @@ trafilatura==2.0.0
247244
# via sgr-deep-research (pyproject.toml)
248245
typing-extensions==4.15.0
249246
# via
247+
# anyio
248+
# exceptiongroup
250249
# fastapi
251250
# openai
252251
# openapi-core

services/docker-compose.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ services:
1111
volumes:
1212
- ../sgr_deep_research:/app/sgr_deep_research:ro
1313
- ../config.yaml:/app/config.yaml:ro
14+
- ../agents.yaml:/app/agents.yaml:ro
1415
- ../logging_config.yaml:/app/logging_config.yaml:ro
1516
- ./logs:/app/logs
1617
- ./reports:/app/reports

sgr_deep_research/__init__.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,9 +6,8 @@
66

77
from sgr_deep_research.api import * # noqa: F403
88
from sgr_deep_research.core import * # noqa: F403
9-
from sgr_deep_research.services import * # noqa: F403
109

11-
__version__ = "0.2.5"
10+
__version__ = "0.4.0"
1211
__author__ = "sgr-deep-research-team"
1312

1413

0 commit comments

Comments
 (0)