|
1 | | -# SGR Research Agent - Configuration Template |
2 | | -# Production-ready configuration for Schema-Guided Reasoning |
3 | | -# Copy this file to config.yaml and fill in your API keys |
4 | | - |
5 | | -# OpenAI API Configuration |
6 | | -openai: |
7 | | - api_key: "your-openai-api-key-here" # Required: Your OpenAI API key |
8 | | - base_url: "https://api.openai.com/v1" # Optional: Alternative URL (e.g., for proxy LiteLLM/vLLM) |
9 | | - model: "gpt-4o-mini" # Model to use |
10 | | - max_tokens: 8000 # Maximum number of tokens |
11 | | - temperature: 0.4 # Generation temperature (0.0-1.0) |
12 | | - proxy: "" # Example: "socks5://127.0.0.1:1081" or "http://127.0.0.1:8080" or leave empty for no proxy |
13 | | - |
14 | | -# Tavily Search Configuration |
15 | | -tavily: |
16 | | - api_key: "your-tavily-api-key-here" # Required: Your Tavily API key |
17 | | - api_base_url: "https://api.tavily.com" # Tavily API base URL |
18 | | - |
19 | | -# Search Settings |
| 1 | +# SGR Deep Research Agent - Configuration Template |
| 2 | +# Copy this file to config.yaml and fill in your data |
| 3 | + |
| 4 | +# LLM Configuration |
| 5 | +llm: |
| 6 | + api_key: "your-openai-api-key-here" # Your OpenAI API key |
| 7 | + base_url: "https://api.openai.com/v1" # API base URL |
| 8 | + model: "gpt-4o-mini" # Model name |
| 9 | + max_tokens: 8000 # Max output tokens |
| 10 | + temperature: 0.4 # Temperature (0.0-1.0) |
| 11 | + # proxy: "socks5://127.0.0.1:1081" # Optional proxy (socks5:// or http://) |
| 12 | + |
| 13 | +# Search Configuration (Tavily) |
20 | 14 | search: |
21 | | - max_results: 10 # Maximum number of search results |
22 | | - |
23 | | -# Scraping Settings |
24 | | -scraping: |
25 | | - enabled: false # Enable full text scraping of found pages |
26 | | - max_pages: 5 # Maximum pages to scrape per search |
27 | | - content_limit: 1500 # Character limit for full content per source |
| 15 | + tavily_api_key: "your-tavily-api-key-here" # Tavily API key (get at tavily.com) |
| 16 | + tavily_api_base_url: "https://api.tavily.com" # Tavily API URL |
| 17 | + max_results: 10 # Max search results |
| 18 | + max_pages: 5 # Max pages to scrape |
| 19 | + content_limit: 1500 # Content char limit per source |
28 | 20 |
|
29 | 21 | # Execution Settings |
30 | 22 | execution: |
31 | | - max_steps: 6 # Maximum number of execution steps |
32 | | - reports_dir: "reports" # Directory for saving reports |
33 | | - logs_dir: "logs" # Directory for saving reports |
34 | | - |
35 | | -# Prompts Settings |
36 | | -prompts: |
37 | | - prompts_dir: "prompts" # Directory with prompts |
38 | | - system_prompt_file: "system_prompt.txt" # System prompt file |
39 | | - |
40 | | -# Logging Settings |
41 | | -logging: |
42 | | - config_file: "logging_config.yaml" # Logging configuration file path |
43 | | - |
| 23 | + max_steps: 6 # Max execution steps |
| 24 | + max_clarifications: 3 # Max clarification requests |
| 25 | + max_iterations: 10 # Max iterations per step |
| 26 | + max_searches: 4 # Max search operations |
| 27 | + mcp_context_limit: 15000 # Max context length from MCP server response |
| 28 | + logs_dir: "logs" # Directory for saving agent execution logs |
| 29 | + reports_dir: "reports" # Directory for saving agent reports |
| 30 | + |
| 31 | +# Prompts Configuration |
| 32 | +# prompts: |
| 33 | +# # Option 1: Use file paths (absolute or relative to project root) |
| 34 | +# system_prompt_file: "path/to/your/system_prompt.txt" |
| 35 | +# initial_user_request_file: "path/to/your/initial_user_request.txt" |
| 36 | +# clarification_response_file: "path/to/your/clarification_response.txt" |
| 37 | + |
| 38 | +# # Option 2: Provide prompts directly as strings |
| 39 | +# system_prompt_str: "Your custom system prompt here..." |
| 40 | +# initial_user_request_str: "Your custom initial request template..." |
| 41 | +# clarification_response_str: "Your custom clarification template..." |
| 42 | + |
| 43 | +# # Note: If both file and string are provided, string takes precedence
| 44 | + |
| 45 | +# MCP (Model Context Protocol) Configuration |
44 | 46 | mcp: |
45 | | - |
46 | | - # Limit on the result of MCP tool invocation. |
47 | | - # A balanced constant value: not too large to avoid filling the entire context window with potentially unimportant data, |
48 | | - # yet not too small to ensure critical information from a single MCP fits through |
49 | | - context_limit: 15000 |
50 | | - |
51 | | - # https://gofastmcp.com/clients/transports#mcp-json-configuration-transport |
52 | | - transport_config: |
53 | | - mcpServers: |
54 | | - deepwiki: |
55 | | - url: "https://mcp.deepwiki.com/mcp" |
56 | | - |
57 | | - context7: |
58 | | - url: "https://mcp.context7.com/mcp" |
| 47 | + mcpServers: |
| 48 | + deepwiki: |
| 49 | + url: "https://mcp.deepwiki.com/mcp" |
| 50 | + |
| 51 | + # Add more MCP servers here: |
| 52 | + # your_server: |
| 53 | + # url: "https://your-mcp-server.com/mcp" |
| 54 | + # headers: |
| 55 | + # Authorization: "Bearer your-token" |
| 56 | + |
| 57 | + |
| 58 | +# Note: The 'agents' field is optional and can be loaded from either: |
| 59 | +# - This config.yaml file |
| 60 | +# - A separate file, via the GlobalConfig.definitions_from_yaml method
| 61 | +# See examples in agents.yaml.example for agent configuration options |
| 62 | +agents: {} |