Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
93 changes: 93 additions & 0 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@
name: CI

on:
  push:
    branches: [ main, develop ]
  pull_request:
    branches: [ main, develop ]

jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # Versions MUST be quoted: unquoted 3.10 is parsed by YAML as the
        # float 3.1, so setup-python would install Python 3.1 / fail.
        python-version: ["3.9", "3.10", "3.11"]

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

      - name: Cache pip dependencies
        uses: actions/cache@v3
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('backend/requirements.txt') }}
          restore-keys: |
            ${{ runner.os }}-pip-

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          cd backend
          pip install -r requirements.txt

      - name: Smoke test - Verify module imports
        run: |
          export PYTHONPATH=backend
          python - <<'PY'
          # Test imports for relocated modules.
          import sys

          try:
              from app.agents.state import AgentState
              print("AgentState import successful")
          except ImportError as e:
              print(f"AgentState import failed: {e}")
              sys.exit(1)

          try:
              from app.agents.devrel.nodes.summarization import store_summary_to_database
              print("store_summary_to_database import successful")
          except ImportError as e:
              print(f"store_summary_to_database import failed: {e}")
              sys.exit(1)

          print("🎉 All smoke test imports successful!")
          PY

      - name: Run tests
        run: |
          # Use an absolute path: a relative PYTHONPATH would resolve against
          # the new working directory after `cd backend` (i.e. backend/backend).
          export PYTHONPATH="$(pwd)/backend"
          cd backend
          # Add actual test commands here when tests are available
          # python -m pytest tests/ -v
          echo "Test placeholder - add actual test commands when available"

  lint:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          # Quoted for the same YAML float-parsing reason as the matrix above.
          python-version: "3.11"

      - name: Install linting dependencies
        run: |
          python -m pip install --upgrade pip
          pip install flake8 black isort

      - name: Run linting checks
        run: |
          cd backend
          # Check code formatting
          black --check --diff .
          # Check import sorting
          isort --check-only --diff .
          # Run flake8 linting (fatal errors only: syntax, undefined names)
          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
4 changes: 2 additions & 2 deletions backend/app/agents/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from .devrel.agent import DevRelAgent
from .shared.base_agent import BaseAgent, AgentState
from .shared.classification_router import ClassificationRouter
from .base_agent import BaseAgent, AgentState
from .classification_router import ClassificationRouter

__all__ = [
"DevRelAgent",
Expand Down
1 change: 1 addition & 0 deletions backend/app/agents/devrel/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@

18 changes: 9 additions & 9 deletions backend/app/agents/devrel/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,18 +4,18 @@
from langgraph.graph import StateGraph, END
from langchain_google_genai import ChatGoogleGenerativeAI
from langgraph.checkpoint.memory import InMemorySaver
from ..shared.base_agent import BaseAgent, AgentState
from ..shared.classification_router import MessageCategory
from ..base_agent import BaseAgent, AgentState
from ..classification_router import MessageCategory
from .tools.search_tool import TavilySearchTool
from .tools.faq_tool import FAQTool
from app.core.config import settings
from .nodes.gather_context_node import gather_context_node
from .nodes.handle_faq_node import handle_faq_node
from .nodes.handle_web_search_node import handle_web_search_node
from .nodes.handle_technical_support_node import handle_technical_support_node
from .nodes.handle_onboarding_node import handle_onboarding_node
from .nodes.generate_response_node import generate_response_node
from .nodes.summarization_node import check_summarization_needed, summarize_conversation_node, store_summary_to_database
from .nodes.gather_context import gather_context_node
from .nodes.handlers.faq import handle_faq_node
from .nodes.handlers.web_search import handle_web_search_node
from .nodes.handlers.technical_support import handle_technical_support_node
from .nodes.handlers.onboarding import handle_onboarding_node
from .generate_response_node import generate_response_node
from .nodes.summarization import check_summarization_needed, summarize_conversation_node, store_summary_to_database

logger = logging.getLogger(__name__)

Expand Down
Original file line number Diff line number Diff line change
@@ -1,28 +1,12 @@
import logging
from typing import Dict, Any
from app.agents.shared.state import AgentState
from app.agents.state import AgentState
from langchain_core.messages import HumanMessage
from ..prompts.base_prompt import GENERAL_LLM_RESPONSE_PROMPT
from .prompts.base_prompt import GENERAL_LLM_RESPONSE_PROMPT
from .nodes.handlers.web_search import create_search_response

logger = logging.getLogger(__name__)

async def _create_search_response(task_result: Dict[str, Any]) -> str:
"""Create a response string from search results."""
query = task_result.get("query")
results = task_result.get("results", [])
if not results:
return f"I couldn't find any information for '{query}'. You might want to try rephrasing your search."

response_parts = [f"Here's what I found for '{query}':"]
for i, result in enumerate(results[:3]):
title = result.get('title', 'N/A')
snippet = result.get('snippet', 'N/A')
url = result.get('url', '#')
result_line = f"{i+1}. {title}: {snippet}"
response_parts.append(result_line)
response_parts.append(f" (Source: {url})")
response_parts.append("You can ask me to search again with a different query if these aren't helpful.")
return "\n".join(response_parts)

async def _create_llm_response(state: AgentState, task_result: Dict[str, Any], llm) -> str:
"""Generate a response using the LLM based on the current state and task result."""
Expand Down Expand Up @@ -89,7 +73,7 @@ async def generate_response_node(state: AgentState, llm) -> dict:
if task_result.get("type") == "faq":
final_response = task_result.get("response", "I don't have a specific answer for that question.")
elif task_result.get("type") == "web_search":
final_response = await _create_search_response(task_result)
final_response = create_search_response(task_result)
else:
final_response = await _create_llm_response(state, task_result, llm)

Expand Down
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
import logging
from datetime import datetime
from app.agents.shared.state import AgentState
from app.agents.shared.classification_router import MessageCategory
from app.agents.state import AgentState

logger = logging.getLogger(__name__)

Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import logging
from app.agents.shared.state import AgentState
from app.agents.state import AgentState

logger = logging.getLogger(__name__)

Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import logging
from app.agents.shared.state import AgentState
from app.agents.state import AgentState

logger = logging.getLogger(__name__)

Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import logging
from app.agents.shared.state import AgentState
from app.agents.state import AgentState

logger = logging.getLogger(__name__)

Expand Down
Original file line number Diff line number Diff line change
@@ -1,12 +1,16 @@
import logging
from app.agents.shared.state import AgentState
from typing import Dict, Any
from app.agents.state import AgentState
from langchain_core.messages import HumanMessage
from ..prompts.search_prompt import EXTRACT_SEARCH_QUERY_PROMPT
from app.agents.devrel.prompts.search_prompt import EXTRACT_SEARCH_QUERY_PROMPT


logger = logging.getLogger(__name__)

async def _extract_search_query(message: str, llm) -> str:
"""Extract a concise search query from the user's message."""
"""
Extract a concise search query from the user's message by invoking the LLM.
"""
logger.info(f"Extracting search query from: {message[:100]}")
try:
prompt = EXTRACT_SEARCH_QUERY_PROMPT.format(message=message)
Expand All @@ -19,7 +23,9 @@ async def _extract_search_query(message: str, llm) -> str:
return search_query

async def handle_web_search_node(state: AgentState, search_tool, llm) -> dict:
"""Handle web search requests"""
"""
Handle web search requests
"""
logger.info(f"Handling web search for session {state.session_id}")

latest_message = ""
Expand All @@ -41,3 +47,25 @@ async def handle_web_search_node(state: AgentState, search_tool, llm) -> dict:
"tools_used": ["tavily_search"],
"current_task": "web_search_handled"
}

def create_search_response(task_result: Dict[str, Any]) -> str:
    """Format web-search output into a readable reply for the user.

    Args:
        task_result: Mapping carrying the original ``query`` string and a
            ``results`` list of dicts with ``title``/``snippet``/``url`` keys.

    Returns:
        A newline-joined message listing at most five results (each with a
        source line), or a fallback sentence when no results were found.
    """
    query = task_result.get("query")
    hits = task_result.get("results", [])

    if not hits:
        return f"I couldn't find any information for '{query}'. You might want to try rephrasing your search."

    lines = [f"Here's what I found for '{query}':"]
    # Cap the reply at the first five hits; missing fields fall back to placeholders.
    for position, hit in enumerate(hits[:5], start=1):
        lines.append(f"{position}. {hit.get('title', 'N/A')}: {hit.get('snippet', 'N/A')}")
        lines.append(f" (Source: {hit.get('url', '#')})")

    lines.append("You can ask me to search again with a different query if these aren't helpful.")
    return "\n".join(lines)
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import logging
from datetime import datetime, timedelta
from typing import Dict, Any
from app.agents.shared.state import AgentState
from app.agents.state import AgentState
from langchain_core.messages import HumanMessage
from app.agents.devrel.prompts.summarization_prompt import CONVERSATION_SUMMARY_PROMPT

Expand All @@ -12,7 +12,9 @@
THREAD_TIMEOUT_HOURS = 1

async def check_summarization_needed(state: AgentState) -> Dict[str, Any]:
"""Check if summarization is needed and update interaction count"""
"""
Check if summarization is needed and update interaction count
"""

current_count = getattr(state, 'interaction_count', 0)
new_count = current_count + 1
Expand Down Expand Up @@ -46,14 +48,15 @@ async def check_summarization_needed(state: AgentState) -> Dict[str, Any]:
return updates

async def summarize_conversation_node(state: AgentState, llm) -> Dict[str, Any]:
"""Summarize the conversation and update the state"""
"""
Summarize the conversation and update the state
"""
logger.info(f"Summarizing conversation for session {state.session_id}")

try:
current_count = state.interaction_count
logger.info(f"Summarizing at interaction count: {current_count}")

# Get the recent messages
all_messages = state.messages

if not all_messages:
Expand All @@ -66,7 +69,6 @@ async def summarize_conversation_node(state: AgentState, llm) -> Dict[str, Any]:
for msg in all_messages
])

# Create prompt
existing_summary = state.conversation_summary
if not existing_summary or existing_summary == "This is the beginning of our conversation.":
existing_summary = "No previous summary - this is the start of our conversation tracking."
Expand All @@ -85,11 +87,9 @@ async def summarize_conversation_node(state: AgentState, llm) -> Dict[str, Any]:
logger.info(f"Generating summary with {len(all_messages)} messages, "
f"conversation text length: {len(conversation_text)}")

# Generate summary
response = await llm.ainvoke([HumanMessage(content=prompt)])
new_summary = response.content.strip()

# Extract key topics from summary
new_topics = await _extract_key_topics(new_summary, llm)

logger.info(f"Conversation summarized successfully for session {state.session_id}")
Expand Down Expand Up @@ -121,7 +121,6 @@ async def _extract_key_topics(summary: str, llm) -> list[str]:
response = await llm.ainvoke([HumanMessage(content=topic_prompt)])
topics_text = response.content.strip()

# Parse topics from response
topics = [topic.strip() for topic in topics_text.split(',') if topic.strip()]
return topics[:5] # Limiting to 5 topics

Expand Down
File renamed without changes.
11 changes: 11 additions & 0 deletions backend/app/api/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
"""
API package for the Devr.AI backend.

This package contains all API-related components:
- router: Main API router with all endpoints
- v1: Version 1 API endpoints
"""

from .router import api_router

__all__ = ["api_router"]
19 changes: 19 additions & 0 deletions backend/app/api/router.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
from fastapi import APIRouter
from .v1.auth import router as auth_router
from .v1.health import router as health_router

# Top-level router that aggregates every versioned sub-router.
api_router = APIRouter()

# (sub-router, URL prefix, OpenAPI tags) registration table.
_REGISTRATIONS = [
    (auth_router, "/v1/auth", ["Authentication"]),
    (health_router, "/v1", ["Health"]),
]

for _router, _prefix, _tags in _REGISTRATIONS:
    api_router.include_router(_router, prefix=_prefix, tags=_tags)

__all__ = ["api_router"]
1 change: 1 addition & 0 deletions backend/app/api/v1/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@

6 changes: 3 additions & 3 deletions backend/app/api/v1/auth.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
from fastapi import APIRouter, Request, HTTPException, Query
from fastapi.responses import HTMLResponse
from app.db.supabase.supabase_client import get_supabase_client
from app.db.supabase.users_service import find_user_by_session_and_verify, get_verification_session_info
from app.db.weaviate.user_profiling import profile_user_from_github
from app.database.supabase.client import get_supabase_client
from app.services.auth.verification import find_user_by_session_and_verify, get_verification_session_info
from app.services.user.profiling import profile_user_from_github
from typing import Optional
import logging
import asyncio
Expand Down
Loading
Loading