Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -11,4 +11,4 @@ jobs:
- run: uv venv
- run: uv pip install -e .[dev]
- run: uv run ruff check
- run: uv run pytest tests/test_mini_eval.py
- run: OPENAI_API_KEY=dummy uv run pytest tests/test_llmcore_client.py tests/test_llmcore_agent.py
10 changes: 5 additions & 5 deletions app_main.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,19 +3,19 @@
import logging

from fastapi import FastAPI

from src.scouter.agent.mcp import app as mcp_app
from src.scouter.config.llm import get_client_config
from src.scouter.config.logging import setup_logging

from src.scouter.config import config as app_config
from src.scouter.config import setup_logging
from src.scouter.ingestion.api import router as ingestion_router

# Setup logging
setup_logging()

logger = logging.getLogger(__name__)

config = get_client_config()
logger.info("Starting Scouter in %s environment", config.env)
cfg = app_config.llm
logger.info("Starting Scouter in %s environment", cfg.env)

app: FastAPI = FastAPI(
title="Project Scouter",
Expand Down
43 changes: 20 additions & 23 deletions examples/chatbot/chatbot.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,21 +2,19 @@

import asyncio
import json
from typing import TYPE_CHECKING, Any, cast

from mcp import ClientSession
from mcp.client.stdio import StdioServerParameters, stdio_client

from scouter.config.llm import (
DEFAULT_MODEL,
call_with_rate_limit,
get_chatbot_client,
)
from scouter.config import config
from scouter.llmcore import call_llm

# Get LLM client
llm = get_chatbot_client()
if TYPE_CHECKING:
from openai.types.chat import ChatCompletionMessageToolCall


async def chat_with_rag(query: str) -> str:
async def chat_with_rag(query: str) -> str | None:
"""Single message chatbot with RAG using Scouter + OpenRouter and MCP tools."""
server_params = StdioServerParameters(
command="python",
Expand All @@ -33,7 +31,7 @@ async def chat_with_rag(query: str) -> str:
mcp_tools = await session.list_tools()

# Convert MCP tools to OpenAI format
openai_tools = [
openai_tools: list[dict[str, Any]] = [
{
"type": "function",
"function": {
Expand All @@ -54,20 +52,19 @@ async def chat_with_rag(query: str) -> str:
]

# Call LLM with tools
response = call_with_rate_limit(
llm,
model=DEFAULT_MODEL,
messages=messages, # type: ignore[arg-type]
tools=openai_tools,
tool_choice="auto",
max_tokens=200,
response = call_llm(
config.llm.model,
messages, # type: ignore[arg-type]
openai_tools, # type: ignore[arg-type]
{"temperature": 0.9, "max_tokens": 200, "tool_choice": "auto"}, # type: ignore[arg-type]
)

# Handle tool calls
if response.choices[0].message.tool_calls: # type: ignore[attr-defined]
for tool_call in response.choices[0].message.tool_calls: # type: ignore[attr-defined]
tool_name = tool_call.function.name
tool_args = json.loads(tool_call.function.arguments)
tool_call = cast("ChatCompletionMessageToolCall", tool_call)
tool_name = tool_call.function.name # type: ignore[attr-defined]
tool_args = json.loads(tool_call.function.arguments) # type: ignore[attr-defined]
result = await session.call_tool(tool_name, tool_args)
# Add to messages
messages.append( # type: ignore[PGH003]
Expand All @@ -82,11 +79,11 @@ async def chat_with_rag(query: str) -> str:
)

# Call LLM again with updated messages
final_response = call_with_rate_limit(
llm,
model=DEFAULT_MODEL,
messages=messages,
max_tokens=200,
final_response = call_llm(
config.llm.model,
messages, # type: ignore[arg-type]
None,
{"max_tokens": 200}, # type: ignore[arg-type]
)
final_content = final_response.choices[0].message.content # type: ignore[attr-defined]
else:
Expand Down
3 changes: 3 additions & 0 deletions ruff.toml
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,9 @@
select = ["E", "F", "W", "C90", "I", "N", "UP", "YTT", "S", "BLE", "FBT", "B", "A", "COM", "C4", "DTZ", "T10", "DJ", "EM", "EXE", "FA", "ISC", "ICN", "G", "INP", "PIE", "T20", "PYI", "PT", "Q", "RSE", "RET", "SLF", "SLOT", "SIM", "TID", "TCH", "INT", "ARG", "PTH", "ERA", "PD", "PGH", "PL", "TRY", "FLY", "NPY", "AIR", "PERF", "FURB", "LOG", "RUF"]
ignore = ["E501", "S101", "COM812"]

[lint.per-file-ignores]
"tests/**/*" = ["PLC0415", "ARG001", "F841", "PLR2004"]

[format]
quote-style = "double"
indent-style = "space"
Expand Down
116 changes: 116 additions & 0 deletions src/scouter/config/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
from __future__ import annotations

import logging
import os
import sys
from dataclasses import dataclass


@dataclass
class LLMConfig:
    """LLM client configuration, normally built via :meth:`load_from_env`."""

    provider: str = "openai"
    api_key: str | None = None
    model: str = "openai/gpt-oss-20b:free"
    base_url: str | None = None
    temperature: float = 0.7
    max_tokens: int | None = None
    timeout: int = 30
    max_retries: int = 3
    env: str = "test"

    @classmethod
    def load_from_env(cls) -> LLMConfig:
        """Build an LLMConfig from environment variables.

        Reads LLM_PROVIDER (default "openai"), the provider's API key and
        base URL variables, and ENV (default "test").

        Returns:
            A populated LLMConfig.

        Raises:
            ValueError: If no API key is set for the selected provider, or
                if ENV is not one of "development", "production", "test".
        """
        provider = os.getenv("LLM_PROVIDER", "openai")
        if provider == "openrouter":
            # OPENAI_API_KEY is accepted as a fallback key for openrouter.
            api_key = os.getenv("OPENROUTER_API_KEY") or os.getenv("OPENAI_API_KEY")
            base_url = os.getenv("OPENROUTER_BASE_URL", "https://openrouter.ai/api/v1")
        else:
            # "openai" and any unrecognized provider use the OpenAI settings
            # (the original "openai" and default branches were identical).
            api_key = os.getenv("OPENAI_API_KEY")
            base_url = os.getenv("OPENAI_BASE_URL")

        if not api_key:
            key_name = (
                "OPENROUTER_API_KEY" if provider == "openrouter" else "OPENAI_API_KEY"
            )
            msg = f"API key required for provider '{provider}'. Set {key_name} environment variable."
            raise ValueError(msg)

        env = os.getenv("ENV", "test")
        if env not in ("development", "production", "test"):
            msg = "env must be one of: development, production, test"
            raise ValueError(msg)

        return cls(
            provider=provider,
            api_key=api_key,
            base_url=base_url,
            env=env,
        )


@dataclass
class DBConfig:
    """Neo4j connection settings plus the model names used for graph RAG."""

    uri: str = "bolt://localhost:7687"
    user: str = "neo4j"
    password: str = ""
    embedder_model: str = "Qwen/Qwen3-Embedding-0.6B"
    llm_model: str = "openai/gpt-oss-20b:free"

    @classmethod
    def load_from_env(cls) -> DBConfig:
        """Read connection settings from NEO4J_* variables, keeping defaults otherwise."""
        overrides = {
            "uri": os.getenv("NEO4J_URI", cls.uri),
            "user": os.getenv("NEO4J_USER", cls.user),
            "password": os.getenv("NEO4J_PASSWORD", cls.password),
        }
        return cls(**overrides)


@dataclass
class LoggingConfig:
    """Logging configuration; currently only the default level."""

    # Level name for the "scouter" logger; consumed by setup_logging() below.
    level: str = "INFO"


@dataclass
class AppConfig:
    """Top-level application configuration grouping all sub-configs."""

    llm: LLMConfig
    db: DBConfig
    logging: LoggingConfig

    @classmethod
    def load_from_env(cls) -> AppConfig:
        """Assemble the full application config from environment variables."""
        llm_cfg = LLMConfig.load_from_env()
        db_cfg = DBConfig.load_from_env()
        return cls(llm=llm_cfg, db=db_cfg, logging=LoggingConfig())


config = AppConfig.load_from_env()


def setup_logging(level: str | None = None) -> None:
    """Configure the "scouter" logger to write to stdout.

    Args:
        level: Logging level name (e.g. "DEBUG", "INFO"). When None, the
            level from the loaded application config is used.

    Raises:
        AttributeError: If ``level`` does not name a valid logging level.
    """
    level = level or config.logging.level
    # Resolve the level name to its numeric value once (it was previously
    # looked up separately for the logger and the handler).
    numeric_level = getattr(logging, level.upper())

    logger = logging.getLogger("scouter")
    logger.setLevel(numeric_level)

    # Remove handlers left over from a previous call so repeated setup
    # does not produce duplicate log lines.
    for handler in logger.handlers[:]:
        logger.removeHandler(handler)

    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(numeric_level)

    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)

    # Quiet third-party loggers and stop scouter records from also reaching
    # the root handlers (which would double-print them).
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.WARNING)
    logger.propagate = False
111 changes: 0 additions & 111 deletions src/scouter/config/llm.py

This file was deleted.

42 changes: 0 additions & 42 deletions src/scouter/config/logging.py

This file was deleted.

Loading
Loading