Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions app_main.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,12 @@

from src.scouter.agent.mcp import app as mcp_app
from src.scouter.config.llm import get_client_config
from src.scouter.config.logging import setup_logging
from src.scouter.ingestion.api import router as ingestion_router

# Setup logging
setup_logging()

logger = logging.getLogger(__name__)

config = get_client_config()
Expand Down
42 changes: 42 additions & 0 deletions src/scouter/config/logging.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
"""Logging configuration for the Scouter project."""

import logging
import sys


def setup_logging(level: str = "INFO") -> None:
    """Configure logging for the application.

    Installs a single stdout handler with a timestamped format on the
    "scouter" logger, and raises the root logger's threshold to WARNING
    so third-party libraries stay quiet. Safe to call more than once:
    existing handlers are removed before the new one is attached.

    Args:
        level: The logging level name (DEBUG, INFO, WARNING, ERROR,
            CRITICAL). Case-insensitive.

    Raises:
        ValueError: If ``level`` is not a recognized logging level name.
    """
    # Resolve the level name once and fail fast with a clear error on
    # typos like "TRACE" (getattr alone would raise AttributeError).
    numeric_level = getattr(logging, level.upper(), None)
    if not isinstance(numeric_level, int):
        msg = f"Invalid logging level: {level!r}"
        raise ValueError(msg)

    logger = logging.getLogger("scouter")
    logger.setLevel(numeric_level)

    # Remove any existing handlers so repeated calls don't duplicate output.
    for handler in logger.handlers[:]:
        logger.removeHandler(handler)

    # Console handler writing to stdout at the requested level.
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(numeric_level)

    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    console_handler.setFormatter(formatter)

    logger.addHandler(console_handler)

    # Keep the root logger at WARNING so chatter from other libraries
    # is suppressed.
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.WARNING)

    # Do NOT propagate to the root logger: the handler attached above is
    # the single source of scouter output, which avoids duplicate lines.
    logger.propagate = False
18 changes: 17 additions & 1 deletion src/scouter/llmcore/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,15 @@
from .agent import AgentRun, run_agent
from .client import ChatCompletionOptions, LLMConfig, call_llm, create_llm_client
from .exceptions import AgentError, LLMError, ToolExecutionError
from .exceptions import (
AgentError,
InvalidRunStateError,
InvalidToolDefinitionError,
LLMError,
MaxRetriesExceededError,
ToolExecutionError,
)
from .messages import create_instruction
from .prompt import resolve_prompt
from .tools import (
Tool,
create_tool,
Expand All @@ -20,6 +29,7 @@
ChatCompletionToolMessageParam,
ChatCompletionToolParam,
ChatCompletionUserMessageParam,
Prompt,
)
from .utils import retry_loop

Expand All @@ -36,16 +46,22 @@
"ChatCompletionToolMessageParam",
"ChatCompletionToolParam",
"ChatCompletionUserMessageParam",
"InvalidRunStateError",
"InvalidToolDefinitionError",
"LLMConfig",
"LLMError",
"MaxRetriesExceededError",
"Prompt",
"Tool",
"ToolExecutionError",
"call_llm",
"create_instruction",
"create_llm_client",
"create_tool",
"execute_tool",
"lookup_tool",
"register_tool",
"resolve_prompt",
"retry_loop",
"run_agent",
"run_tool",
Expand Down
19 changes: 18 additions & 1 deletion src/scouter/llmcore/agent.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
from __future__ import annotations

import json
import logging
from dataclasses import dataclass, field
from time import time
from typing import TYPE_CHECKING, cast
Expand All @@ -20,8 +21,11 @@
)

from .client import ChatCompletionOptions, call_llm
from .exceptions import InvalidRunStateError
from .tools import run_tool

logger = logging.getLogger(__name__)


@dataclass
class InputStep:
Expand Down Expand Up @@ -89,7 +93,8 @@ def total_usage(
def last_output(self) -> str:
if not self.steps:
msg = "No steps in run"
raise ValueError(msg)
logger.error("Attempted to get last output from empty run")
raise InvalidRunStateError(msg)
last_step = self.steps[-1]
if isinstance(last_step, LLMStep):
content = last_step.message.get("content")
Expand Down Expand Up @@ -129,6 +134,9 @@ def run_agent(
tools: Iterable[ChatCompletionToolUnionParam] | None = None,
options: ChatCompletionOptions | None = None,
):
logger.info(
"Starting agent run with model=%s, initial_steps=%d", model, len(run.steps)
)
while run.continue_condition(run):
completion: ChatCompletion = call_llm(
model, run.conversation_history, tools, options
Expand All @@ -138,18 +146,26 @@ def run_agent(

# Handle tool calls
if msg.tool_calls:
logger.debug("Processing %d tool calls", len(msg.tool_calls))
for tc in msg.tool_calls:
tc = cast("ChatCompletionMessageToolCall", tc)
args = json.loads(tc.function.arguments)
logger.debug(
"Executing tool '%s' with args: %s", tc.function.name, args
)
start = time()
try:
output = run_tool(tc.function.name, args)
success = True
error = None
logger.debug("Tool '%s' executed successfully", tc.function.name)
except Exception as e: # noqa: BLE001
output = ""
success = False
error = str(e)
logger.warning(
"Tool '%s' execution failed: %s", tc.function.name, str(e)
)
end = time()
run.add_step(
ToolStep(
Expand All @@ -162,3 +178,4 @@ def run_agent(
error,
)
)
logger.info("Agent run completed with %d total steps", len(run.steps))
23 changes: 21 additions & 2 deletions src/scouter/llmcore/client.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import logging
import os
from collections.abc import Iterable
from dataclasses import dataclass
Expand All @@ -12,6 +13,8 @@

from .utils import retry_loop

logger = logging.getLogger(__name__)


class ChatCompletionOptions(TypedDict, total=False):
"""Options for ChatCompletion API calls.
Expand Down Expand Up @@ -50,13 +53,20 @@ def load_from_env() -> "LLMConfig":

def create_llm_client(cfg: LLMConfig | None = None) -> OpenAI:
    """Build an OpenAI client from *cfg*.

    Args:
        cfg: Optional client configuration; when omitted, settings are
            loaded from environment variables.

    Returns:
        A configured OpenAI client instance.
    """
    if not cfg:
        cfg = LLMConfig.load_from_env()
    logger.debug(
        "Creating LLM client with timeout=%d, max_retries=%d",
        cfg.timeout,
        cfg.max_retries,
    )

    new_client = OpenAI(
        api_key=cfg.api_key,
        base_url=cfg.base_url,
        timeout=cfg.timeout,
        max_retries=cfg.max_retries,
    )
    logger.info("LLM client created successfully")
    return new_client


client = create_llm_client()
Expand All @@ -76,11 +86,20 @@ def call_llm(
tools: Optional tools.
options: Optional ChatCompletion options like max_tokens, temperature, etc.
"""
tools_count = sum(1 for _ in tools) if tools else 0
logger.debug(
"Calling LLM with model=%s, message_count=%d, tools_count=%d",
model,
len(messages),
tools_count,
)

def _call():
kwargs = options or {}
return client.chat.completions.create(
model=model, messages=messages, tools=tools or [], **kwargs
)

return retry_loop(_call)
result = retry_loop(_call)
logger.debug("LLM call completed successfully")
return result
12 changes: 12 additions & 0 deletions src/scouter/llmcore/exceptions.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,3 +8,15 @@ class ToolExecutionError(LLMError):

class AgentError(LLMError):
"""Raised when agent operations fail."""


class MaxRetriesExceededError(LLMError):
    """Raised when maximum retry attempts are exceeded without success."""


class InvalidRunStateError(LLMError):
    """Raised when an agent run is in an invalid state (e.g. reading the last output of a run with no steps)."""


class InvalidToolDefinitionError(LLMError):
    """Raised when a tool is defined incorrectly."""
29 changes: 29 additions & 0 deletions src/scouter/llmcore/messages.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
import logging

from .agent import InputStep, Step
from .types import ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam

logger = logging.getLogger(__name__)


def create_instruction(
    steps: list[Step], system: str | None = None, prompt: str | None = None
) -> None:
    """Append system and/or user messages to *steps* as InputStep entries.

    Args:
        steps: Mutable step list, extended in place.
        system: Optional system-message content; skipped when falsy.
        prompt: Optional user-message content; skipped when falsy.
    """
    logger.debug(
        "Creating instruction with system=%s, prompt=%s", bool(system), bool(prompt)
    )
    if system:
        system_message = ChatCompletionSystemMessageParam(
            role="system", content=system
        )
        steps.append(InputStep(message=system_message))
        logger.debug("Added system message to steps")
    if prompt:
        user_message = ChatCompletionUserMessageParam(role="user", content=prompt)
        steps.append(InputStep(message=user_message))
        logger.debug("Added user message to steps")
16 changes: 16 additions & 0 deletions src/scouter/llmcore/prompt.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
import logging

from .types import Prompt

logger = logging.getLogger(__name__)


def resolve_prompt(prompt: Prompt, *args, **kwargs) -> str:
    """Return *prompt* as a string, invoking it with the given args when callable.

    Plain strings are returned unchanged; anything else is treated as a
    callable and invoked with ``*args``/``**kwargs``.
    """
    if not isinstance(prompt, str):
        logger.debug("Executing callable prompt")
        rendered = prompt(*args, **kwargs)
        logger.debug("Callable prompt executed successfully")
        return rendered
    logger.debug("Resolved string prompt directly")
    return prompt
Loading
Loading