Merged
69 changes: 0 additions & 69 deletions src/scouter/agent/agent.py

This file was deleted.

53 changes: 0 additions & 53 deletions src/scouter/agent/tools.py

This file was deleted.

30 changes: 17 additions & 13 deletions src/scouter/config/llm.py
@@ -24,27 +24,31 @@ class ClientConfig(BaseSettings):
     env: str = "test"
 
     @model_validator(mode="after")
-    def set_provider_defaults(self):
+    def validate_and_set_provider_defaults(self):
+        # Validate provider
+        supported_providers = ["openai", "openrouter"]
+        if self.provider not in supported_providers:
+            msg = f"Unsupported provider '{self.provider}'. Supported providers: {', '.join(supported_providers)}"
+            raise ValueError(msg)
+
+        # Set provider-specific defaults
         if self.provider == "openrouter":
             self.api_base = self.api_base or "https://openrouter.ai/api/v1"
             self.api_key = self.api_key or os.getenv("OPENROUTER_API_KEY")
-        else:
+        elif self.provider == "openai":
             self.api_key = self.api_key or os.getenv("OPENAI_API_KEY")
 
+        # Validate API key is set
         if not self.api_key:
-            msg = f"API key required for provider {self.provider}"
+            msg = f"API key required for provider '{self.provider}'. Set {'OPENROUTER_API_KEY' if self.provider == 'openrouter' else 'OPENAI_API_KEY'} environment variable."
             raise ValueError(msg)
+
+        # Validate environment
+        if self.env not in ["development", "production", "test"]:
+            msg = "env must be one of: development, production, test"
+            raise ValueError(msg)
         return self
-
-    def __init__(self, **data):
-        super().__init__(**data)
-        if self.provider == "openrouter":
-            self.api_base = self.api_base or "https://openrouter.ai/api/v1"
-            self.api_key = os.getenv("OPENROUTER_API_KEY", self.api_key)
-            # Map model if needed, e.g., self.model = "openai/gpt-3.5-turbo"
-        self.env = os.getenv("SCOUTER_ENV", self.env)
-        return self
 
 
 def get_client_config(provider: str = "openai") -> ClientConfig:
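A quick way to exercise the consolidated validator. This is a minimal sketch, not project test code: it assumes ClientConfig's remaining fields have defaults and that get_client_config simply builds a ClientConfig for the given provider; the key value is a placeholder.

    import os

    from scouter.config.llm import ClientConfig, get_client_config

    os.environ["OPENROUTER_API_KEY"] = "sk-or-example"  # placeholder key

    config = get_client_config("openrouter")
    print(config.api_base)  # default "https://openrouter.ai/api/v1" filled in by the validator

    try:
        ClientConfig(provider="anthropic")  # not in supported_providers
    except ValueError as exc:
        print(exc)  # Unsupported provider 'anthropic'. Supported providers: openai, openrouter
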
@@ -80,8 +84,8 @@ def get_neo4j_driver() -> neo4j.Driver:


 @lru_cache(maxsize=1)
-def get_neo4j_llm() -> OpenAILLM:
-    config = get_client_config("openrouter")
+def get_neo4j_llm(provider: str = "openrouter") -> OpenAILLM:
+    config = get_client_config(provider)
     return OpenAILLM(config.model, api_key=config.api_key, base_url=config.api_base)


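One review note: lru_cache(maxsize=1) keys its single slot on the new provider argument, so alternating providers rebuilds the LLM on every switch. A self-contained sketch of that behavior (make_llm is a stand-in, not project code):

    from functools import lru_cache

    @lru_cache(maxsize=1)
    def make_llm(provider: str = "openrouter") -> str:
        print(f"building LLM client for {provider}")
        return f"llm-{provider}"

    make_llm("openrouter")  # builds
    make_llm("openrouter")  # cache hit, builds nothing
    make_llm("openai")      # evicts the openrouter entry and builds again
    make_llm("openrouter")  # builds yet again -- the cache holds only one entry
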
11 changes: 10 additions & 1 deletion src/scouter/llmcore/__init__.py
@@ -1,4 +1,11 @@
-from .agent import AgentRun, LLMStep, ToolStep, run_agent
+from .agent import (
+    AgentConfig,
+    AgentRun,
+    LLMStep,
+    ToolStep,
+    create_agent,
+    run_agent,
+)
 from .client import ChatCompletionOptions, LLMConfig, call_llm, create_llm_client
 from .exceptions import (
     AgentError,
@@ -34,6 +41,7 @@
 from .utils import retry_loop
 
 __all__ = [
+    "AgentConfig",
     "AgentError",
     "AgentRun",
     "ChatCompletion",
@@ -57,6 +65,7 @@
"ToolExecutionError",
"ToolStep",
"call_llm",
"create_agent",
"create_instruction",
"create_llm_client",
"create_tool",
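With these exports, downstream code can pull the whole agent API from the package root. A minimal sketch, assuming the package is installed as scouter:

    from scouter.llmcore import AgentConfig, AgentRun, create_agent

    config = AgentConfig(name="demo", instructions="You are a helpful assistant.")
    agent: AgentRun = create_agent(config)
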
120 changes: 102 additions & 18 deletions src/scouter/llmcore/agent.py
@@ -9,9 +9,10 @@

 from .client import ChatCompletionOptions, call_llm
 from .exceptions import InvalidRunStateError
-from .flow import Flow, LLMStep, ToolCall, ToolStep
+from .flow import Flow, InputStep, LLMStep, ToolCall, ToolStep
 from .memory import MemoryFunction, full_history_memory
-from .tools import run_tool
+from .messages import create_instruction
+from .tools import lookup_tool, run_tool
 
 if TYPE_CHECKING:
     from collections.abc import Callable, Iterable
@@ -26,6 +27,33 @@
 
 logger = logging.getLogger(__name__)
 
+# Constants
+TUPLE_INSTRUCTION_LENGTH = 2
+
+
+# Type for flexible instruction specification
+InstructionType = (
+    str  # Just system prompt
+    | tuple[str, str]  # (system_prompt, user_prompt)
+    | list["ChatCompletionMessageParam"]  # Full message list
+    | None  # No instructions
+)
+
+
+@dataclass
+class AgentConfig:
+    """Configuration for agent creation."""
+
+    name: str = "default"
+    provider: str = "openai"
+    model: str = "gpt-4o-mini"
+    temperature: float = 0.7
+    max_tokens: int | None = None
+    instructions: InstructionType = None
+    tools: list[str] | None = None  # Tool names
+    memory_function: MemoryFunction = full_history_memory
+    continue_condition: Callable[[AgentRun], bool] | None = None
+
 
 @dataclass
 class AgentRun:
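The new InstructionType union accepts three shapes, normalized by _process_instructions further down in this diff. A hedged sketch of each form:

    from scouter.llmcore import AgentConfig

    # 1. Bare string -> one system message.
    a = AgentConfig(instructions="You review pull requests.")

    # 2. (system_prompt, user_prompt) tuple -> expanded via create_instruction.
    b = AgentConfig(instructions=("You review pull requests.", "Review this diff."))

    # 3. Full message list -> passed through unchanged.
    c = AgentConfig(
        instructions=[
            {"role": "system", "content": "You review pull requests."},
            {"role": "user", "content": "Review this diff."},
        ]
    )
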
@@ -34,9 +62,6 @@ class AgentRun:
     )
     flows: list[Flow] = field(default_factory=list)
     memory_function: MemoryFunction = field(default=full_history_memory)
-    agents: dict[str, Callable[[], AgentRun]] = field(
-        default_factory=dict
-    )  # For multi-agent: factory functions
 
     def add_flow(self, flow: Flow) -> None:
         """Add a flow to the run."""
@@ -46,19 +71,6 @@ def get_context(self) -> list[ChatCompletionMessageParam]:
         """Get configurable memory context instead of flat history."""
         return self.memory_function(self)
 
-    def run_sub_agent(self, agent_id: str) -> Flow:
-        """Run a sub-agent within this run, returning its flow."""
-        if agent_id not in self.agents:
-            msg = f"Agent {agent_id} not registered"
-            raise ValueError(msg)
-        flow = Flow(id=f"{agent_id}_{len(self.flows)}", agent_id=agent_id)
-        flow.mark_running()
-        self.add_flow(flow)
-        # TODO: Integrate with run_agent for actual execution
-        # For now, placeholder: assume sub_run executes and adds steps to flow
-        flow.mark_completed()
-        return flow
-
     @property
     def total_usage(
         self,
@@ -200,3 +212,75 @@ async def execute_single_tool(tc: ChatCompletionMessageToolCall):
     current_flow.add_step(ToolStep(calls=success))
     current_flow.mark_completed()
     logger.info("Agent run completed with %d total flows", len(run.flows))
+
+
+def _process_instructions(
+    instructions: InstructionType,
+) -> list[ChatCompletionMessageParam]:
+    """Convert instruction specification to message list."""
+    if instructions is None:
+        return []
+    if isinstance(instructions, str):
+        # Just system prompt
+        return [{"role": "system", "content": instructions}]
+    if (
+        isinstance(instructions, tuple)
+        and len(instructions) == TUPLE_INSTRUCTION_LENGTH
+    ):
+        return create_instruction(instructions[0], instructions[1])
+    if isinstance(instructions, list):
+        # Full message list
+        return instructions
+
+    msg = f"Invalid instruction format: {type(instructions)}"
+    raise ValueError(msg)
+
+
+def create_agent(config: AgentConfig) -> AgentRun:
+    """Create an agent from configuration."""
+    # Start with default continue condition if none specified
+    continue_cond = config.continue_condition
+    if continue_cond is None:
+        continue_cond = default_continue_condition_factory()
+
+    return AgentRun(
+        memory_function=config.memory_function, continue_condition=continue_cond
+    )
+
+
+async def run_agent(
+    agent: AgentRun,
+    config: AgentConfig,
+    messages: list[ChatCompletionMessageParam] | None = None,
+    **options,
+) -> AgentRun:
+    """Run an agent with configuration."""
+    input_messages = messages or []
+
+    # Get tools from registry
+    tools = None
+    if config.tools:
+        tools = [lookup_tool(name).openai_tool_spec() for name in config.tools]
+
+    # Process instructions and combine with input messages
+    instruction_messages = _process_instructions(config.instructions)
+    all_messages = instruction_messages + input_messages
+
+    # Add initial messages as InputStep to the agent
+    initial_flow = Flow(id="initial", agent_id=config.name)
+    initial_flow.add_step(InputStep(input=all_messages))
+    agent.add_flow(initial_flow)
+
+    # Build options dict, only including max_tokens if set
+    flow_options = {"temperature": config.temperature, **options}
+    if config.max_tokens is not None:
+        flow_options["max_tokens"] = config.max_tokens  # type: ignore[assignment]
+
+    await run_flow(
+        agent,
+        model=config.model,
+        tools=tools,
+        options=ChatCompletionOptions(**flow_options),
+    )
+
+    return agent
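Putting the new pieces together, an end-to-end sketch. The model name is an assumption and no tools are registered here; run_agent is async, so it needs an event loop:

    import asyncio

    from scouter.llmcore import AgentConfig, create_agent, run_agent

    async def main() -> None:
        config = AgentConfig(
            name="reviewer",
            model="gpt-4o-mini",
            instructions=("You review pull requests.", "Summarize the risky changes."),
            tools=None,  # or a list of names previously registered via create_tool
        )
        agent = create_agent(config)
        await run_agent(agent, config)
        print(f"completed with {len(agent.flows)} flows")

    if __name__ == "__main__":
        asyncio.run(main())
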
16 changes: 14 additions & 2 deletions src/scouter/llmcore/client.py
@@ -45,9 +45,21 @@ class LLMConfig:

     @staticmethod
     def load_from_env() -> "LLMConfig":
+        provider = os.getenv("LLM_PROVIDER", "openai")
+        if provider == "openrouter":
+            api_key = os.getenv("OPENROUTER_API_KEY") or os.getenv("OPENAI_API_KEY")
+            base_url = os.getenv("OPENROUTER_BASE_URL", "https://openrouter.ai/api/v1")
+        elif provider == "openai":
+            api_key = os.getenv("OPENAI_API_KEY")
+            base_url = os.getenv("OPENAI_BASE_URL")
+        else:
+            # Default to openai for backward compatibility
+            api_key = os.getenv("OPENAI_API_KEY")
+            base_url = os.getenv("OPENAI_BASE_URL")
+
         return LLMConfig(
-            api_key=os.getenv("OPENAI_API_KEY"),
-            base_url=os.getenv("OPENAI_BASE_URL"),
+            api_key=api_key,
+            base_url=base_url,
         )
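The provider switch can be exercised purely through environment variables. A minimal sketch; the key value is a placeholder:

    import os

    from scouter.llmcore import LLMConfig

    os.environ["LLM_PROVIDER"] = "openrouter"
    os.environ["OPENROUTER_API_KEY"] = "sk-or-example"  # placeholder

    config = LLMConfig.load_from_env()
    print(config.base_url)  # https://openrouter.ai/api/v1 unless OPENROUTER_BASE_URL overrides it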

