Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions examples/run-examples.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ class Sample:

TEXT_SAMPLES = [
Sample("stream.py"),
Sample("gemini.py"),
Sample("stream_all.py"),
Sample("tools_schema.py"),
Sample("agent_simple.py"),
Expand Down
1 change: 1 addition & 0 deletions examples/samples/check_connection.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
PROVIDERS: list[tuple[str, ai.Provider, str]] = [
("ai_gateway", ai.ai_gateway, "anthropic/claude-sonnet-4"),
("anthropic", ai.anthropic, "claude-sonnet-4-20250514"),
("google", ai.google, "gemini-2.5-flash"),
("openai", ai.openai, "gpt-5.4-mini"),
]

Expand Down
29 changes: 29 additions & 0 deletions examples/samples/gemini.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
"""Gemini direct - stream from Google's Gemini API."""

import asyncio
import sys

import ai

# Skip (exit code 0, i.e. not a failure) when neither GOOGLE_API_KEY nor
# GEMINI_API_KEY is configured, so the example runner can proceed with the
# remaining samples instead of erroring out.
if ai.google.client().api_key is None:
    print("[SKIP] GOOGLE_API_KEY or GEMINI_API_KEY not set")
    sys.exit(0)

# NOTE(review): the other samples in this change use "gemini-2.5-flash";
# confirm the "gemini-3.1-flash-lite" variant here is intentional.
model = ai.google("gemini-3.1-flash-lite")

# One system instruction plus a single user turn.
messages = [
    ai.system_message("Be concise."),
    ai.user_message("Explain why the sky is blue in two sentences."),
]


async def main() -> None:
    """Open a streaming request against the model and echo each text delta
    to stdout as it arrives, finishing with a trailing newline."""
    async with ai.stream(model, messages) as response:
        async for ev in response:
            if isinstance(ev, ai.events.TextDelta):
                print(ev.chunk, end="", flush=True)
    print()


if __name__ == "__main__":
    asyncio.run(main())
1 change: 1 addition & 0 deletions examples/samples/stream_all.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
MODELS: list[tuple[str, ai.Provider, str]] = [
("ai_gateway", ai.ai_gateway, "anthropic/claude-sonnet-4.6"),
("anthropic", ai.anthropic, "claude-sonnet-4-6"),
("google", ai.google, "gemini-2.5-flash"),
("openai", ai.openai, "gpt-5.5"),
]

Expand Down
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ authors = [
requires-python = ">=3.12"
dependencies = [
"anthropic>=0.83.0",
"google-genai>=2.0.1",
"httpx>=0.28.1",
"mcp>=1.18.0",
"openai>=2.14.0",
Expand Down
2 changes: 2 additions & 0 deletions src/ai/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@
anthropic,
check_connection,
generate,
google,
openai,
stream,
)
Expand Down Expand Up @@ -87,6 +88,7 @@
# Provider factories
"openai",
"anthropic",
"google",
"ai_gateway",
# Agents — primary API
"Agent",
Expand Down
5 changes: 4 additions & 1 deletion src/ai/models/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,11 @@
Usage::

import ai
from ai.models import openai, anthropic, ai_gateway
from ai.models import openai, anthropic, google, ai_gateway

model = openai("gpt-5.4")
model = anthropic("claude-sonnet-4-6")
model = google("gemini-2.5-flash")
model = ai_gateway("anthropic/claude-sonnet-4")

# stream — auto-creates client from env vars
Expand Down Expand Up @@ -44,6 +45,7 @@
from .core.model import Model
from .core.params import GenerateParams, ImageParams, VideoParams
from .core.proto import CheckConnFn, GenerateFn, Provider, StreamFn
from .google import google
from .openai import openai

__all__ = [
Expand All @@ -66,6 +68,7 @@
# Provider factories
"ai_gateway",
"anthropic",
"google",
"openai",
# Adapter registration
"register_generate",
Expand Down
2 changes: 2 additions & 0 deletions src/ai/models/core/adapters.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,12 +32,14 @@ def _ensure_adapters() -> None:
from ..ai_gateway.adapter import generate as ai_gw_generate
from ..ai_gateway.adapter import stream as ai_gw_stream
from ..anthropic.adapter import stream as anthropic_stream
from ..google.adapter import stream as google_stream
from ..openai.adapter import stream as openai_stream

_stream_adapters["ai-gateway-v3"] = ai_gw_stream
_generate_adapters["ai-gateway-v3"] = ai_gw_generate
_stream_adapters["openai"] = openai_stream
_stream_adapters["anthropic"] = anthropic_stream
_stream_adapters["google"] = google_stream


def register_stream(adapter: str, fn: proto.StreamFn) -> None:
Expand Down
32 changes: 32 additions & 0 deletions src/ai/models/google/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
"""Google Gemini provider.

Usage::

from ai.models import google

model = google("gemini-2.5-flash")
ids = await google.list()

# built-in tools
async with ai.stream(
model, msgs,
tools=[google.tools.google_search()],
) as s:
...

The adapter module is loaded lazily to avoid pulling in the ``google-genai``
SDK at import time.
"""

from . import tools
from .provider import google

__all__ = ["google", "tools"]


def __getattr__(name: str) -> object:
    """PEP 562 lazy-attribute hook.

    Defers importing the adapter module (and therefore the ``google-genai``
    SDK) until ``stream`` is first accessed on this package.
    """
    if name != "stream":
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

    from .adapter import stream

    return stream
Loading
Loading