Minimal Zig wrapper for the Anthropic Claude Messages API.
This project follows the basic usage shape of anthropic-sdk-python, but only implements the smallest practical surface:
- synchronous client for `POST /v1/messages`
- basic SSE streaming support for `POST /v1/messages`
- basic tool use support
- API key auth
- basic request/response structs
- parsed API error response
It intentionally does not implement batches, beta APIs, retries, or a full high-level stream accumulator.
- Zig 0.15.2
- Anthropic API key in `ANTHROPIC_API_KEY`, or pass `api_key` explicitly
The public module name is anthropic_sdk_zig.
Main entry points:
- `Client.init(allocator, options)`
- `client.messages().create(request)`
- `client.messages().stream(request)`
- `client.createMessage(request)`
- `client.streamMessage(request)`
- `MessageResponse.text(allocator)`
- `MessageStream.nextEvent()`
- `ServerSentEvent.json(T, allocator)`
- `Tool`
- `ToolUseBlockParam`
- `ToolResultBlockParam`
Default behavior:
- reads `ANTHROPIC_API_KEY` if `ClientOptions.api_key` is not set
- reads `ANTHROPIC_BASE_URL` if `ClientOptions.base_url` is not set
- sends `anthropic-version: 2023-06-01`
- sends `content-type: application/json`
const std = @import("std");
const anthropic = @import("anthropic_sdk_zig");
// Minimal non-streaming example: send one user message and print either the
// reply text or the parsed API error envelope.
pub fn main() !void {
// GeneralPurposeAllocator reports leaks when deinit() is called in debug builds.
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
const allocator = gpa.allocator();
// Empty options: per the docs above, Client.init falls back to the
// ANTHROPIC_API_KEY / ANTHROPIC_BASE_URL environment variables.
var client = try anthropic.Client.init(allocator, .{});
defer client.deinit();
// create() POSTs /v1/messages and returns a tagged union:
// .ok on success, .api_error on a parsed Claude error response.
var result = try client.messages().create(.{
.model = "claude-sonnet-4-5",
.max_tokens = 256,
.messages = &.{
.{
.role = .user,
.content = "Hello, Claude",
},
},
});
// deinit() releases whichever variant is active.
defer result.deinit();
switch (result) {
.ok => |*message| {
// text() allocates the concatenated reply text; caller frees it.
const text = try message.text(allocator);
defer allocator.free(text);
std.debug.print("request_id={s}\n{s}\n", .{
// request_id is optional; print an empty string when absent.
message.request_id orelse "",
text,
});
},
.api_error => |*api_error| {
// Error path: HTTP status plus the parsed {type, message} error envelope.
std.debug.print("status={d} type={s} message={s}\n", .{
api_error.statusCode(),
api_error.parsed.value.@"error".type,
api_error.parsed.value.@"error".message,
});
},
}
}Streaming sends the same request shape with stream: true added internally and returns raw SSE events.
const std = @import("std");
const anthropic = @import("anthropic_sdk_zig");
// Minimal streaming example: same request shape as create(); stream() adds
// stream: true internally and yields raw SSE events.
pub fn main() !void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
const allocator = gpa.allocator();
// Empty options: API key / base URL come from the environment (see docs above).
var client = try anthropic.Client.init(allocator, .{});
defer client.deinit();
var result = try client.messages().stream(.{
.model = "claude-sonnet-4-5",
.max_tokens = 256,
.messages = &.{
.{
.role = .user,
.content = "Hello, Claude",
},
},
});
defer result.deinit();
switch (result) {
.stream => |*stream| {
// nextEvent() yields raw SSE events until the stream ends (null).
while (try stream.nextEvent()) |event| {
// textDelta() returns an optional allocated chunk — presumably null
// for non-text events (ping, message_delta, ...); TODO confirm.
if (try event.textDelta(allocator)) |text| {
defer allocator.free(text);
std.debug.print("{s}", .{text});
}
}
std.debug.print("\n", .{});
},
.api_error => |*api_error| {
// Non-success responses surface as a parsed error envelope, not a stream.
std.debug.print("status={d} type={s} message={s}\n", .{
api_error.statusCode(),
api_error.parsed.value.@"error".type,
api_error.parsed.value.@"error".message,
});
},
}
}Example of parsing a specific event type:
const parsed = try event.json(anthropic.StreamContentBlockDeltaEvent, allocator);
defer parsed.deinit();

Kimi Code currently documents Anthropic-compatible access with:
- `ANTHROPIC_BASE_URL=https://api.kimi.com/coding/`
- `ANTHROPIC_API_KEY=<your Kimi Code key>`
- model `kimi-for-coding`
This repo includes a minimal multi-turn streaming demo at examples/kimi_stream_chat.zig.
Run it with:
cp .env-example .env
# then edit .env and fill your key
set -a
. ./.env
set +a
zig build kimi-stream-chat

The demo:
- targets `https://api.kimi.com/coding`
- uses `KIMI_MODEL` when set, otherwise defaults to `kimi-for-coding`
- keeps text-only conversation history in memory
- streams `text_delta` chunks to the terminal as they arrive
- supports `/clear` and `/exit`
This SDK now supports the minimum pieces needed for a tool loop:
- declare standard tools in `tools`
- optionally force tool choice with `tool_choice`
- parse `tool_use` content blocks from Claude responses
- send `tool_result` blocks back in the next `user` message
It does not execute tools for you. The SDK stays at the protocol layer.
// Tool-use example: declare a standard "bash" tool and force the model to
// call it via tool_choice. The SDK stays at the protocol layer — executing
// the tool is the caller's job (see note above).
// The input_schema property map is parsed from a JSON literal into a
// std.json.Value so it can be embedded in the request.
var parsed_schema = try std.json.parseFromSlice(
std.json.Value,
allocator,
\\{"command":{"type":"string"}}
, .{});
defer parsed_schema.deinit();
var result = try client.messages().create(.{
.model = "claude-sonnet-4-5",
.max_tokens = 256,
.tools = &.{
.{
.name = "bash",
.description = "Run a shell command.",
// JSON-Schema-shaped tool input: an object with a required "command" string.
.input_schema = .{
.type = "object",
.properties = parsed_schema.value,
.required = &.{"command"},
},
},
},
// Force the named tool and disable parallel tool calls.
.tool_choice = .{
.tool = .{
.name = "bash",
.disable_parallel_tool_use = true,
},
},
.messages = &.{
.{
.role = .user,
.content = "Print the current working directory using bash.",
},
},
});
defer result.deinit();When Claude chooses the tool, the response content may include a tool_use block:
// Scan the response content for tool_use blocks and print the requested command.
switch (result) {
.ok => |*message| {
for (message.parsed.value.content) |block| {
// block.type is a plain string tag; slices must be compared with std.mem.eql.
if (std.mem.eql(u8, block.type, "tool_use")) {
// NOTE(review): .? asserts name/input are set on tool_use blocks;
// this panics in safe modes if the API ever omits them — confirm.
const command = block.input.?.object.get("command").?.string;
std.debug.print("tool={s} command={s}\n", .{
block.name.?,
command,
});
}
}
},
.api_error => |*api_error| {
// Error handling elided for brevity in this example.
_ = api_error;
},
}After you run the command in your own code, continue the conversation by sending a tool_result block first, then optional text:
// Follow-up turn after executing the tool locally: replay the assistant turn,
// then send a user turn whose first content block is the tool_result
// (tool_use_id must match the id of the earlier tool_use block).
_ = try client.messages().create(.{
.model = "claude-sonnet-4-5",
.max_tokens = 256,
.messages = &.{
.{
.role = .assistant,
// content_blocks carries typed blocks instead of the plain-text content field.
.content_blocks = &.{
.{ .text = .{ .text = "I'll run bash." } },
},
},
.{
.role = .user,
.content_blocks = &.{
.{
// Per the docs above: tool_result first, optional text after.
.tool_result = .{
.tool_use_id = "toolu_123",
.content = "/tmp\n",
},
},
.{ .text = .{ .text = "Continue." } },
},
},
},
});If you already have pre-serialized Claude content arrays, use MessageParam.raw_content_json to embed them without escaping:
.{
.role = .assistant,
.raw_content_json = "[{\"type\":\"tool_use\",\"id\":\"toolu_123\",\"name\":\"bash\",\"input\":{\"command\":\"pwd\"}}]",
}

Currently supported request fields:
- `model`
- `max_tokens`
- `messages`
- `tools` for standard Anthropic tool definitions
- `tool_choice`
- `system`
- `temperature`
- `top_p`
- `top_k`
- `stop_sequences`
messages[*].content is currently plain text only.
If you need content blocks in input messages, use messages[*].content_blocks. The current implementation supports:
- `text`
- `tool_use`
- `tool_result`
If you already have a serialized Claude content array, use messages[*].raw_content_json instead.
For streaming requests, stream: true is added by messages.stream() and streamMessage().
The success path parses:
`id`, `type`, `role`, `content`, `model`, `stop_reason`, `stop_sequence`, `usage`
Unknown response fields are ignored.
Response content blocks currently expose enough fields for text and client tool use:
`type`, `text`, `id`, `name`, `input`
The error path returns the parsed Claude error envelope plus HTTP status and request-id.
Streaming currently exposes raw SSE events plus a few light helpers:
- `MessageStream.nextEvent()`
- `ServerSentEvent.json(T, allocator)`
- `ServerSentEvent.textDelta(allocator)`
Known typed event helpers include:
- `StreamMessageStartEvent`
- `StreamContentBlockStartEvent`
- `StreamContentBlockDeltaEvent`
- `StreamContentBlockStopEvent`
- `StreamMessageDeltaEvent`
- `StreamMessageStopEvent`
- `StreamPingEvent`
zig build
zig build test

- This package is a library first. The bundled executable only prints a placeholder message.
- The current implementation is designed to stay close to the wire format instead of adding extra abstraction layers.
- Streaming support is intentionally thin: it does not yet accumulate a final `Message` object from SSE events.
- Tool use support is intentionally thin: it does not include a tool runner or automatic local bash execution.