diff --git a/docs/core-concepts/tools-function-calling.md b/docs/core-concepts/tools-function-calling.md index 72d0a4154..8a6ef32f9 100644 --- a/docs/core-concepts/tools-function-calling.md +++ b/docs/core-concepts/tools-function-calling.md @@ -305,6 +305,56 @@ use Prism\Prism\Facades\Tool; $tool = Tool::make(CurrentWeatherTool::class); ``` +## Client-Executed Tools + +Sometimes you need tools that are executed by the client (e.g., frontend application) rather than on the server. Client-executed tools are defined without a handler function - simply omit the `using()` call: + +```php +use Prism\Prism\Facades\Tool; + +$clientTool = Tool::as('browser_action') + ->for('Perform an action in the user\'s browser') + ->withStringParameter('action', 'The action to perform'); + // Note: No using() call - this tool will be executed by the client +``` + +When the AI calls a client-executed tool, Prism will: +1. Stop execution and return control to your application +2. Set the response's `finishReason` to `FinishReason::ToolCalls` +3. Include the tool calls in the response for your client to execute + +### Handling Client-Executed Tools + +```php +use Prism\Prism\Facades\Prism; +use Prism\Prism\Enums\FinishReason; + +$response = Prism::text() + ->using('anthropic', 'claude-3-5-sonnet-latest') + ->withTools([$clientTool]) + ->withMaxSteps(3) + ->withPrompt('Click the submit button') + ->asText(); + +``` + +### Streaming with Client-Executed Tools + +When streaming, client-executed tools emit a `ToolCallEvent` but no `ToolResultEvent`: + +```php + +$response = Prism::text() + ->using('anthropic', 'claude-3-5-sonnet-latest') + ->withTools([$clientTool]) + ->withMaxSteps(3) + ->withPrompt('Click the submit button') + ->asStream(); +``` + +> [!NOTE] +> Client-executed tools are useful for scenarios like browser automation, UI interactions, or any operation that must run on the user's device rather than the server. 
+ ## Tool Choice Options You can control how the AI uses tools with the `withToolChoice` method: diff --git a/src/Concerns/CallsTools.php b/src/Concerns/CallsTools.php index 1dec8b9c6..bd6320408 100644 --- a/src/Concerns/CallsTools.php +++ b/src/Concerns/CallsTools.php @@ -6,6 +6,7 @@ use Illuminate\Support\ItemNotFoundException; use Illuminate\Support\MultipleItemsFoundException; +use JsonException; use Prism\Prism\Exceptions\PrismException; use Prism\Prism\Tool; use Prism\Prism\ValueObjects\ToolCall; @@ -18,6 +19,8 @@ trait CallsTools * @param Tool[] $tools * @param ToolCall[] $toolCalls * @return ToolResult[] + * + * @throws PrismException|JsonException */ protected function callTools(array $tools, array $toolCalls): array { @@ -47,12 +50,25 @@ function (ToolCall $toolCall) use ($tools): ToolResult { } }, - $toolCalls + array_filter($toolCalls, fn (ToolCall $toolCall): bool => ! $this->resolveTool($toolCall->name, $tools)->isClientExecuted()) ); } /** * @param Tool[] $tools + * @param ToolCall[] $toolCalls + * + * @throws PrismException + */ + protected function hasDeferredTools(array $tools, array $toolCalls): bool + { + return array_any($toolCalls, fn (ToolCall $toolCall): bool => $this->resolveTool($toolCall->name, $tools)->isClientExecuted()); + } + + /** + * @param Tool[] $tools + * + * @throws PrismException */ protected function resolveTool(string $name, array $tools): Tool { diff --git a/src/Exceptions/PrismException.php b/src/Exceptions/PrismException.php index 75ca9f3e0..636a6f4e1 100644 --- a/src/Exceptions/PrismException.php +++ b/src/Exceptions/PrismException.php @@ -76,4 +76,11 @@ public static function unsupportedProviderAction(string $method, string $provide $provider, )); } + + public static function toolHandlerNotDefined(string $toolName): self + { + return new self( + sprintf('Tool (%s) has no handler defined', $toolName) + ); + } } diff --git a/src/Providers/Anthropic/Handlers/Stream.php b/src/Providers/Anthropic/Handlers/Stream.php index 
2b1e37240..cb7ed4332 100644 --- a/src/Providers/Anthropic/Handlers/Stream.php +++ b/src/Providers/Anthropic/Handlers/Stream.php @@ -443,9 +443,18 @@ protected function handleToolCalls(Request $request, int $depth): Generator // Execute tools and emit results $toolResults = []; + $hasDeferred = false; foreach ($toolCalls as $toolCall) { try { $tool = $this->resolveTool($toolCall->name, $request->tools()); + + // Skip deferred tools - frontend will provide results + if ($tool->isClientExecuted()) { + $hasDeferred = true; + + continue; + } + $result = call_user_func_array($tool->handle(...), $toolCall->arguments()); $toolResult = new ToolResult( @@ -483,6 +492,17 @@ protected function handleToolCalls(Request $request, int $depth): Generator } } + // skip calling llm if there are pending deferred tools + if ($hasDeferred) { + yield new StreamEndEvent( + id: EventID::generate(), + timestamp: time(), + finishReason: FinishReason::ToolCalls + ); + + return; + } + // Add messages to request for next turn if ($toolResults !== []) { $request->addMessage(new AssistantMessage( diff --git a/src/Providers/Anthropic/Handlers/Structured.php b/src/Providers/Anthropic/Handlers/Structured.php index a720e9ed0..62149d533 100644 --- a/src/Providers/Anthropic/Handlers/Structured.php +++ b/src/Providers/Anthropic/Handlers/Structured.php @@ -172,7 +172,7 @@ protected function executeCustomToolsAndContinue(array $toolCalls, Response $tem $this->request->addMessage($message); $this->addStep($toolCalls, $tempResponse, $toolResults); - if ($this->canContinue()) { + if (! 
$this->hasDeferredTools($this->request->tools(), $toolCalls) && $this->canContinue()) { return $this->handle(); } diff --git a/src/Providers/Anthropic/Handlers/Text.php b/src/Providers/Anthropic/Handlers/Text.php index b3c6c3dff..22a2c990d 100644 --- a/src/Providers/Anthropic/Handlers/Text.php +++ b/src/Providers/Anthropic/Handlers/Text.php @@ -113,7 +113,7 @@ protected function handleToolCalls(): Response $this->addStep($toolResults); - if ($this->responseBuilder->steps->count() < $this->request->maxSteps()) { + if (! $this->hasDeferredTools($this->request->tools(), $this->tempResponse->toolCalls) && $this->responseBuilder->steps->count() < $this->request->maxSteps()) { return $this->handle(); } diff --git a/src/Providers/DeepSeek/Handlers/Stream.php b/src/Providers/DeepSeek/Handlers/Stream.php index e872f7395..f0f38826d 100644 --- a/src/Providers/DeepSeek/Handlers/Stream.php +++ b/src/Providers/DeepSeek/Handlers/Stream.php @@ -359,6 +359,17 @@ protected function handleToolCalls(Request $request, string $text, array $toolCa ); } + // skip calling llm if there are pending deferred tools + if ($this->hasDeferredTools($request->tools(), $mappedToolCalls)) { + yield new StreamEndEvent( + id: EventID::generate(), + timestamp: time(), + finishReason: FinishReason::ToolCalls + ); + + return; + } + $request->addMessage(new AssistantMessage($text, $mappedToolCalls)); $request->addMessage(new ToolResultMessage($toolResults)); diff --git a/src/Providers/DeepSeek/Handlers/Text.php b/src/Providers/DeepSeek/Handlers/Text.php index 8645f13f2..2dae46cff 100644 --- a/src/Providers/DeepSeek/Handlers/Text.php +++ b/src/Providers/DeepSeek/Handlers/Text.php @@ -73,7 +73,10 @@ protected function handleToolCalls(array $data, Request $request): TextResponse $this->addStep($data, $request, $toolResults); - if ($this->shouldContinue($request)) { + if (! 
$this->hasDeferredTools($request->tools(), ToolCallMap::map(data_get($data, 'choices.0.message.tool_calls', []))) + && + $this->shouldContinue($request) + ) { return $this->handle($request); } diff --git a/src/Providers/Gemini/Handlers/Stream.php b/src/Providers/Gemini/Handlers/Stream.php index 864c3f89f..e7ca6463c 100644 --- a/src/Providers/Gemini/Handlers/Stream.php +++ b/src/Providers/Gemini/Handlers/Stream.php @@ -283,6 +283,7 @@ protected function handleToolCalls( array $data = [] ): Generator { $mappedToolCalls = []; + $hasDeferred = false; // Convert tool calls to ToolCall objects foreach ($this->state->toolCalls() as $toolCallData) { @@ -294,6 +295,14 @@ protected function handleToolCalls( foreach ($mappedToolCalls as $toolCall) { try { $tool = $this->resolveTool($toolCall->name, $request->tools()); + + // Skip deferred tools - frontend will provide results + if ($tool->isClientExecuted()) { + $hasDeferred = true; + + continue; + } + $result = call_user_func_array($tool->handle(...), $toolCall->arguments()); $toolResult = new ToolResult( @@ -333,6 +342,17 @@ protected function handleToolCalls( } } + // skip calling llm if there are pending deferred tools + if ($hasDeferred) { + yield new StreamEndEvent( + id: EventID::generate(), + timestamp: time(), + finishReason: FinishReason::ToolCalls + ); + + return; + } + // Add messages for next turn and continue streaming if ($toolResults !== []) { $request->addMessage(new AssistantMessage($this->state->currentText(), $mappedToolCalls)); diff --git a/src/Providers/Gemini/Handlers/Structured.php b/src/Providers/Gemini/Handlers/Structured.php index b0053a3d4..9238d9176 100644 --- a/src/Providers/Gemini/Handlers/Structured.php +++ b/src/Providers/Gemini/Handlers/Structured.php @@ -209,7 +209,7 @@ protected function handleToolCalls(array $data, Request $request): StructuredRes $this->addStep($data, $request, FinishReason::ToolCalls, $toolResults); - if ($this->shouldContinue($request)) { + if (! 
$this->hasDeferredTools($request->tools(), ToolCallMap::map(data_get($data, 'candidates.0.content.parts', []))) && $this->shouldContinue($request)) { return $this->handle($request); } diff --git a/src/Providers/Gemini/Handlers/Text.php b/src/Providers/Gemini/Handlers/Text.php index 1384c6f4f..03eb28b23 100644 --- a/src/Providers/Gemini/Handlers/Text.php +++ b/src/Providers/Gemini/Handlers/Text.php @@ -156,7 +156,7 @@ protected function handleToolCalls(array $data, Request $request): TextResponse $this->addStep($data, $request, FinishReason::ToolCalls, $toolResults); - if ($this->shouldContinue($request)) { + if (! $this->hasDeferredTools($request->tools(), ToolCallMap::map(data_get($data, 'candidates.0.content.parts', []))) && $this->shouldContinue($request)) { return $this->handle($request); } diff --git a/src/Providers/Groq/Handlers/Stream.php b/src/Providers/Groq/Handlers/Stream.php index 92f22e900..6180f1d03 100644 --- a/src/Providers/Groq/Handlers/Stream.php +++ b/src/Providers/Groq/Handlers/Stream.php @@ -257,6 +257,17 @@ protected function handleToolCalls( ); } + // skip calling llm if there are pending deferred tools + if ($this->hasDeferredTools($request->tools(), $mappedToolCalls)) { + yield new StreamEndEvent( + id: EventID::generate(), + timestamp: time(), + finishReason: FinishReason::ToolCalls + ); + + return; + } + $request->addMessage(new AssistantMessage($text, $mappedToolCalls)); $request->addMessage(new ToolResultMessage($toolResults)); diff --git a/src/Providers/Groq/Handlers/Text.php b/src/Providers/Groq/Handlers/Text.php index 84e539bff..37c7ac299 100644 --- a/src/Providers/Groq/Handlers/Text.php +++ b/src/Providers/Groq/Handlers/Text.php @@ -95,7 +95,7 @@ protected function handleToolCalls(array $data, Request $request, ClientResponse $this->addStep($data, $request, $clientResponse, FinishReason::ToolCalls, $toolResults); - if ($this->shouldContinue($request)) { + if (! 
$this->hasDeferredTools($request->tools(), $this->mapToolCalls(data_get($data, 'choices.0.message.tool_calls', []) ?? [])) && $this->shouldContinue($request)) { return $this->handle($request); } diff --git a/src/Providers/Mistral/Handlers/Stream.php b/src/Providers/Mistral/Handlers/Stream.php index 2b7c87095..437bcb2c5 100644 --- a/src/Providers/Mistral/Handlers/Stream.php +++ b/src/Providers/Mistral/Handlers/Stream.php @@ -251,6 +251,17 @@ protected function handleToolCalls( ); } + // skip calling llm if there are pending deferred tools + if ($this->hasDeferredTools($request->tools(), $mappedToolCalls)) { + yield new StreamEndEvent( + id: EventID::generate(), + timestamp: time(), + finishReason: FinishReason::ToolCalls + ); + + return; + } + $request->addMessage(new AssistantMessage($text, $mappedToolCalls)); $request->addMessage(new ToolResultMessage($toolResults)); diff --git a/src/Providers/Mistral/Handlers/Text.php b/src/Providers/Mistral/Handlers/Text.php index a5582f0fd..c22953f23 100644 --- a/src/Providers/Mistral/Handlers/Text.php +++ b/src/Providers/Mistral/Handlers/Text.php @@ -81,7 +81,7 @@ protected function handleToolCalls(array $data, Request $request, ClientResponse $this->addStep($data, $request, $clientResponse, $toolResults); - if ($this->shouldContinue($request)) { + if (! 
$this->hasDeferredTools($request->tools(), $this->mapToolCalls(data_get($data, 'choices.0.message.tool_calls', []))) && $this->shouldContinue($request)) { return $this->handle($request); } diff --git a/src/Providers/Ollama/Handlers/Stream.php b/src/Providers/Ollama/Handlers/Stream.php index f9b34eab6..ea634f3fc 100644 --- a/src/Providers/Ollama/Handlers/Stream.php +++ b/src/Providers/Ollama/Handlers/Stream.php @@ -273,6 +273,17 @@ protected function handleToolCalls( ); } + // skip calling llm if there are pending deferred tools + if ($this->hasDeferredTools($request->tools(), $mappedToolCalls)) { + yield new StreamEndEvent( + id: EventID::generate(), + timestamp: time(), + finishReason: FinishReason::ToolCalls + ); + + return; + } + // Add messages for next turn $request->addMessage(new AssistantMessage($text, $mappedToolCalls)); $request->addMessage(new ToolResultMessage($toolResults)); diff --git a/src/Providers/Ollama/Handlers/Text.php b/src/Providers/Ollama/Handlers/Text.php index 04a63b3ee..b50a193dc 100644 --- a/src/Providers/Ollama/Handlers/Text.php +++ b/src/Providers/Ollama/Handlers/Text.php @@ -105,7 +105,7 @@ protected function handleToolCalls(array $data, Request $request): Response $this->addStep($data, $request, $toolResults); - if ($this->shouldContinue($request)) { + if (! $this->hasDeferredTools($request->tools(), $this->mapToolCalls(data_get($data, 'message.tool_calls', []))) && $this->shouldContinue($request)) { return $this->handle($request); } @@ -133,10 +133,18 @@ protected function shouldContinue(Request $request): bool */ protected function addStep(array $data, Request $request, array $toolResults = []): void { + $toolCalls = $this->mapToolCalls(data_get($data, 'message.tool_calls', []) ?? []); + + // Ollama sends done_reason: "stop" even when there are tool calls + // Override finish reason to ToolCalls when tool calls are present + $finishReason = $toolCalls === [] + ? 
$this->mapFinishReason($data) + : FinishReason::ToolCalls; + $this->responseBuilder->addStep(new Step( text: data_get($data, 'message.content') ?? '', - finishReason: $this->mapFinishReason($data), - toolCalls: $this->mapToolCalls(data_get($data, 'message.tool_calls', []) ?? []), + finishReason: $finishReason, + toolCalls: $toolCalls, toolResults: $toolResults, providerToolCalls: [], usage: new Usage( diff --git a/src/Providers/OpenAI/Handlers/Stream.php b/src/Providers/OpenAI/Handlers/Stream.php index dbb05c7b3..c363a9bde 100644 --- a/src/Providers/OpenAI/Handlers/Stream.php +++ b/src/Providers/OpenAI/Handlers/Stream.php @@ -352,6 +352,17 @@ protected function handleToolCalls(Request $request, int $depth): Generator ); } + // skip calling llm if there are pending deferred tools + if ($this->hasDeferredTools($request->tools(), $mappedToolCalls)) { + yield new StreamEndEvent( + id: EventID::generate(), + timestamp: time(), + finishReason: FinishReason::ToolCalls + ); + + return; + } + $request->addMessage(new AssistantMessage($this->state->currentText(), $mappedToolCalls)); $request->addMessage(new ToolResultMessage($toolResults)); diff --git a/src/Providers/OpenAI/Handlers/Structured.php b/src/Providers/OpenAI/Handlers/Structured.php index b2c102d01..c9136aa1c 100644 --- a/src/Providers/OpenAI/Handlers/Structured.php +++ b/src/Providers/OpenAI/Handlers/Structured.php @@ -100,7 +100,7 @@ protected function handleToolCalls(array $data, Request $request, ClientResponse $this->addStep($data, $request, $clientResponse, $toolResults); - if ($this->shouldContinue($request)) { + if (! 
$this->hasDeferredTools($request->tools(), ToolCallMap::map($this->extractFunctionCalls($data))) && $this->shouldContinue($request)) { return $this->handle($request); } diff --git a/src/Providers/OpenAI/Handlers/Text.php b/src/Providers/OpenAI/Handlers/Text.php index 6b2747442..980218179 100644 --- a/src/Providers/OpenAI/Handlers/Text.php +++ b/src/Providers/OpenAI/Handlers/Text.php @@ -100,7 +100,10 @@ protected function handleToolCalls(array $data, Request $request, ClientResponse $this->addStep($data, $request, $clientResponse, $toolResults); - if ($this->shouldContinue($request)) { + if (! $this->hasDeferredTools($request->tools(), ToolCallMap::map(array_filter(data_get($data, 'output', []), fn (array $output): bool => $output['type'] === 'function_call'))) + && + $this->shouldContinue($request) + ) { return $this->handle($request); } diff --git a/src/Providers/OpenRouter/Handlers/Stream.php b/src/Providers/OpenRouter/Handlers/Stream.php index 35d630c89..7adce0d28 100644 --- a/src/Providers/OpenRouter/Handlers/Stream.php +++ b/src/Providers/OpenRouter/Handlers/Stream.php @@ -365,6 +365,17 @@ protected function handleToolCalls( ); } + // skip calling llm if there are pending deferred tools + if ($this->hasDeferredTools($request->tools(), $mappedToolCalls)) { + yield new StreamEndEvent( + id: EventID::generate(), + timestamp: time(), + finishReason: FinishReason::ToolCalls + ); + + return; + } + $request->addMessage(new AssistantMessage($text, $mappedToolCalls)); $request->addMessage(new ToolResultMessage($toolResults)); diff --git a/src/Providers/OpenRouter/Handlers/Text.php b/src/Providers/OpenRouter/Handlers/Text.php index baf39954f..454b44b28 100644 --- a/src/Providers/OpenRouter/Handlers/Text.php +++ b/src/Providers/OpenRouter/Handlers/Text.php @@ -72,7 +72,7 @@ protected function handleToolCalls(array $data, Request $request): TextResponse $this->addStep($data, $request, $toolResults); - if ($this->shouldContinue($request)) { + if (! 
$this->hasDeferredTools($request->tools(), ToolCallMap::map(data_get($data, 'choices.0.message.tool_calls', []))) && $this->shouldContinue($request)) { return $this->handle($request); } diff --git a/src/Providers/XAI/Handlers/Stream.php b/src/Providers/XAI/Handlers/Stream.php index f162e7055..7700c8373 100644 --- a/src/Providers/XAI/Handlers/Stream.php +++ b/src/Providers/XAI/Handlers/Stream.php @@ -346,6 +346,17 @@ protected function handleToolCalls( ); } + // skip calling llm if there are pending deferred tools + if ($this->hasDeferredTools($request->tools(), $mappedToolCalls)) { + yield new StreamEndEvent( + id: EventID::generate(), + timestamp: time(), + finishReason: FinishReason::ToolCalls + ); + + return; + } + $request->addMessage(new AssistantMessage($text, $mappedToolCalls)); $request->addMessage(new ToolResultMessage($toolResults)); diff --git a/src/Providers/XAI/Handlers/Text.php b/src/Providers/XAI/Handlers/Text.php index 5191a41d4..318da0b8b 100644 --- a/src/Providers/XAI/Handlers/Text.php +++ b/src/Providers/XAI/Handlers/Text.php @@ -83,7 +83,7 @@ protected function handleToolCalls(array $data, Request $request): TextResponse $this->addStep($data, $request, $toolResults); - if ($this->shouldContinue($request)) { + if (! 
$this->hasDeferredTools($request->tools(), $toolCalls) && $this->shouldContinue($request)) { return $this->handle($request); } diff --git a/src/Tool.php b/src/Tool.php index dd5a8c319..1b334d55d 100644 --- a/src/Tool.php +++ b/src/Tool.php @@ -37,7 +37,7 @@ class Tool /** @var array */ protected array $requiredParameters = []; - /** @var Closure():string|callable():string */ + /** @var Closure():string|callable():string|null */ protected $fn; /** @var null|false|Closure(Throwable,array):string */ @@ -230,6 +230,11 @@ public function hasParameters(): bool return (bool) count($this->parameters); } + public function isClientExecuted(): bool + { + return $this->fn === null; + } + /** * @return null|false|Closure(Throwable,array):string */ @@ -245,6 +250,10 @@ public function failedHandler(): null|false|Closure */ public function handle(...$args): string { + if ($this->fn === null) { + throw PrismException::toolHandlerNotDefined($this->name); + } + try { $value = call_user_func($this->fn, ...$args); diff --git a/tests/Fixtures/anthropic/stream-with-client-executed-tool-1.sse b/tests/Fixtures/anthropic/stream-with-client-executed-tool-1.sse new file mode 100644 index 000000000..e6fcba676 --- /dev/null +++ b/tests/Fixtures/anthropic/stream-with-client-executed-tool-1.sse @@ -0,0 +1,31 @@ +event: message_start +data: {"type":"message_start","message":{"id":"msg_client_executed_test","type":"message","role":"assistant","model":"claude-3-5-sonnet-20240620","content":[],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":100,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"output_tokens":1}}} + +event: content_block_start +data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""}} + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"I'll use the client tool to help you."}} + +event: content_block_stop +data: {"type":"content_block_stop","index":0} + +event: 
content_block_start +data: {"type":"content_block_start","index":1,"content_block":{"type":"tool_use","id":"toolu_client_tool_stream","name":"client_tool","input":{}}} + +event: content_block_delta +data: {"type":"content_block_delta","index":1,"delta":{"type":"input_json_delta","partial_json":""}} + +event: content_block_delta +data: {"type":"content_block_delta","index":1,"delta":{"type":"input_json_delta","partial_json":"{\"input\": \"test input\"}"}} + +event: content_block_stop +data: {"type":"content_block_stop","index":1} + +event: message_delta +data: {"type":"message_delta","delta":{"stop_reason":"tool_use","stop_sequence":null},"usage":{"output_tokens":50}} + +event: message_stop +data: {"type":"message_stop"} + + diff --git a/tests/Fixtures/anthropic/structured-with-client-executed-tool-1.json b/tests/Fixtures/anthropic/structured-with-client-executed-tool-1.json new file mode 100644 index 000000000..980813079 --- /dev/null +++ b/tests/Fixtures/anthropic/structured-with-client-executed-tool-1.json @@ -0,0 +1,2 @@ +{"model":"claude-sonnet-4-20250514","id":"msg_client_executed_structured","type":"message","role":"assistant","content":[{"type":"text","text":"I'll use the client tool to help you with that request."},{"type":"tool_use","id":"toolu_client_structured","name":"client_tool","input":{"input":"test input"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":200,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"output_tokens":50}} + diff --git a/tests/Fixtures/anthropic/text-with-client-executed-tool-1.json b/tests/Fixtures/anthropic/text-with-client-executed-tool-1.json new file mode 100644 index 000000000..9d409fb32 --- /dev/null +++ b/tests/Fixtures/anthropic/text-with-client-executed-tool-1.json @@ -0,0 +1,2 @@ +{"id":"msg_01ClientExecutedTest","type":"message","role":"assistant","model":"claude-3-5-sonnet-20240620","content":[{"type":"text","text":"I'll use the client tool to help you with 
that."},{"type":"tool_use","id":"toolu_client_tool_123","name":"client_tool","input":{"input":"test input"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":100,"output_tokens":50}} + diff --git a/tests/Fixtures/deepseek/stream-with-client-executed-tool-1.sse b/tests/Fixtures/deepseek/stream-with-client-executed-tool-1.sse new file mode 100644 index 000000000..5dc224e2c --- /dev/null +++ b/tests/Fixtures/deepseek/stream-with-client-executed-tool-1.sse @@ -0,0 +1,9 @@ +data: {"id":"chatcmpl-client-executed","object":"chat.completion.chunk","created":1737244481,"model":"deepseek-chat","choices":[{"index":0,"delta":{"role":"assistant","content":"","tool_calls":[{"index":0,"id":"call_client_tool_stream","type":"function","function":{"name":"client_tool","arguments":""}}]},"finish_reason":null}]} + +data: {"id":"chatcmpl-client-executed","object":"chat.completion.chunk","created":1737244481,"model":"deepseek-chat","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\"input\": \"test input\"}"}}]},"finish_reason":null}]} + +data: {"id":"chatcmpl-client-executed","object":"chat.completion.chunk","created":1737244481,"model":"deepseek-chat","choices":[{"index":0,"delta":{},"finish_reason":"tool_calls"}],"usage":{"prompt_tokens":100,"completion_tokens":50,"total_tokens":150}} + +data: [DONE] + + diff --git a/tests/Fixtures/deepseek/text-with-client-executed-tool-1.json b/tests/Fixtures/deepseek/text-with-client-executed-tool-1.json new file mode 100644 index 000000000..cc22dab36 --- /dev/null +++ b/tests/Fixtures/deepseek/text-with-client-executed-tool-1.json @@ -0,0 +1,2 @@ +{"id":"client-executed-test","object":"chat.completion","created":1737244481,"model":"deepseek-chat","choices":[{"index":0,"message":{"role":"assistant","content":"","tool_calls":[{"index":0,"id":"call_client_tool_123","type":"function","function":{"name":"client_tool","arguments":"{\"input\":\"test 
input\"}"}}]},"logprobs":null,"finish_reason":"tool_calls"}],"usage":{"prompt_tokens":100,"completion_tokens":50,"total_tokens":150},"system_fingerprint":"fp_test"} + diff --git a/tests/Fixtures/gemini/stream-with-client-executed-tool-1.json b/tests/Fixtures/gemini/stream-with-client-executed-tool-1.json new file mode 100644 index 000000000..972a0996c --- /dev/null +++ b/tests/Fixtures/gemini/stream-with-client-executed-tool-1.json @@ -0,0 +1,3 @@ +data: {"candidates": [{"content": {"parts": [{"functionCall": {"name": "client_tool","args": {"input": "test input"}}}],"role": "model"},"finishReason": "STOP","index": 0}],"usageMetadata": {"promptTokenCount": 100,"candidatesTokenCount": 50,"totalTokenCount": 150,"promptTokensDetails": [{"modality": "TEXT","tokenCount": 100}]},"modelVersion": "gemini-1.5-flash"} + + diff --git a/tests/Fixtures/gemini/structured-with-client-executed-tool-1.json b/tests/Fixtures/gemini/structured-with-client-executed-tool-1.json new file mode 100644 index 000000000..c5a633d1c --- /dev/null +++ b/tests/Fixtures/gemini/structured-with-client-executed-tool-1.json @@ -0,0 +1,40 @@ +{ + "candidates": [ + { + "content": { + "parts": [ + { + "functionCall": { + "name": "client_tool", + "args": { + "input": "test input" + } + } + } + ], + "role": "model" + }, + "finishReason": "STOP", + "avgLogprobs": -0.00003298009687569 + } + ], + "usageMetadata": { + "promptTokenCount": 200, + "candidatesTokenCount": 50, + "totalTokenCount": 250, + "promptTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 200 + } + ], + "candidatesTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 50 + } + ] + }, + "modelVersion": "gemini-2.0-flash" +} + diff --git a/tests/Fixtures/gemini/text-with-client-executed-tool-1.json b/tests/Fixtures/gemini/text-with-client-executed-tool-1.json new file mode 100644 index 000000000..5fb57e5b9 --- /dev/null +++ b/tests/Fixtures/gemini/text-with-client-executed-tool-1.json @@ -0,0 +1,40 @@ +{ + "candidates": [ + { + 
"content": { + "parts": [ + { + "functionCall": { + "name": "client_tool", + "args": { + "input": "test input" + } + } + } + ], + "role": "model" + }, + "finishReason": "STOP", + "avgLogprobs": -0.00003298009687569 + } + ], + "usageMetadata": { + "promptTokenCount": 100, + "candidatesTokenCount": 50, + "totalTokenCount": 150, + "promptTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 100 + } + ], + "candidatesTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 50 + } + ] + }, + "modelVersion": "gemini-1.5-flash" +} + diff --git a/tests/Fixtures/groq/stream-with-client-executed-tool-1.sse b/tests/Fixtures/groq/stream-with-client-executed-tool-1.sse new file mode 100644 index 000000000..c750092fc --- /dev/null +++ b/tests/Fixtures/groq/stream-with-client-executed-tool-1.sse @@ -0,0 +1,9 @@ +data: {"id":"chatcmpl-client-executed-stream","object":"chat.completion.chunk","created":1740311145,"model":"llama-3.3-70b-versatile","choices":[{"index":0,"delta":{"role":"assistant","tool_calls":[{"index":0,"id":"call_client_tool_stream","type":"function","function":{"name":"client_tool","arguments":""}}]},"logprobs":null,"finish_reason":null}],"x_groq":{"id":"req_test"}} + +data: {"id":"chatcmpl-client-executed-stream","object":"chat.completion.chunk","created":1740311145,"model":"llama-3.3-70b-versatile","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\"input\": \"test input\"}"}}]},"logprobs":null,"finish_reason":null}],"x_groq":{"id":"req_test"}} + +data: {"id":"chatcmpl-client-executed-stream","object":"chat.completion.chunk","created":1740311145,"model":"llama-3.3-70b-versatile","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}],"usage":{"queue_time":0.1,"prompt_tokens":100,"prompt_time":0.01,"completion_tokens":50,"completion_time":0.1,"total_tokens":150,"total_time":0.2},"x_groq":{"id":"req_test"}} + +data: [DONE] + + diff --git a/tests/Fixtures/groq/text-with-client-executed-tool-1.json 
b/tests/Fixtures/groq/text-with-client-executed-tool-1.json new file mode 100644 index 000000000..eecec7bb8 --- /dev/null +++ b/tests/Fixtures/groq/text-with-client-executed-tool-1.json @@ -0,0 +1,2 @@ +{"id":"chatcmpl-client-executed","object":"chat.completion","created":1740311145,"model":"llama-3.3-70b-versatile","choices":[{"index":0,"message":{"role":"assistant","tool_calls":[{"id":"call_client_tool","type":"function","function":{"name":"client_tool","arguments":"{\"input\": \"test input\"}"}}]},"logprobs":null,"finish_reason":"tool_calls"}],"usage":{"queue_time":0.1,"prompt_tokens":100,"prompt_time":0.01,"completion_tokens":50,"completion_time":0.1,"total_tokens":150,"total_time":0.2},"system_fingerprint":"fp_test"} + diff --git a/tests/Fixtures/mistral/stream-with-client-executed-tool-1.sse b/tests/Fixtures/mistral/stream-with-client-executed-tool-1.sse new file mode 100644 index 000000000..756442567 --- /dev/null +++ b/tests/Fixtures/mistral/stream-with-client-executed-tool-1.sse @@ -0,0 +1,6 @@ +data: {"id":"client-executed-test","object":"chat.completion.chunk","created":1759185828,"model":"mistral-large-latest","choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]} + +data: {"id":"client-executed-test","object":"chat.completion.chunk","created":1759185828,"model":"mistral-large-latest","choices":[{"index":0,"delta":{"tool_calls":[{"id":"client_tool_stream","function":{"name":"client_tool","arguments":"{\"input\": \"test input\"}"},"index":0}]},"finish_reason":"tool_calls"}],"usage":{"prompt_tokens":100,"total_tokens":150,"completion_tokens":50}} + +data: [DONE] + diff --git a/tests/Fixtures/mistral/text-with-client-executed-tool-1.json b/tests/Fixtures/mistral/text-with-client-executed-tool-1.json new file mode 100644 index 000000000..21a812489 --- /dev/null +++ b/tests/Fixtures/mistral/text-with-client-executed-tool-1.json @@ -0,0 +1,32 @@ +{ + "id": "client_executed_test", + "object": "chat.completion", + "created": 
1728462827, + "model": "mistral-large-latest", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "client_tool_123", + "type": "function", + "function": { + "name": "client_tool", + "arguments": "{\"input\": \"test input\"}" + } + } + ] + }, + "finish_reason": "tool_calls" + } + ], + "usage": { + "prompt_tokens": 100, + "total_tokens": 150, + "completion_tokens": 50 + } +} + diff --git a/tests/Fixtures/ollama/stream-with-client-executed-tool-1.sse b/tests/Fixtures/ollama/stream-with-client-executed-tool-1.sse new file mode 100644 index 000000000..044f3d26e --- /dev/null +++ b/tests/Fixtures/ollama/stream-with-client-executed-tool-1.sse @@ -0,0 +1,3 @@ +{"model":"qwen2.5:14b","created_at":"2025-06-09T18:55:26.684517Z","message":{"role":"assistant","content":"","tool_calls":[{"function":{"name":"client_tool","arguments":{"input":"test input"}}}]},"done_reason":"stop","done":true,"total_duration":8210142000,"load_duration":22224542,"prompt_eval_count":100,"prompt_eval_duration":269880958,"eval_count":50,"eval_duration":7916594250} + + diff --git a/tests/Fixtures/ollama/text-with-client-executed-tool-1.json b/tests/Fixtures/ollama/text-with-client-executed-tool-1.json new file mode 100644 index 000000000..d5d619328 --- /dev/null +++ b/tests/Fixtures/ollama/text-with-client-executed-tool-1.json @@ -0,0 +1,2 @@ +{"model":"qwen2.5:14b","created_at":"2025-06-09T18:55:26.684517Z","message":{"role":"assistant","content":"","tool_calls":[{"function":{"name":"client_tool","arguments":{"input":"test input"}}}]},"done_reason":"stop","done":true,"total_duration":8210142000,"load_duration":22224542,"prompt_eval_count":100,"prompt_eval_duration":269880958,"eval_count":50,"eval_duration":7916594250} + diff --git a/tests/Fixtures/openai/stream-with-client-executed-tool-1.json b/tests/Fixtures/openai/stream-with-client-executed-tool-1.json new file mode 100644 index 000000000..272daa16c --- /dev/null +++ 
b/tests/Fixtures/openai/stream-with-client-executed-tool-1.json @@ -0,0 +1,22 @@ +event: response.created +data: {"type":"response.created","sequence_number":0,"response":{"id":"resp_client_executed_stream","object":"response","created_at":1750705330,"status":"in_progress","background":false,"error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":2048,"model":"gpt-4o-2024-08-06","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"summary":null},"service_tier":"auto","store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[{"type":"function","description":"A tool that executes on the client","name":"client_tool","parameters":{"type":"object","properties":{"input":{"description":"Input parameter","type":"string"}},"required":["input"]},"strict":true}],"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}} + +event: response.in_progress +data: {"type":"response.in_progress","sequence_number":1,"response":{"id":"resp_client_executed_stream","object":"response","created_at":1750705330,"status":"in_progress","background":false,"error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":2048,"model":"gpt-4o-2024-08-06","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"summary":null},"service_tier":"auto","store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[{"type":"function","description":"A tool that executes on the client","name":"client_tool","parameters":{"type":"object","properties":{"input":{"description":"Input parameter","type":"string"}},"required":["input"]},"strict":true}],"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}} + +event: response.output_item.added +data: 
{"type":"response.output_item.added","sequence_number":2,"output_index":0,"item":{"id":"fc_client_tool_stream","type":"function_call","status":"in_progress","arguments":"","call_id":"call_client_tool_stream","name":"client_tool"}} + +event: response.function_call_arguments.delta +data: {"type":"response.function_call_arguments.delta","sequence_number":3,"item_id":"fc_client_tool_stream","output_index":0,"delta":"{\"input\":\"test input\"}"} + +event: response.function_call_arguments.done +data: {"type":"response.function_call_arguments.done","sequence_number":4,"item_id":"fc_client_tool_stream","output_index":0,"arguments":"{\"input\":\"test input\"}"} + +event: response.output_item.done +data: {"type":"response.output_item.done","sequence_number":5,"output_index":0,"item":{"id":"fc_client_tool_stream","type":"function_call","status":"completed","arguments":"{\"input\":\"test input\"}","call_id":"call_client_tool_stream","name":"client_tool"}} + +event: response.completed +data: {"type":"response.completed","sequence_number":6,"response":{"id":"resp_client_executed_stream","object":"response","created_at":1750705330,"status":"completed","background":false,"error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":2048,"model":"gpt-4o-2024-08-06","output":[{"id":"fc_client_tool_stream","type":"function_call","status":"completed","arguments":"{\"input\":\"test input\"}","call_id":"call_client_tool_stream","name":"client_tool"}],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"summary":null},"service_tier":"default","store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[{"type":"function","description":"A tool that executes on the client","name":"client_tool","parameters":{"type":"object","properties":{"input":{"description":"Input 
parameter","type":"string"}},"required":["input"]},"strict":true}],"top_p":1.0,"truncation":"disabled","usage":{"input_tokens":100,"input_tokens_details":{"cached_tokens":0},"output_tokens":50,"output_tokens_details":{"reasoning_tokens":0},"total_tokens":150},"user":null,"metadata":{}}} + + diff --git a/tests/Fixtures/openai/structured-with-client-executed-tool-1.json b/tests/Fixtures/openai/structured-with-client-executed-tool-1.json new file mode 100644 index 000000000..8aff9433b --- /dev/null +++ b/tests/Fixtures/openai/structured-with-client-executed-tool-1.json @@ -0,0 +1,31 @@ +{ + "id": "resp_structured_client_executed", + "object": "response", + "created_at": 1741989983, + "status": "completed", + "model": "gpt-4o-2024-08-06", + "output": [ + { + "id": "fc_client_tool_structured", + "type": "function_call", + "status": "completed", + "arguments": "{\"input\": \"test input\"}", + "call_id": "call_client_tool_structured", + "name": "client_tool" + } + ], + "usage": { + "input_tokens": 200, + "input_tokens_details": { + "cached_tokens": 0 + }, + "output_tokens": 50, + "output_tokens_details": { + "reasoning_tokens": 0 + }, + "total_tokens": 250 + }, + "service_tier": "default", + "system_fingerprint": "fp_test" +} + diff --git a/tests/Fixtures/openai/text-with-client-executed-tool-1.json b/tests/Fixtures/openai/text-with-client-executed-tool-1.json new file mode 100644 index 000000000..a62212d13 --- /dev/null +++ b/tests/Fixtures/openai/text-with-client-executed-tool-1.json @@ -0,0 +1,31 @@ +{ + "id": "resp_client_executed_test", + "object": "response", + "created_at": 1741989983, + "status": "completed", + "model": "gpt-4o-2024-08-06", + "output": [ + { + "id": "fc_client_tool_123", + "type": "function_call", + "status": "completed", + "arguments": "{\"input\": \"test input\"}", + "call_id": "call_client_tool_123", + "name": "client_tool" + } + ], + "usage": { + "input_tokens": 100, + "input_tokens_details": { + "cached_tokens": 0 + }, + "output_tokens": 50, 
+ "output_tokens_details": { + "reasoning_tokens": 0 + }, + "total_tokens": 150 + }, + "service_tier": "default", + "system_fingerprint": "fp_test" +} + diff --git a/tests/Fixtures/openrouter/stream-with-client-executed-tool-1.sse b/tests/Fixtures/openrouter/stream-with-client-executed-tool-1.sse new file mode 100644 index 000000000..e0a4d5e6c --- /dev/null +++ b/tests/Fixtures/openrouter/stream-with-client-executed-tool-1.sse @@ -0,0 +1,9 @@ +data: {"id":"chatcmpl-client-executed","object":"chat.completion.chunk","created":1737243487,"model":"openai/gpt-4-turbo","choices":[{"index":0,"delta":{"role":"assistant","tool_calls":[{"index":0,"id":"call_client_tool_stream","type":"function","function":{"name":"client_tool","arguments":""}}]},"logprobs":null,"finish_reason":null}]} + +data: {"id":"chatcmpl-client-executed","object":"chat.completion.chunk","created":1737243487,"model":"openai/gpt-4-turbo","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\"input\": \"test input\"}"}}]},"logprobs":null,"finish_reason":null}]} + +data: {"id":"chatcmpl-client-executed","object":"chat.completion.chunk","created":1737243487,"model":"openai/gpt-4-turbo","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}],"usage":{"prompt_tokens":100,"completion_tokens":50,"total_tokens":150}} + +data: [DONE] + + diff --git a/tests/Fixtures/openrouter/text-with-client-executed-tool-1.json b/tests/Fixtures/openrouter/text-with-client-executed-tool-1.json new file mode 100644 index 000000000..5afde1944 --- /dev/null +++ b/tests/Fixtures/openrouter/text-with-client-executed-tool-1.json @@ -0,0 +1,2 @@ +{"id":"gen-client-executed","object":"chat.completion","created":1737243487,"model":"openai/gpt-4-turbo","choices":[{"index":0,"message":{"role":"assistant","content":"","tool_calls":[{"id":"call_client_tool","type":"function","function":{"name":"client_tool","arguments":"{\"input\":\"test 
input\"}"}}]},"logprobs":null,"finish_reason":"tool_calls"}],"usage":{"prompt_tokens":100,"completion_tokens":50,"total_tokens":150}} + diff --git a/tests/Fixtures/xai/stream-with-client-executed-tool-1.json b/tests/Fixtures/xai/stream-with-client-executed-tool-1.json new file mode 100644 index 000000000..63313a45e --- /dev/null +++ b/tests/Fixtures/xai/stream-with-client-executed-tool-1.json @@ -0,0 +1,9 @@ +data: {"id":"chatcmpl-client-executed","object":"chat.completion.chunk","created":1731129810,"model":"grok-4","choices":[{"index":0,"delta":{"role":"assistant","tool_calls":[{"index":0,"id":"0","type":"function","function":{"name":"client_tool","arguments":""}}]},"logprobs":null,"finish_reason":null}]} + +data: {"id":"chatcmpl-client-executed","object":"chat.completion.chunk","created":1731129810,"model":"grok-4","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\"input\":\"test input\"}"}}]},"logprobs":null,"finish_reason":null}]} + +data: {"id":"chatcmpl-client-executed","object":"chat.completion.chunk","created":1731129810,"model":"grok-4","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}],"usage":{"prompt_tokens":100,"completion_tokens":50,"total_tokens":150}} + +data: [DONE] + + diff --git a/tests/Fixtures/xai/text-with-client-executed-tool-1.json b/tests/Fixtures/xai/text-with-client-executed-tool-1.json new file mode 100644 index 000000000..064707bc0 --- /dev/null +++ b/tests/Fixtures/xai/text-with-client-executed-tool-1.json @@ -0,0 +1,34 @@ +{ + "id": "client-executed-test", + "object": "chat.completion", + "created": 1731129810, + "model": "grok-beta", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "0", + "function": { + "name": "client_tool", + "arguments": "{\"input\":\"test input\"}" + }, + "type": "function" + } + ], + "refusal": null + }, + "finish_reason": "tool_calls" + } + ], + "usage": { + "prompt_tokens": 100, 
+ "completion_tokens": 50, + "total_tokens": 150 + }, + "system_fingerprint": "fp_test" +} + diff --git a/tests/Providers/Anthropic/AnthropicTextTest.php b/tests/Providers/Anthropic/AnthropicTextTest.php index c04791bfe..e335bb4fa 100644 --- a/tests/Providers/Anthropic/AnthropicTextTest.php +++ b/tests/Providers/Anthropic/AnthropicTextTest.php @@ -523,6 +523,28 @@ }); }); +describe('client-executed tools', function (): void { + it('stops execution when client-executed tool is called', function (): void { + FixtureResponse::fakeResponseSequence('v1/messages', 'anthropic/text-with-client-executed-tool'); + + $tool = Tool::as('client_tool') + ->for('A tool that executes on the client') + ->withStringParameter('input', 'Input parameter'); + + $response = Prism::text() + ->using('anthropic', 'claude-3-5-sonnet-20240620') + ->withTools([$tool]) + ->withMaxSteps(3) + ->withPrompt('Use the client tool') + ->asText(); + + expect($response->finishReason)->toBe(\Prism\Prism\Enums\FinishReason::ToolCalls); + expect($response->toolCalls)->toHaveCount(1); + expect($response->toolCalls[0]->name)->toBe('client_tool'); + expect($response->steps)->toHaveCount(1); + }); +}); + describe('exceptions', function (): void { it('throws a RateLimitException if the Anthropic responds with a 429', function (): void { Http::fake([ diff --git a/tests/Providers/Anthropic/StreamTest.php b/tests/Providers/Anthropic/StreamTest.php index 625a3d99d..65977bab9 100644 --- a/tests/Providers/Anthropic/StreamTest.php +++ b/tests/Providers/Anthropic/StreamTest.php @@ -598,6 +598,40 @@ })->throws(PrismRequestTooLargeException::class); }); +describe('client-executed tools', function (): void { + it('stops streaming when client-executed tool is called', function (): void { + FixtureResponse::fakeStreamResponses('v1/messages', 'anthropic/stream-with-client-executed-tool'); + + $tool = Tool::as('client_tool') + ->for('A tool that executes on the client') + ->withStringParameter('input', 'Input parameter'); + + 
$response = Prism::text() + ->using('anthropic', 'claude-3-5-sonnet-20240620') + ->withTools([$tool]) + ->withMaxSteps(3) + ->withPrompt('Use the client tool') + ->asStream(); + + $events = []; + $toolCallFound = false; + + foreach ($response as $event) { + $events[] = $event; + + if ($event instanceof ToolCallEvent) { + $toolCallFound = true; + } + } + + expect($toolCallFound)->toBeTrue(); + + $lastEvent = end($events); + expect($lastEvent)->toBeInstanceOf(StreamEndEvent::class); + expect($lastEvent->finishReason)->toBe(\Prism\Prism\Enums\FinishReason::ToolCalls); + }); +}); + describe('basic stream events', function (): void { it('can generate text with a basic stream', function (): void { FixtureResponse::fakeStreamResponses('v1/messages', 'anthropic/stream-basic-text'); diff --git a/tests/Providers/Anthropic/StructuredWithToolsTest.php b/tests/Providers/Anthropic/StructuredWithToolsTest.php index 9e904d1ce..8cd38e0a4 100644 --- a/tests/Providers/Anthropic/StructuredWithToolsTest.php +++ b/tests/Providers/Anthropic/StructuredWithToolsTest.php @@ -200,6 +200,36 @@ expect($response->toolResults)->toBeArray(); }); + it('stops execution when client-executed tool is called', function (): void { + FixtureResponse::fakeResponseSequence('*', 'anthropic/structured-with-client-executed-tool'); + + $schema = new ObjectSchema( + 'output', + 'the output object', + [new StringSchema('result', 'The result', true)], + ['result'] + ); + + $tool = (new Tool) + ->as('client_tool') + ->for('A tool that executes on the client') + ->withStringParameter('input', 'Input parameter'); + + $response = Prism::structured() + ->using(Provider::Anthropic, 'claude-sonnet-4-0') + ->withSchema($schema) + ->withTools([$tool]) + ->withMaxSteps(3) + ->withProviderOptions(['use_tool_calling' => true]) + ->withPrompt('Use the client tool') + ->asStructured(); + + expect($response->finishReason)->toBe(FinishReason::ToolCalls); + expect($response->toolCalls)->toHaveCount(1); + 
expect($response->toolCalls[0]->name)->toBe('client_tool'); + expect($response->steps)->toHaveCount(1); + }); + it('includes strict field in tool definition when specified', function (): void { Prism::fake(); diff --git a/tests/Providers/DeepSeek/StreamTest.php b/tests/Providers/DeepSeek/StreamTest.php index d38c47ae2..a3cdd2323 100644 --- a/tests/Providers/DeepSeek/StreamTest.php +++ b/tests/Providers/DeepSeek/StreamTest.php @@ -126,6 +126,40 @@ }); }); +describe('client-executed tools', function (): void { + it('stops streaming when client-executed tool is called', function (): void { + FixtureResponse::fakeStreamResponses('chat/completions', 'deepseek/stream-with-client-executed-tool'); + + $tool = Tool::as('client_tool') + ->for('A tool that executes on the client') + ->withStringParameter('input', 'Input parameter'); + + $response = Prism::text() + ->using(Provider::DeepSeek, 'deepseek-chat') + ->withTools([$tool]) + ->withMaxSteps(3) + ->withPrompt('Use the client tool') + ->asStream(); + + $events = []; + $toolCallFound = false; + + foreach ($response as $event) { + $events[] = $event; + + if ($event instanceof ToolCallEvent) { + $toolCallFound = true; + } + } + + expect($toolCallFound)->toBeTrue(); + + $lastEvent = end($events); + expect($lastEvent)->toBeInstanceOf(StreamEndEvent::class); + expect($lastEvent->finishReason)->toBe(FinishReason::ToolCalls); + }); +}); + it('handles max_tokens parameter correctly', function (): void { FixtureResponse::fakeStreamResponses('chat/completions', 'deepseek/stream-max-tokens'); diff --git a/tests/Providers/DeepSeek/TextTest.php b/tests/Providers/DeepSeek/TextTest.php index e6fd3ccb6..2da324961 100644 --- a/tests/Providers/DeepSeek/TextTest.php +++ b/tests/Providers/DeepSeek/TextTest.php @@ -76,6 +76,28 @@ expect($response->finishReason)->toBe(FinishReason::Stop); }); +describe('client-executed tools', function (): void { + it('stops execution when client-executed tool is called', function (): void { + 
FixtureResponse::fakeResponseSequence('v1/chat/completions', 'deepseek/text-with-client-executed-tool'); + + $tool = Tool::as('client_tool') + ->for('A tool that executes on the client') + ->withStringParameter('input', 'Input parameter'); + + $response = Prism::text() + ->using(Provider::DeepSeek, 'deepseek-chat') + ->withTools([$tool]) + ->withMaxSteps(3) + ->withPrompt('Use the client tool') + ->generate(); + + expect($response->finishReason)->toBe(FinishReason::ToolCalls); + expect($response->toolCalls)->toHaveCount(1); + expect($response->toolCalls[0]->name)->toBe('client_tool'); + expect($response->steps)->toHaveCount(1); + }); +}); + it('can generate text using multiple tools and multiple steps', function (): void { FixtureResponse::fakeResponseSequence('v1/chat/completions', 'deepseek/generate-text-with-multiple-tools'); diff --git a/tests/Providers/Gemini/GeminiStreamTest.php b/tests/Providers/Gemini/GeminiStreamTest.php index 5dca1bc9a..ba86212cc 100644 --- a/tests/Providers/Gemini/GeminiStreamTest.php +++ b/tests/Providers/Gemini/GeminiStreamTest.php @@ -229,6 +229,40 @@ }); }); +describe('client-executed tools', function (): void { + it('stops streaming when client-executed tool is called', function (): void { + FixtureResponse::fakeResponseSequence('*', 'gemini/stream-with-client-executed-tool'); + + $tool = Tool::as('client_tool') + ->for('A tool that executes on the client') + ->withStringParameter('input', 'Input parameter'); + + $response = Prism::text() + ->using(Provider::Gemini, 'gemini-1.5-flash') + ->withTools([$tool]) + ->withMaxSteps(3) + ->withPrompt('Use the client tool') + ->asStream(); + + $events = []; + $toolCallFound = false; + + foreach ($response as $event) { + $events[] = $event; + + if ($event instanceof ToolCallEvent) { + $toolCallFound = true; + } + } + + expect($toolCallFound)->toBeTrue(); + + $lastEvent = end($events); + expect($lastEvent)->toBeInstanceOf(StreamEndEvent::class); + 
expect($lastEvent->finishReason)->toBe(FinishReason::ToolCalls); + }); +}); + it('yields ToolCall events before ToolResult events', function (): void { FixtureResponse::fakeResponseSequence('*', 'gemini/stream-with-tools'); diff --git a/tests/Providers/Gemini/GeminiTextTest.php b/tests/Providers/Gemini/GeminiTextTest.php index c07bbed08..a7dc2c638 100644 --- a/tests/Providers/Gemini/GeminiTextTest.php +++ b/tests/Providers/Gemini/GeminiTextTest.php @@ -154,6 +154,29 @@ }); }); +describe('client-executed tools', function (): void { + it('stops execution when client-executed tool is called', function (): void { + FixtureResponse::fakeResponseSequence('*', 'gemini/text-with-client-executed-tool'); + + $tool = (new Tool) + ->as('client_tool') + ->for('A tool that executes on the client') + ->withStringParameter('input', 'Input parameter'); + + $response = Prism::text() + ->using(Provider::Gemini, 'gemini-1.5-flash') + ->withTools([$tool]) + ->withMaxSteps(3) + ->withPrompt('Use the client tool') + ->asText(); + + expect($response->finishReason)->toBe(FinishReason::ToolCalls); + expect($response->toolCalls)->toHaveCount(1); + expect($response->toolCalls[0]->name)->toBe('client_tool'); + expect($response->steps)->toHaveCount(1); + }); +}); + describe('Image support with Gemini', function (): void { it('can send images from path', function (): void { FixtureResponse::fakeResponseSequence('*', 'gemini/image-detection'); diff --git a/tests/Providers/Gemini/StructuredWithToolsTest.php b/tests/Providers/Gemini/StructuredWithToolsTest.php index 36d94de36..bd167bd27 100644 --- a/tests/Providers/Gemini/StructuredWithToolsTest.php +++ b/tests/Providers/Gemini/StructuredWithToolsTest.php @@ -120,6 +120,35 @@ expect($finalStep->structured)->toBeArray(); }); + it('stops execution when client-executed tool is called', function (): void { + FixtureResponse::fakeResponseSequence('*', 'gemini/structured-with-client-executed-tool'); + + $schema = new ObjectSchema( + 'output', + 'the 
output object', + [new StringSchema('result', 'The result', true)], + ['result'] + ); + + $tool = (new Tool) + ->as('client_tool') + ->for('A tool that executes on the client') + ->withStringParameter('input', 'Input parameter'); + + $response = Prism::structured() + ->using(Provider::Gemini, 'gemini-2.0-flash') + ->withSchema($schema) + ->withTools([$tool]) + ->withMaxSteps(3) + ->withPrompt('Use the client tool') + ->asStructured(); + + expect($response->finishReason)->toBe(FinishReason::ToolCalls); + expect($response->toolCalls)->toHaveCount(1); + expect($response->toolCalls[0]->name)->toBe('client_tool'); + expect($response->steps)->toHaveCount(1); + }); + it('returns structured output immediately when no tool calls needed', function (): void { FixtureResponse::fakeResponseSequence('*', 'gemini/structured-without-tool-calls'); diff --git a/tests/Providers/Groq/GroqTextTest.php b/tests/Providers/Groq/GroqTextTest.php index 98ba516f4..3ecba36a7 100644 --- a/tests/Providers/Groq/GroqTextTest.php +++ b/tests/Providers/Groq/GroqTextTest.php @@ -107,6 +107,26 @@ ); }); + it('stops execution when client-executed tool is called', function (): void { + FixtureResponse::fakeResponseSequence('chat/completions', 'groq/text-with-client-executed-tool'); + + $tool = Tool::as('client_tool') + ->for('A tool that executes on the client') + ->withStringParameter('input', 'Input parameter'); + + $response = Prism::text() + ->using('groq', 'llama-3.3-70b-versatile') + ->withTools([$tool]) + ->withMaxSteps(3) + ->withPrompt('Use the client tool') + ->generate(); + + expect($response->finishReason)->toBe(\Prism\Prism\Enums\FinishReason::ToolCalls); + expect($response->toolCalls)->toHaveCount(1); + expect($response->toolCalls[0]->name)->toBe('client_tool'); + expect($response->steps)->toHaveCount(1); + }); + it('handles specific tool choice', function (): void { FixtureResponse::fakeResponseSequence('v1/chat/completions', 'groq/generate-text-with-required-tool-call'); diff --git 
a/tests/Providers/Groq/StreamTest.php b/tests/Providers/Groq/StreamTest.php index b89b61f6c..7482d39d8 100644 --- a/tests/Providers/Groq/StreamTest.php +++ b/tests/Providers/Groq/StreamTest.php @@ -113,6 +113,40 @@ expect($toolResultEvents)->not->toBeEmpty(); }); +describe('client-executed tools', function (): void { + it('stops streaming when client-executed tool is called', function (): void { + FixtureResponse::fakeStreamResponses('openai/v1/chat/completions', 'groq/stream-with-client-executed-tool'); + + $tool = Tool::as('client_tool') + ->for('A tool that executes on the client') + ->withStringParameter('input', 'Input parameter'); + + $response = Prism::text() + ->using(Provider::Groq, 'llama-3.1-70b-versatile') + ->withTools([$tool]) + ->withMaxSteps(3) + ->withPrompt('Use the client tool') + ->asStream(); + + $events = []; + $toolCallFound = false; + + foreach ($response as $event) { + $events[] = $event; + + if ($event instanceof ToolCallEvent) { + $toolCallFound = true; + } + } + + expect($toolCallFound)->toBeTrue(); + + $lastEvent = end($events); + expect($lastEvent)->toBeInstanceOf(StreamEndEvent::class); + expect($lastEvent->finishReason)->toBe(FinishReason::ToolCalls); + }); +}); + it('handles maximum tool call depth exceeded', function (): void { FixtureResponse::fakeStreamResponses('openai/v1/chat/completions', 'groq/stream-with-tools'); diff --git a/tests/Providers/Mistral/MistralTextTest.php b/tests/Providers/Mistral/MistralTextTest.php index 03aa42fd0..09d6df205 100644 --- a/tests/Providers/Mistral/MistralTextTest.php +++ b/tests/Providers/Mistral/MistralTextTest.php @@ -106,6 +106,26 @@ ); }); + it('stops execution when client-executed tool is called', function (): void { + FixtureResponse::fakeResponseSequence('v1/chat/completions', 'mistral/text-with-client-executed-tool'); + + $tool = Tool::as('client_tool') + ->for('A tool that executes on the client') + ->withStringParameter('input', 'Input parameter'); + + $response = Prism::text() + 
->using('mistral', 'mistral-large-latest') + ->withTools([$tool]) + ->withMaxSteps(3) + ->withPrompt('Use the client tool') + ->generate(); + + expect($response->finishReason)->toBe(FinishReason::ToolCalls); + expect($response->toolCalls)->toHaveCount(1); + expect($response->toolCalls[0]->name)->toBe('client_tool'); + expect($response->steps)->toHaveCount(1); + }); + it('handles specific tool choice', function (): void { FixtureResponse::fakeResponseSequence('v1/chat/completions', 'mistral/generate-text-with-required-tool-call'); diff --git a/tests/Providers/Mistral/StreamTest.php b/tests/Providers/Mistral/StreamTest.php index 091e48677..aec373ccf 100644 --- a/tests/Providers/Mistral/StreamTest.php +++ b/tests/Providers/Mistral/StreamTest.php @@ -112,6 +112,40 @@ expect($toolResultEvents)->not->toBeEmpty(); }); +describe('client-executed tools', function (): void { + it('stops streaming when client-executed tool is called', function (): void { + FixtureResponse::fakeStreamResponses('v1/chat/completions', 'mistral/stream-with-client-executed-tool'); + + $tool = Tool::as('client_tool') + ->for('A tool that executes on the client') + ->withStringParameter('input', 'Input parameter'); + + $response = Prism::text() + ->using(Provider::Mistral, 'mistral-large-latest') + ->withTools([$tool]) + ->withMaxSteps(3) + ->withPrompt('Use the client tool') + ->asStream(); + + $events = []; + $toolCallFound = false; + + foreach ($response as $event) { + $events[] = $event; + + if ($event instanceof ToolCallEvent) { + $toolCallFound = true; + } + } + + expect($toolCallFound)->toBeTrue(); + + $lastEvent = end($events); + expect($lastEvent)->toBeInstanceOf(StreamEndEvent::class); + expect($lastEvent->finishReason)->toBe(FinishReason::ToolCalls); + }); +}); + it('handles maximum tool call depth exceeded', function (): void { FixtureResponse::fakeStreamResponses('v1/chat/completions', 'mistral/stream-with-tools-1'); diff --git a/tests/Providers/Ollama/StreamTest.php 
b/tests/Providers/Ollama/StreamTest.php index 0159d9832..8677ddd1a 100644 --- a/tests/Providers/Ollama/StreamTest.php +++ b/tests/Providers/Ollama/StreamTest.php @@ -117,6 +117,40 @@ expect($finishReasonFound)->toBeTrue(); }); +describe('client-executed tools', function (): void { + it('stops streaming when client-executed tool is called', function (): void { + FixtureResponse::fakeStreamResponses('api/chat', 'ollama/stream-with-client-executed-tool'); + + $tool = Tool::as('client_tool') + ->for('A tool that executes on the client') + ->withStringParameter('input', 'Input parameter'); + + $response = Prism::text() + ->using('ollama', 'qwen2.5:14b') + ->withTools([$tool]) + ->withMaxSteps(3) + ->withPrompt('Use the client tool') + ->asStream(); + + $events = []; + $toolCallFound = false; + + foreach ($response as $event) { + $events[] = $event; + + if ($event instanceof ToolCallEvent) { + $toolCallFound = true; + } + } + + expect($toolCallFound)->toBeTrue(); + + $lastEvent = end($events); + expect($lastEvent)->toBeInstanceOf(StreamEndEvent::class); + expect($lastEvent->finishReason)->toBe(FinishReason::ToolCalls); + }); +}); + it('throws a PrismRateLimitedException with a 429 response code', function (): void { Http::fake([ '*' => Http::response( diff --git a/tests/Providers/Ollama/TextTest.php b/tests/Providers/Ollama/TextTest.php index 9b355d7c5..ed667555c 100644 --- a/tests/Providers/Ollama/TextTest.php +++ b/tests/Providers/Ollama/TextTest.php @@ -104,6 +104,28 @@ }); }); +describe('client-executed tools', function (): void { + it('stops execution when client-executed tool is called', function (): void { + FixtureResponse::fakeResponseSequence('api/chat', 'ollama/text-with-client-executed-tool'); + + $tool = Tool::as('client_tool') + ->for('A tool that executes on the client') + ->withStringParameter('input', 'Input parameter'); + + $response = Prism::text() + ->using('ollama', 'qwen2.5:14b') + ->withTools([$tool]) + ->withMaxSteps(3) + ->withPrompt('Use the 
client tool') + ->asText(); + + expect($response->finishReason)->toBe(\Prism\Prism\Enums\FinishReason::ToolCalls); + expect($response->toolCalls)->toHaveCount(1); + expect($response->toolCalls[0]->name)->toBe('client_tool'); + expect($response->steps)->toHaveCount(1); + }); +}); + describe('Thinking parameter', function (): void { it('includes think parameter when thinking is enabled', function (): void { FixtureResponse::fakeResponseSequence('api/chat', 'ollama/text-with-thinking-enabled'); diff --git a/tests/Providers/OpenAI/StreamTest.php b/tests/Providers/OpenAI/StreamTest.php index 0da5a3949..6a47f05aa 100644 --- a/tests/Providers/OpenAI/StreamTest.php +++ b/tests/Providers/OpenAI/StreamTest.php @@ -402,6 +402,40 @@ Http::assertSent(fn (Request $request): bool => $request->data()['parallel_tool_calls'] === false); }); +describe('client-executed tools', function (): void { + it('stops streaming when client-executed tool is called', function (): void { + FixtureResponse::fakeResponseSequence('v1/responses', 'openai/stream-with-client-executed-tool'); + + $tool = Tool::as('client_tool') + ->for('A tool that executes on the client') + ->withStringParameter('input', 'Input parameter'); + + $response = Prism::text() + ->using('openai', 'gpt-4o') + ->withTools([$tool]) + ->withMaxSteps(3) + ->withPrompt('Use the client tool') + ->asStream(); + + $events = []; + $toolCallFound = false; + + foreach ($response as $event) { + $events[] = $event; + + if ($event instanceof ToolCallEvent) { + $toolCallFound = true; + } + } + + expect($toolCallFound)->toBeTrue(); + + $lastEvent = end($events); + expect($lastEvent)->toBeInstanceOf(StreamEndEvent::class); + expect($lastEvent->finishReason)->toBe(\Prism\Prism\Enums\FinishReason::ToolCalls); + }); +}); + it('emits usage information', function (): void { FixtureResponse::fakeResponseSequence('v1/responses', 'openai/stream-basic-text-responses'); diff --git a/tests/Providers/OpenAI/StructuredWithToolsTest.php 
b/tests/Providers/OpenAI/StructuredWithToolsTest.php index 7afd8910f..f0661be6a 100644 --- a/tests/Providers/OpenAI/StructuredWithToolsTest.php +++ b/tests/Providers/OpenAI/StructuredWithToolsTest.php @@ -148,6 +148,35 @@ expect($response->steps)->toHaveCount(1); }); + it('stops execution when client-executed tool is called', function (): void { + FixtureResponse::fakeResponseSequence('v1/responses', 'openai/structured-with-client-executed-tool'); + + $schema = new ObjectSchema( + 'output', + 'the output object', + [new StringSchema('result', 'The result', true)], + ['result'] + ); + + $tool = (new Tool) + ->as('client_tool') + ->for('A tool that executes on the client') + ->withStringParameter('input', 'Input parameter'); + + $response = Prism::structured() + ->using(Provider::OpenAI, 'gpt-4o') + ->withSchema($schema) + ->withTools([$tool]) + ->withMaxSteps(3) + ->withPrompt('Use the client tool') + ->asStructured(); + + expect($response->finishReason)->toBe(FinishReason::ToolCalls); + expect($response->toolCalls)->toHaveCount(1); + expect($response->toolCalls[0]->name)->toBe('client_tool'); + expect($response->steps)->toHaveCount(1); + }); + it('handles tool orchestration correctly with multiple tool types', function (): void { FixtureResponse::fakeResponseSequence('v1/responses', 'openai/structured-with-tool-orchestration'); diff --git a/tests/Providers/OpenAI/TextTest.php b/tests/Providers/OpenAI/TextTest.php index b7fc8658d..25fe3e1c6 100644 --- a/tests/Providers/OpenAI/TextTest.php +++ b/tests/Providers/OpenAI/TextTest.php @@ -317,6 +317,28 @@ }); }); +describe('client-executed tools', function (): void { + it('stops execution when client-executed tool is called', function (): void { + FixtureResponse::fakeResponseSequence('v1/responses', 'openai/text-with-client-executed-tool'); + + $tool = Tool::as('client_tool') + ->for('A tool that executes on the client') + ->withStringParameter('input', 'Input parameter'); + + $response = Prism::text() + 
->using('openai', 'gpt-4o') + ->withTools([$tool]) + ->withMaxSteps(3) + ->withPrompt('Use the client tool') + ->asText(); + + expect($response->finishReason)->toBe(FinishReason::ToolCalls); + expect($response->toolCalls)->toHaveCount(1); + expect($response->toolCalls[0]->name)->toBe('client_tool'); + expect($response->steps)->toHaveCount(1); + }); +}); + it('sets usage correctly with automatic caching', function (): void { FixtureResponse::fakeResponseSequence( 'v1/responses', diff --git a/tests/Providers/OpenRouter/StreamTest.php b/tests/Providers/OpenRouter/StreamTest.php index 99925d1da..24d510820 100644 --- a/tests/Providers/OpenRouter/StreamTest.php +++ b/tests/Providers/OpenRouter/StreamTest.php @@ -243,6 +243,40 @@ expect($streamEndEvents)->not->toBeEmpty(); }); +describe('client-executed tools', function (): void { + it('stops streaming when client-executed tool is called', function (): void { + FixtureResponse::fakeStreamResponses('v1/chat/completions', 'openrouter/stream-with-client-executed-tool'); + + $tool = Tool::as('client_tool') + ->for('A tool that executes on the client') + ->withStringParameter('input', 'Input parameter'); + + $response = Prism::text() + ->using(Provider::OpenRouter, 'openai/gpt-4-turbo') + ->withTools([$tool]) + ->withMaxSteps(3) + ->withPrompt('Use the client tool') + ->asStream(); + + $events = []; + $toolCallFound = false; + + foreach ($response as $event) { + $events[] = $event; + + if ($event instanceof ToolCallEvent) { + $toolCallFound = true; + } + } + + expect($toolCallFound)->toBeTrue(); + + $lastEvent = end($events); + expect($lastEvent)->toBeInstanceOf(StreamEndEvent::class); + expect($lastEvent->finishReason)->toBe(FinishReason::ToolCalls); + }); +}); + it('can handle reasoning/thinking tokens in streaming', function (): void { FixtureResponse::fakeStreamResponses('v1/chat/completions', 'openrouter/stream-text-with-reasoning'); diff --git a/tests/Providers/OpenRouter/TextTest.php 
b/tests/Providers/OpenRouter/TextTest.php index 32a751716..ce09d4bb0 100644 --- a/tests/Providers/OpenRouter/TextTest.php +++ b/tests/Providers/OpenRouter/TextTest.php @@ -127,6 +127,28 @@ expect($response->finishReason)->toBe(FinishReason::Stop); }); +describe('client-executed tools', function (): void { + it('stops execution when client-executed tool is called', function (): void { + FixtureResponse::fakeResponseSequence('v1/chat/completions', 'openrouter/text-with-client-executed-tool'); + + $tool = Tool::as('client_tool') + ->for('A tool that executes on the client') + ->withStringParameter('input', 'Input parameter'); + + $response = Prism::text() + ->using(Provider::OpenRouter, 'openai/gpt-4-turbo') + ->withTools([$tool]) + ->withMaxSteps(3) + ->withPrompt('Use the client tool') + ->asText(); + + expect($response->finishReason)->toBe(FinishReason::ToolCalls); + expect($response->toolCalls)->toHaveCount(1); + expect($response->toolCalls[0]->name)->toBe('client_tool'); + expect($response->steps)->toHaveCount(1); + }); +}); + it('forwards advanced provider options to openrouter', function (): void { FixtureResponse::fakeResponseSequence('v1/chat/completions', 'openrouter/generate-text-with-a-prompt'); diff --git a/tests/Providers/XAI/StreamTest.php b/tests/Providers/XAI/StreamTest.php index d4145ba92..4a8977a06 100644 --- a/tests/Providers/XAI/StreamTest.php +++ b/tests/Providers/XAI/StreamTest.php @@ -124,6 +124,40 @@ }); }); +describe('client-executed tools', function (): void { + it('stops streaming when client-executed tool is called', function (): void { + FixtureResponse::fakeResponseSequence('v1/chat/completions', 'xai/stream-with-client-executed-tool'); + + $tool = Tool::as('client_tool') + ->for('A tool that executes on the client') + ->withStringParameter('input', 'Input parameter'); + + $response = Prism::text() + ->using('xai', 'grok-4') + ->withTools([$tool]) + ->withMaxSteps(3) + ->withPrompt('Use the client tool') + ->asStream(); + + $events = 
[]; + $toolCallFound = false; + + foreach ($response as $event) { + $events[] = $event; + + if ($event instanceof ToolCallEvent) { + $toolCallFound = true; + } + } + + expect($toolCallFound)->toBeTrue(); + + $lastEvent = end($events); + expect($lastEvent)->toBeInstanceOf(StreamEndEvent::class); + expect($lastEvent->finishReason)->toBe(FinishReason::ToolCalls); + }); +}); + it('handles max_tokens parameter correctly', function (): void { FixtureResponse::fakeResponseSequence('v1/chat/completions', 'xai/stream-basic-text-responses'); diff --git a/tests/Providers/XAI/XAITextTest.php b/tests/Providers/XAI/XAITextTest.php index 5665b2c65..52a19a6a5 100644 --- a/tests/Providers/XAI/XAITextTest.php +++ b/tests/Providers/XAI/XAITextTest.php @@ -142,6 +142,28 @@ }); }); +describe('client-executed tools', function (): void { + it('stops execution when client-executed tool is called', function (): void { + FixtureResponse::fakeResponseSequence('chat/completions', 'xai/text-with-client-executed-tool'); + + $tool = Tool::as('client_tool') + ->for('A tool that executes on the client') + ->withStringParameter('input', 'Input parameter'); + + $response = Prism::text() + ->using(Provider::XAI, 'grok-beta') + ->withTools([$tool]) + ->withMaxSteps(3) + ->withPrompt('Use the client tool') + ->asText(); + + expect($response->finishReason)->toBe(FinishReason::ToolCalls); + expect($response->toolCalls)->toHaveCount(1); + expect($response->toolCalls[0]->name)->toBe('client_tool'); + expect($response->steps)->toHaveCount(1); + }); +}); + describe('Image support with XAI', function (): void { it('can send images from path', function (): void { FixtureResponse::fakeResponseSequence('chat/completions', 'xai/image-detection'); diff --git a/tests/ToolTest.php b/tests/ToolTest.php index f7865ec8b..61abf4a67 100644 --- a/tests/ToolTest.php +++ b/tests/ToolTest.php @@ -181,3 +181,15 @@ public function __invoke(string $query): string $searchTool->handle('What time is the event?'); }); + +it('can 
throw a prism exception when handle is called on a tool without a handler', function (): void { + $tool = (new Tool) + ->as('client_tool') + ->for('A tool without a handler') + ->withParameter(new StringSchema('query', 'the search query')); + + $this->expectException(PrismException::class); + $this->expectExceptionMessage('Tool (client_tool) has no handler defined'); + + $tool->handle('test'); +});