From 58194e8ce0430a3ebfc30e7c544ed9642e9160ca Mon Sep 17 00:00:00 2001 From: siddharthsambharia-portkey Date: Fri, 27 Feb 2026 17:48:52 +0530 Subject: [PATCH] docs: Update core API reference for Model Catalog --- .../inference-api/authentication.mdx | 6 +- .../inference-api/gateway-for-other-apis.mdx | 24 +++---- api-reference/inference-api/headers.mdx | 66 ++++++++++--------- api-reference/sdk/c-sharp.mdx | 4 +- api-reference/sdk/python.mdx | 14 ++-- 5 files changed, 58 insertions(+), 56 deletions(-) diff --git a/api-reference/inference-api/authentication.mdx b/api-reference/inference-api/authentication.mdx index 2971b75d..2116fb2c 100644 --- a/api-reference/inference-api/authentication.mdx +++ b/api-reference/inference-api/authentication.mdx @@ -31,7 +31,7 @@ import Portkey from 'portkey-ai' const portkey = new Portkey({ apiKey: "PORTKEY_API_KEY", // Replace with your actual API key - virtualKey: "VIRTUAL_KEY" // Optional: Use for virtual key management + provider: "@openai-prod" // Optional: AI Provider slug from Model Catalog }) const chatCompletion = await portkey.chat.completions.create({ @@ -48,7 +48,7 @@ from portkey_ai import Portkey client = Portkey( api_key="PORTKEY_API_KEY", # Replace with your actual API key - provider="@VIRTUAL_KEY" # Optional: Use if virtual keys are set up + provider="@openai-prod" # Optional: AI Provider slug from Model Catalog ) chat_completion = client.chat.completions.create( @@ -65,7 +65,7 @@ print(chat_completion.choices[0].message["content"]) curl https://api.portkey.ai/v1/chat/completions \ -H "Content-Type: application/json" \ -H "x-portkey-api-key: $PORTKEY_API_KEY" \ - -H "x-portkey-virtual-key: $VIRTUAL_KEY" \ + -H "x-portkey-provider: @openai-prod" \ -d '{ "model": "gpt-4o", "messages": [ diff --git a/api-reference/inference-api/gateway-for-other-apis.mdx b/api-reference/inference-api/gateway-for-other-apis.mdx index 6c55d7c5..9d829f9d 100644 --- a/api-reference/inference-api/gateway-for-other-apis.mdx +++ 
b/api-reference/inference-api/gateway-for-other-apis.mdx @@ -39,8 +39,8 @@ Create or log in to your Portkey account. Grab your account's API key from the [ Choose one of these authentication methods: - -Portkey integrates with 40+ LLM providers. Add your provider credentials (such as API key) to Portkey, and get a virtual key that you can use to authenticate and send your requests. + +Add your provider credentials to [Model Catalog](https://app.portkey.ai/model-catalog) and use the AI Provider slug to authenticate your requests. @@ -48,7 +48,7 @@ Portkey integrates with 40+ LLM providers. Add your provider credentials (such a curl https://api.portkey.ai/v1/rerank \ -H "Content-Type: application/json" \ -H "x-portkey-api-key: $PORTKEY_API_KEY" \ - -H "x-portkey-virtual-key: $PORTKEY_PROVIDER" \ + -H "x-portkey-provider: @cohere-prod" \ ``` ```py Python @@ -56,7 +56,7 @@ from portkey_ai import Portkey portkey = Portkey( api_key = "PORTKEY_API_KEY", - virtual_key = "PROVIDER" + provider = "@cohere-prod" ) ``` @@ -65,15 +65,15 @@ import Portkey from 'portkey-ai'; const portkey = new Portkey({ apiKey: 'PORTKEY_API_KEY', - virtualKey: 'PROVIDER' + provider: '@cohere-prod' }); ``` -Creating virtual keys lets you: -- Manage all credentials in one place +Model Catalog lets you: +- Manage all provider credentials in one place +- Set budget limits & rate limits per provider - Rotate between different provider keys -- Set custom budget limits & rate limits per key @@ -165,7 +165,7 @@ curl --request POST \ --url https://api.portkey.ai/v1/rerank \ --header 'Content-Type: application/json' \ --header 'x-portkey-api-key: $PORTKEY_API_KEY' \ - --header 'x-portkey-virtual-key: $COHERE_VIRTUAL_KEY' \ + --header 'x-portkey-provider: @cohere-prod' \ --data '{ "model": "rerank-english-v2.0", "query": "What is machine learning?", @@ -181,7 +181,7 @@ curl --request GET \ --url https://api.portkey.ai/v1/collections \ --header 'Content-Type: application/json' \ --header 'x-portkey-api-key: 
$PORTKEY_API_KEY' \ - --header 'x-portkey-virtual-key: $PROVIDER' + --header 'x-portkey-provider: @provider-prod' ``` ```bash PUT @@ -189,7 +189,7 @@ curl --request PUT \ --url https://api.portkey.ai/v1/collections/my-collection \ --header 'Content-Type: application/json' \ --header 'x-portkey-api-key: $PORTKEY_API_KEY' \ - --header 'x-portkey-virtual-key: $PROVIDER' \ + --header 'x-portkey-provider: @provider-prod' \ --data '{ "metadata": { "description": "Updated collection description" @@ -202,7 +202,7 @@ curl --request DELETE \ --url https://api.portkey.ai/v1/collections/my-collection \ --header 'Content-Type: application/json' \ --header 'x-portkey-api-key: $PORTKEY_API_KEY' \ - --header 'x-portkey-virtual-key: $PROVIDER' + --header 'x-portkey-provider: @provider-prod' ``` diff --git a/api-reference/inference-api/headers.mdx b/api-reference/inference-api/headers.mdx index 13a4e895..8b9ace84 100644 --- a/api-reference/inference-api/headers.mdx +++ b/api-reference/inference-api/headers.mdx @@ -90,10 +90,12 @@ const portkey = new Portkey({ -### 2. Virtual Key +### 2. AI Provider - -Save your provider auth on Portkey and use a virtual key to directly make a call. [Docs](/product/ai-gateway/virtual-keys)) + +Specify your AI Provider slug (from [Model Catalog](/product/model-catalog)) to route requests through a managed provider. Use the `@provider-slug` format. ([Docs](/product/model-catalog)) + +The `x-portkey-virtual-key` / `virtual_key` / `virtualKey` parameter is the legacy equivalent and still works for backward compatibility. 
@@ -102,7 +104,7 @@ Save your provider auth on Portkey and use a virtual key to directly make a call ```sh cURL {3} curl https://api.portkey.ai/v1/chat/completions \ -H "x-portkey-api-key: $PORTKEY_API_KEY" \ - -H "x-portkey-virtual-key: openai-virtual-key" \ + -H "x-portkey-provider: @openai-prod" \ ``` ```py Python {5} @@ -110,7 +112,7 @@ from portkey_ai import Portkey portkey = Portkey( api_key = "PORTKEY_API_KEY", # defaults to os.environ.get("PORTKEY_API_KEY") - virtual_key = "openai-virtual-key" + provider = "@openai-prod" # Your AI Provider slug from Model Catalog ) ``` @@ -119,7 +121,7 @@ import Portkey from 'portkey-ai'; const portkey = new Portkey({ apiKey: 'PORTKEY_API_KEY', // defaults to process.env["PORTKEY_API_KEY"] - virtualKey: 'openai-virtual-key' + provider: '@openai-prod' // Your AI Provider slug from Model Catalog }); ``` @@ -229,7 +231,7 @@ An ID you can pass to refer to one or more requests later on. If not provided, P ```sh cURL {4} curl https://api.portkey.ai/v1/chat/completions \ -H "x-portkey-api-key: $PORTKEY_API_KEY" \ - -H "x-portkey-virtual-key: openai-virtual-key" \ + -H "x-portkey-provider: @openai-prod" \ -H "x-portkey-trace-id: test-request" \ ``` @@ -238,7 +240,7 @@ from portkey_ai import Portkey portkey = Portkey( api_key = "PORTKEY_API_KEY", # defaults to os.environ.get("PORTKEY_API_KEY") - virtual_key = "openai-virtual-key", + provider = "@openai-prod", trace_id = "test-request" ) ``` @@ -248,7 +250,7 @@ import Portkey from 'portkey-ai'; const portkey = new Portkey({ apiKey: 'PORTKEY_API_KEY', // defaults to process.env["PORTKEY_API_KEY"] - virtualKey: "openai-virtual-key", + provider: "@openai-prod", traceId: "test-request" }); ``` @@ -266,7 +268,7 @@ You can include the special metadata type `_user` to associate requests with spe ```sh cURL {4} curl https://api.portkey.ai/v1/chat/completions \ -H "x-portkey-api-key: $PORTKEY_API_KEY" \ - -H "x-portkey-virtual-key: openai-virtual-key" \ + -H "x-portkey-provider: @openai-prod" 
\ -H "x-portkey-metadata: {'_user': 'user_id_123', 'foo': 'bar'}" \ ``` @@ -275,7 +277,7 @@ from portkey_ai import Portkey portkey = Portkey( api_key = "PORTKEY_API_KEY", # defaults to os.environ.get("PORTKEY_API_KEY") - virtual_key = "openai-virtual-key", + provider = "@openai-prod", - metadata = {"_user": "user_id_123", "foo": "bar"}" + metadata = {"_user": "user_id_123", "foo": "bar"} ) ``` @@ -285,7 +287,7 @@ import Portkey from 'portkey-ai'; const portkey = new Portkey({ apiKey: 'PORTKEY_API_KEY', // defaults to process.env["PORTKEY_API_KEY"] - virtualKey: "openai-virtual-key", + provider: "@openai-prod", - metadata: {"_user": "user_id_123", "foo": "bar"}" + metadata: {"_user": "user_id_123", "foo": "bar"} }); ``` @@ -303,7 +305,7 @@ Expects `true` or `false` See the caching documentation for more information. ([ ```sh cURL {4} curl https://api.portkey.ai/v1/chat/completions \ -H "x-portkey-api-key: $PORTKEY_API_KEY" \ - -H "x-portkey-virtual-key: openai-virtual-key" \ + -H "x-portkey-provider: @openai-prod" \ -H "x-portkey-cache-force-refresh: true" \ ``` @@ -312,7 +314,7 @@ from portkey_ai import Portkey portkey = Portkey( api_key = "PORTKEY_API_KEY", # defaults to os.environ.get("PORTKEY_API_KEY") - virtual_key = "openai-virtual-key", + provider = "@openai-prod", cache_force_refresh = True ) ``` @@ -322,7 +324,7 @@ import Portkey from 'portkey-ai'; const portkey = new Portkey({ apiKey: 'PORTKEY_API_KEY', // defaults to process.env["PORTKEY_API_KEY"] - virtualKey: "openai-virtual-key", + provider: "@openai-prod", - cacheForceRefresh: True + cacheForceRefresh: true }); ``` @@ -339,7 +341,7 @@ Partition your cache store based on custom strings, ignoring metadata and other ```sh cURL {4} curl https://api.portkey.ai/v1/chat/completions \ -H "x-portkey-api-key: $PORTKEY_API_KEY" \ - -H "x-portkey-virtual-key: openai-virtual-key" \ + -H "x-portkey-provider: @openai-prod" \ -H "x-portkey-cache-namespace: any-string" \ ``` @@ -348,7 +350,7 @@ from portkey_ai import Portkey portkey = Portkey( api_key = "PORTKEY_API_KEY", # defaults to os.environ.get("PORTKEY_API_KEY") - virtual_key = 
"openai-virtual-key", + provider = "@openai-prod", cache_namespace = "any-string" ) ``` @@ -358,7 +360,7 @@ import Portkey from 'portkey-ai'; const portkey = new Portkey({ apiKey: 'PORTKEY_API_KEY', // defaults to process.env["PORTKEY_API_KEY"] - virtualKey: "openai-virtual-key", + provider: "@openai-prod", cacheNamespace: "any-string" }); ``` @@ -375,7 +377,7 @@ Set timeout after which a request automatically terminates. The time is set in m ```sh cURL {4} curl https://api.portkey.ai/v1/chat/completions \ -H "x-portkey-api-key: $PORTKEY_API_KEY" \ - -H "x-portkey-virtual-key: openai-virtual-key" \ + -H "x-portkey-provider: @openai-prod" \ -H "x-portkey-request-timeout: 3000" \ ``` @@ -384,7 +386,7 @@ from portkey_ai import Portkey portkey = Portkey( api_key = "PORTKEY_API_KEY", # defaults to os.environ.get("PORTKEY_API_KEY") - virtual_key = "openai-virtual-key", + provider = "@openai-prod", request_timeout = 3000 ) ``` @@ -394,7 +396,7 @@ import Portkey from 'portkey-ai'; const portkey = new Portkey({ apiKey: 'PORTKEY_API_KEY', // defaults to process.env["PORTKEY_API_KEY"] - virtualKey: "openai-virtual-key", + provider: "@openai-prod", - reqiestTimeout: 3000 + requestTimeout: 3000 }); ``` @@ -416,7 +418,7 @@ Pass all the headers you want to forward directly in this array. 
([Docs](https:/ ```sh cURL {4-6} curl https://api.portkey.ai/v1/chat/completions \ -H "x-portkey-api-key: $PORTKEY_API_KEY" \ - -H "x-portkey-virtual-key: openai-virtual-key" \ + -H "x-portkey-provider: @openai-prod" \ -H "X-Custom-Header: ...."\ -H "Another-Header: ....."\ -H "x-portkey-forward-headers: ['X-Custom-Header', 'Another-Header']" \ @@ -427,7 +429,7 @@ from portkey_ai import Portkey portkey = Portkey( api_key = "PORTKEY_API_KEY", # defaults to os.environ.get("PORTKEY_API_KEY") - virtual_key = "openai-virtual-key", + provider = "@openai-prod", X_Custom_Header = "....", Another_Header = "....", # The values in forward_headers list must be the original header names @@ -440,7 +442,7 @@ import Portkey from 'portkey-ai'; const portkey = new Portkey({ apiKey: 'PORTKEY_API_KEY', // defaults to process.env["PORTKEY_API_KEY"] - virtualKey: "openai-virtual-key", + provider: "@openai-prod", CustomHeader: "....", AnotherHeader: "....", forwardHeaders: ['CustomHeader', 'AnotherHeader'] @@ -494,7 +496,7 @@ Portkey adheres to language-specific naming conventions: | Parameter | Type | Key | | :--- | :--- | :--- | | **API Key** Your Portkey account's API Key. | stringrequired | `apiKey` | -| **Virtual Key** The virtual key created from Portkey's vault for a specific provider | string | `virtualKey` | +| **Virtual Key** *(Legacy — use `provider` with `@provider-slug` instead)* | string | `virtualKey` | | **Config** The slug or [config object](/api-reference/inference-api/config-object) to use | stringobject | `config` | | **Provider** The AI provider to use for your calls. ([supported providers](/integrations/llms#supported-ai-providers)). | string | `provider` | | **Base URL** You can edit the URL of the gateway to use. 
Needed if you're [self-hosting the AI gateway](https://github.com/Portkey-AI/gateway/blob/main/docs/installation-deployments.md) | string | `baseURL` | @@ -514,7 +516,7 @@ Portkey adheres to language-specific naming conventions: | Parameter | Type | Key | | :--- | :--- | :--- | | **API Key** Your Portkey account's API Key. | stringrequired | `api_key` | -| **Virtual Key** The virtual key created from Portkey's vault for a specific provider | string | `virtual_key` | +| **Virtual Key** *(Legacy — use `provider` with `@provider-slug` instead)* | string | `virtual_key` | | **Config** The slug or [config object](/api-reference/inference-api/config-object) to use | stringobject | `config` | | **Provider** The AI provider to use for your calls. ([supported providers](/integrations/llms#supported-ai-providers)). | string | `provider` | | **Base URL** You can edit the URL of the gateway to use. Needed if you're [self-hosting the AI gateway](https://github.com/Portkey-AI/gateway/blob/main/docs/installation-deployments.md) | string | `base_url` | @@ -532,7 +534,7 @@ Portkey adheres to language-specific naming conventions: | Parameter | Type | Header Key | | :--- | :--- | :--- | | **API Key** Your Portkey account's API Key. | stringrequired | `x-portkey-api-key` | -| **Virtual Key** The virtual key created from Portkey's vault for a specific provider | string | `x-portkey-virtual-key` | +| **Virtual Key** *(Legacy — use `x-portkey-provider` with `@provider-slug` instead)* | string | `x-portkey-virtual-key` | | **Config** The slug or [config object](/api-reference/inference-api/config-object) to use | string | `x-portkey-config` | | **Provider** The AI provider to use for your calls. ([supported providers](/integrations/llms#supported-ai-providers)). | string | `x-portkey-provider` | | **Base URL** You can edit the URL of the gateway to use. 
Needed if you're [self-hosting the AI gateway](https://github.com/Portkey-AI/gateway/blob/main/docs/installation-deployments.md) | string | Change the request URL | @@ -558,7 +560,7 @@ You can send these headers in multiple ways: curl https://api.portkey.ai/v1/chat/completions \ -H "Content-Type: application/json" \ -H "x-portkey-api-key: PORTKEY_API_KEY" \ - -H "x-portkey-virtual-key: VIRTUAL_KEY" \ + -H "x-portkey-provider: @openai-prod" \ -H "x-portkey-trace-id: your_trace_id" \ -H "x-portkey-metadata: {\"_user\": \"user_12345\"}" \ -d '{ @@ -572,7 +574,7 @@ from portkey_ai import Portkey portkey = Portkey( api_key="PORTKEY_API_KEY", - provider="@VIRTUAL_KEY", + provider="@openai-prod", config="CONFIG_ID" ) @@ -592,7 +594,7 @@ import Portkey from 'portkey-ai'; const portkey = new Portkey({ apiKey: "PORTKEY_API_KEY", - virtualKey: "VIRTUAL_KEY", + provider: "@openai-prod", config: "CONFIG_ID" }); @@ -619,7 +621,7 @@ client = OpenAI( base_url="https://api.portkey.ai/v1", default_headers=createHeaders({ "apiKey": "PORTKEY_API_KEY", - "virtualKey": "VIRTUAL_KEY" + "provider": "@openai-prod" }) ) @@ -644,7 +646,7 @@ const client = new OpenAI({ baseURL: "https://api.portkey.ai/v1", defaultHeaders: createHeaders({ apiKey: "PORTKEY_API_KEY", - virtualKey: "VIRTUAL_KEY" + provider: "@openai-prod" }) }); diff --git a/api-reference/sdk/c-sharp.mdx b/api-reference/sdk/c-sharp.mdx index 3dea881d..dc1d7f6d 100644 --- a/api-reference/sdk/c-sharp.mdx +++ b/api-reference/sdk/c-sharp.mdx @@ -432,7 +432,7 @@ messages.Add(new AssistantChatMessage(completion)); ``` -Switching providers is just a matter of swapping out your virtual key. Change the virtual key to Anthropic, set the model name, and start making requests to Anthropic from the OpenAI .NET library. +Switching providers is just a matter of changing your AI Provider slug. Change it to Anthropic, set the model name, and start making requests to Anthropic from the OpenAI .NET library. 
```csharp {41,44} [expandable] using OpenAI; @@ -487,7 +487,7 @@ public class Program ``` -Similarly, just change your virtual key to Vertex virtual key: +Similarly, just change your provider slug to your Vertex AI provider: ```csharp {41,44} [expandable] using OpenAI; diff --git a/api-reference/sdk/python.mdx b/api-reference/sdk/python.mdx index 6437e51f..f83a9c25 100644 --- a/api-reference/sdk/python.mdx +++ b/api-reference/sdk/python.mdx @@ -45,7 +45,7 @@ from portkey_ai import Portkey client = Portkey( api_key="your_api_key_here", # Or use the env var PORTKEY_API_KEY - provider="@your_virtual_key_here" # Or use config="cf-***" + provider="@openai-prod" # Or use config="cf-***" ) response = client.chat.completions.create( @@ -54,20 +54,20 @@ response = client.chat.completions.create( ) ``` - You can use either a Virtual Key or a Config object to select your AI provider. Find more info on different authentication mechanisms [here](/api-reference/inference-api/headers#provider-authentication). + Use an AI Provider slug or a Config object to select your AI provider. Find more info on different authentication mechanisms [here](/api-reference/inference-api/headers#provider-authentication). ## Authentication & Configuration The SDK requires: - **Portkey API Key**: Your Portkey API key (env var `PORTKEY_API_KEY` recommended) - **Provider Authentication**: - - **Virtual Key**: The [Virtual Key](/product/ai-gateway/virtual-keys#using-virtual-keys) of your chosen AI provider + - **Provider Slug**: The [AI Provider](/product/model-catalog) slug (e.g. `@openai-prod`) from Model Catalog - **Config**: The [Config object](/api-reference/inference-api/config-object) or config slug for advanced routing - **Provider Slug + Auth Headers**: Useful if you do not want to save your API keys to Portkey and make direct requests. 
```python -# With Virtual Key -portkey = Portkey(api_key="...", provider="@...") +# With AI Provider slug +portkey = Portkey(api_key="...", provider="@openai-prod") # With Config portkey = Portkey(api_key="...", config="cf-***") @@ -87,7 +87,7 @@ from portkey_ai import AsyncPortkey portkey = AsyncPortkey( api_key="PORTKEY_API_KEY", - provider="@VIRTUAL_KEY" + provider="@openai-prod" ) async def main(): @@ -117,7 +117,7 @@ custom_client = httpx.Client(verify=False) portkey = Portkey( api_key="your_api_key_here", - provider="@your_virtual_key_here", + provider="@openai-prod", http_client=custom_client )