From 15dec35d373b2fb76a737cacb02b81335e65bf20 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 12 Feb 2026 16:02:28 +0000 Subject: [PATCH 1/2] feat(api): Add Embeddings API --- .stats.yml | 8 +- api.md | 27 +++ src/perplexity/_client.py | 85 ++++++- src/perplexity/resources/__init__.py | 28 +++ .../resources/contextualized_embeddings.py | 228 ++++++++++++++++++ src/perplexity/resources/embeddings.py | 220 +++++++++++++++++ src/perplexity/types/__init__.py | 11 + .../contextualized_embedding_create_params.py | 39 +++ ...ontextualized_embedding_create_response.py | 25 ++ .../types/embedding_create_params.py | 37 +++ .../types/embedding_create_response.py | 25 ++ src/perplexity/types/shared/__init__.py | 3 + .../shared/contextualized_embedding_object.py | 21 ++ .../types/shared/embedding_object.py | 24 ++ .../types/shared/embeddings_usage.py | 34 +++ .../test_contextualized_embeddings.py | 120 +++++++++ tests/api_resources/test_embeddings.py | 120 +++++++++ 17 files changed, 1050 insertions(+), 5 deletions(-) create mode 100644 src/perplexity/resources/contextualized_embeddings.py create mode 100644 src/perplexity/resources/embeddings.py create mode 100644 src/perplexity/types/contextualized_embedding_create_params.py create mode 100644 src/perplexity/types/contextualized_embedding_create_response.py create mode 100644 src/perplexity/types/embedding_create_params.py create mode 100644 src/perplexity/types/embedding_create_response.py create mode 100644 src/perplexity/types/shared/contextualized_embedding_object.py create mode 100644 src/perplexity/types/shared/embedding_object.py create mode 100644 src/perplexity/types/shared/embeddings_usage.py create mode 100644 tests/api_resources/test_contextualized_embeddings.py create mode 100644 tests/api_resources/test_embeddings.py diff --git a/.stats.yml b/.stats.yml index 9ee7c51..f4db3cd 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ 
-configured_endpoints: 6 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/perplexity-ai%2Fperplexity-3ee0511fa1bf59b2bb44d947f7d884fd8522ef872d33bab141874941b76f1dd7.yml -openapi_spec_hash: 394bbfe74954625b70de9c85d553e3d0 -config_hash: c7d506cdee510785b58defa1a626e20b +configured_endpoints: 8 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/perplexity-ai%2Fperplexity-78325ecc9bdc8e9850866fcdd3be3d209b06f151059c774afc7e6005a1775f09.yml +openapi_spec_hash: 19a34c8ddd46f81dd0b0850af5ee42f3 +config_hash: c3b93f1bb8fa365d5a83e83b7e13e6c4 diff --git a/api.md b/api.md index c297275..9884d1d 100644 --- a/api.md +++ b/api.md @@ -6,6 +6,9 @@ from perplexity.types import ( ChatMessageInput, ChatMessageOutput, Choice, + ContextualizedEmbeddingObject, + EmbeddingObject, + EmbeddingsUsage, JsonSchemaFormat, ResponseFormat, SearchResult, @@ -65,6 +68,30 @@ Methods: - client.responses.create(\*\*params) -> ResponseCreateResponse +# Embeddings + +Types: + +```python +from perplexity.types import EmbeddingCreateResponse +``` + +Methods: + +- client.embeddings.create(\*\*params) -> EmbeddingCreateResponse + +# ContextualizedEmbeddings + +Types: + +```python +from perplexity.types import ContextualizedEmbeddingCreateResponse +``` + +Methods: + +- client.contextualized_embeddings.create(\*\*params) -> ContextualizedEmbeddingCreateResponse + # Async ## Chat diff --git a/src/perplexity/_client.py b/src/perplexity/_client.py index 90b6191..bd84502 100644 --- a/src/perplexity/_client.py +++ b/src/perplexity/_client.py @@ -31,11 +31,16 @@ ) if TYPE_CHECKING: - from .resources import chat, async_, search, responses + from .resources import chat, async_, search, responses, embeddings, contextualized_embeddings from .resources.search import SearchResource, AsyncSearchResource from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.responses import ResponsesResource, AsyncResponsesResource + from 
.resources.embeddings import EmbeddingsResource, AsyncEmbeddingsResource from .resources.async_.async_ import AsyncResource, AsyncAsyncResource + from .resources.contextualized_embeddings import ( + ContextualizedEmbeddingsResource, + AsyncContextualizedEmbeddingsResource, + ) __all__ = [ "Timeout", @@ -124,6 +129,18 @@ def responses(self) -> ResponsesResource: return ResponsesResource(self) + @cached_property + def embeddings(self) -> EmbeddingsResource: + from .resources.embeddings import EmbeddingsResource + + return EmbeddingsResource(self) + + @cached_property + def contextualized_embeddings(self) -> ContextualizedEmbeddingsResource: + from .resources.contextualized_embeddings import ContextualizedEmbeddingsResource + + return ContextualizedEmbeddingsResource(self) + @cached_property def async_(self) -> AsyncResource: from .resources.async_ import AsyncResource @@ -318,6 +335,18 @@ def responses(self) -> AsyncResponsesResource: return AsyncResponsesResource(self) + @cached_property + def embeddings(self) -> AsyncEmbeddingsResource: + from .resources.embeddings import AsyncEmbeddingsResource + + return AsyncEmbeddingsResource(self) + + @cached_property + def contextualized_embeddings(self) -> AsyncContextualizedEmbeddingsResource: + from .resources.contextualized_embeddings import AsyncContextualizedEmbeddingsResource + + return AsyncContextualizedEmbeddingsResource(self) + @cached_property def async_(self) -> AsyncAsyncResource: from .resources.async_ import AsyncAsyncResource @@ -461,6 +490,18 @@ def responses(self) -> responses.ResponsesResourceWithRawResponse: return ResponsesResourceWithRawResponse(self._client.responses) + @cached_property + def embeddings(self) -> embeddings.EmbeddingsResourceWithRawResponse: + from .resources.embeddings import EmbeddingsResourceWithRawResponse + + return EmbeddingsResourceWithRawResponse(self._client.embeddings) + + @cached_property + def contextualized_embeddings(self) -> 
contextualized_embeddings.ContextualizedEmbeddingsResourceWithRawResponse: + from .resources.contextualized_embeddings import ContextualizedEmbeddingsResourceWithRawResponse + + return ContextualizedEmbeddingsResourceWithRawResponse(self._client.contextualized_embeddings) + @cached_property def async_(self) -> async_.AsyncResourceWithRawResponse: from .resources.async_ import AsyncResourceWithRawResponse @@ -492,6 +533,20 @@ def responses(self) -> responses.AsyncResponsesResourceWithRawResponse: return AsyncResponsesResourceWithRawResponse(self._client.responses) + @cached_property + def embeddings(self) -> embeddings.AsyncEmbeddingsResourceWithRawResponse: + from .resources.embeddings import AsyncEmbeddingsResourceWithRawResponse + + return AsyncEmbeddingsResourceWithRawResponse(self._client.embeddings) + + @cached_property + def contextualized_embeddings( + self, + ) -> contextualized_embeddings.AsyncContextualizedEmbeddingsResourceWithRawResponse: + from .resources.contextualized_embeddings import AsyncContextualizedEmbeddingsResourceWithRawResponse + + return AsyncContextualizedEmbeddingsResourceWithRawResponse(self._client.contextualized_embeddings) + @cached_property def async_(self) -> async_.AsyncAsyncResourceWithRawResponse: from .resources.async_ import AsyncAsyncResourceWithRawResponse @@ -523,6 +578,20 @@ def responses(self) -> responses.ResponsesResourceWithStreamingResponse: return ResponsesResourceWithStreamingResponse(self._client.responses) + @cached_property + def embeddings(self) -> embeddings.EmbeddingsResourceWithStreamingResponse: + from .resources.embeddings import EmbeddingsResourceWithStreamingResponse + + return EmbeddingsResourceWithStreamingResponse(self._client.embeddings) + + @cached_property + def contextualized_embeddings( + self, + ) -> contextualized_embeddings.ContextualizedEmbeddingsResourceWithStreamingResponse: + from .resources.contextualized_embeddings import ContextualizedEmbeddingsResourceWithStreamingResponse + + return 
ContextualizedEmbeddingsResourceWithStreamingResponse(self._client.contextualized_embeddings) + @cached_property def async_(self) -> async_.AsyncResourceWithStreamingResponse: from .resources.async_ import AsyncResourceWithStreamingResponse @@ -554,6 +623,20 @@ def responses(self) -> responses.AsyncResponsesResourceWithStreamingResponse: return AsyncResponsesResourceWithStreamingResponse(self._client.responses) + @cached_property + def embeddings(self) -> embeddings.AsyncEmbeddingsResourceWithStreamingResponse: + from .resources.embeddings import AsyncEmbeddingsResourceWithStreamingResponse + + return AsyncEmbeddingsResourceWithStreamingResponse(self._client.embeddings) + + @cached_property + def contextualized_embeddings( + self, + ) -> contextualized_embeddings.AsyncContextualizedEmbeddingsResourceWithStreamingResponse: + from .resources.contextualized_embeddings import AsyncContextualizedEmbeddingsResourceWithStreamingResponse + + return AsyncContextualizedEmbeddingsResourceWithStreamingResponse(self._client.contextualized_embeddings) + @cached_property def async_(self) -> async_.AsyncAsyncResourceWithStreamingResponse: from .resources.async_ import AsyncAsyncResourceWithStreamingResponse diff --git a/src/perplexity/resources/__init__.py b/src/perplexity/resources/__init__.py index b4d1b8d..30b55b6 100644 --- a/src/perplexity/resources/__init__.py +++ b/src/perplexity/resources/__init__.py @@ -32,6 +32,22 @@ ResponsesResourceWithStreamingResponse, AsyncResponsesResourceWithStreamingResponse, ) +from .embeddings import ( + EmbeddingsResource, + AsyncEmbeddingsResource, + EmbeddingsResourceWithRawResponse, + AsyncEmbeddingsResourceWithRawResponse, + EmbeddingsResourceWithStreamingResponse, + AsyncEmbeddingsResourceWithStreamingResponse, +) +from .contextualized_embeddings import ( + ContextualizedEmbeddingsResource, + AsyncContextualizedEmbeddingsResource, + ContextualizedEmbeddingsResourceWithRawResponse, + AsyncContextualizedEmbeddingsResourceWithRawResponse, + 
ContextualizedEmbeddingsResourceWithStreamingResponse, + AsyncContextualizedEmbeddingsResourceWithStreamingResponse, +) __all__ = [ "ChatResource", @@ -52,6 +68,18 @@ "AsyncResponsesResourceWithRawResponse", "ResponsesResourceWithStreamingResponse", "AsyncResponsesResourceWithStreamingResponse", + "EmbeddingsResource", + "AsyncEmbeddingsResource", + "EmbeddingsResourceWithRawResponse", + "AsyncEmbeddingsResourceWithRawResponse", + "EmbeddingsResourceWithStreamingResponse", + "AsyncEmbeddingsResourceWithStreamingResponse", + "ContextualizedEmbeddingsResource", + "AsyncContextualizedEmbeddingsResource", + "ContextualizedEmbeddingsResourceWithRawResponse", + "AsyncContextualizedEmbeddingsResourceWithRawResponse", + "ContextualizedEmbeddingsResourceWithStreamingResponse", + "AsyncContextualizedEmbeddingsResourceWithStreamingResponse", "AsyncResource", "AsyncAsyncResource", "AsyncResourceWithRawResponse", diff --git a/src/perplexity/resources/contextualized_embeddings.py b/src/perplexity/resources/contextualized_embeddings.py new file mode 100644 index 0000000..5e96194 --- /dev/null +++ b/src/perplexity/resources/contextualized_embeddings.py @@ -0,0 +1,228 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Literal + +import httpx + +from ..types import contextualized_embedding_create_params +from .._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .._base_client import make_request_options +from ..types.contextualized_embedding_create_response import ContextualizedEmbeddingCreateResponse + +__all__ = ["ContextualizedEmbeddingsResource", "AsyncContextualizedEmbeddingsResource"] + + +class ContextualizedEmbeddingsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ContextualizedEmbeddingsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/perplexityai/perplexity-py#accessing-raw-response-data-eg-headers + """ + return ContextualizedEmbeddingsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ContextualizedEmbeddingsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/perplexityai/perplexity-py#with_streaming_response + """ + return ContextualizedEmbeddingsResourceWithStreamingResponse(self) + + def create( + self, + *, + input: Iterable[SequenceNotStr[str]], + model: Literal["pplx-embed-context-v1-0.6b", "pplx-embed-context-v1-4b"], + dimensions: int | Omit = omit, + encoding_format: Literal["base64_int8", "base64_binary"] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> ContextualizedEmbeddingCreateResponse: + """Generate contextualized embeddings for document chunks. + + Chunks from the same + document share context awareness, improving retrieval quality for document-based + applications. + + Args: + input: Nested array structure where each inner array contains chunks from a single + document. Chunks within the same document are encoded with document-level + context awareness. Maximum 512 documents. Total chunks across all documents must + not exceed 16,000. Total tokens per document must not exceed 32K. All chunks in + a single request must not exceed 120,000 tokens combined. Empty strings are not + allowed. + + model: The contextualized embedding model to use + + dimensions: Number of dimensions for output embeddings (Matryoshka). Range: 128-1024 for + pplx-embed-context-v1-0.6b, 128-2560 for pplx-embed-context-v1-4b. Defaults to + full dimensions (1024 or 2560). + + encoding_format: Output encoding format for embeddings. base64_int8 returns base64-encoded signed + int8 values. base64_binary returns base64-encoded packed binary (1 bit per + dimension). 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v1/contextualizedembeddings", + body=maybe_transform( + { + "input": input, + "model": model, + "dimensions": dimensions, + "encoding_format": encoding_format, + }, + contextualized_embedding_create_params.ContextualizedEmbeddingCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ContextualizedEmbeddingCreateResponse, + ) + + +class AsyncContextualizedEmbeddingsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncContextualizedEmbeddingsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/perplexityai/perplexity-py#accessing-raw-response-data-eg-headers + """ + return AsyncContextualizedEmbeddingsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncContextualizedEmbeddingsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/perplexityai/perplexity-py#with_streaming_response + """ + return AsyncContextualizedEmbeddingsResourceWithStreamingResponse(self) + + async def create( + self, + *, + input: Iterable[SequenceNotStr[str]], + model: Literal["pplx-embed-context-v1-0.6b", "pplx-embed-context-v1-4b"], + dimensions: int | Omit = omit, + encoding_format: Literal["base64_int8", "base64_binary"] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> ContextualizedEmbeddingCreateResponse: + """Generate contextualized embeddings for document chunks. + + Chunks from the same + document share context awareness, improving retrieval quality for document-based + applications. + + Args: + input: Nested array structure where each inner array contains chunks from a single + document. Chunks within the same document are encoded with document-level + context awareness. Maximum 512 documents. Total chunks across all documents must + not exceed 16,000. Total tokens per document must not exceed 32K. All chunks in + a single request must not exceed 120,000 tokens combined. Empty strings are not + allowed. + + model: The contextualized embedding model to use + + dimensions: Number of dimensions for output embeddings (Matryoshka). Range: 128-1024 for + pplx-embed-context-v1-0.6b, 128-2560 for pplx-embed-context-v1-4b. Defaults to + full dimensions (1024 or 2560). + + encoding_format: Output encoding format for embeddings. base64_int8 returns base64-encoded signed + int8 values. base64_binary returns base64-encoded packed binary (1 bit per + dimension). 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v1/contextualizedembeddings", + body=await async_maybe_transform( + { + "input": input, + "model": model, + "dimensions": dimensions, + "encoding_format": encoding_format, + }, + contextualized_embedding_create_params.ContextualizedEmbeddingCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ContextualizedEmbeddingCreateResponse, + ) + + +class ContextualizedEmbeddingsResourceWithRawResponse: + def __init__(self, contextualized_embeddings: ContextualizedEmbeddingsResource) -> None: + self._contextualized_embeddings = contextualized_embeddings + + self.create = to_raw_response_wrapper( + contextualized_embeddings.create, + ) + + +class AsyncContextualizedEmbeddingsResourceWithRawResponse: + def __init__(self, contextualized_embeddings: AsyncContextualizedEmbeddingsResource) -> None: + self._contextualized_embeddings = contextualized_embeddings + + self.create = async_to_raw_response_wrapper( + contextualized_embeddings.create, + ) + + +class ContextualizedEmbeddingsResourceWithStreamingResponse: + def __init__(self, contextualized_embeddings: ContextualizedEmbeddingsResource) -> None: + self._contextualized_embeddings = contextualized_embeddings + + self.create = to_streamed_response_wrapper( + contextualized_embeddings.create, + ) + + +class AsyncContextualizedEmbeddingsResourceWithStreamingResponse: + def __init__(self, contextualized_embeddings: AsyncContextualizedEmbeddingsResource) -> None: + self._contextualized_embeddings = contextualized_embeddings + + self.create = async_to_streamed_response_wrapper( + contextualized_embeddings.create, + ) diff --git 
a/src/perplexity/resources/embeddings.py b/src/perplexity/resources/embeddings.py new file mode 100644 index 0000000..c7bead8 --- /dev/null +++ b/src/perplexity/resources/embeddings.py @@ -0,0 +1,220 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal + +import httpx + +from ..types import embedding_create_params +from .._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .._base_client import make_request_options +from ..types.embedding_create_response import EmbeddingCreateResponse + +__all__ = ["EmbeddingsResource", "AsyncEmbeddingsResource"] + + +class EmbeddingsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> EmbeddingsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/perplexityai/perplexity-py#accessing-raw-response-data-eg-headers + """ + return EmbeddingsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> EmbeddingsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/perplexityai/perplexity-py#with_streaming_response + """ + return EmbeddingsResourceWithStreamingResponse(self) + + def create( + self, + *, + input: Union[str, SequenceNotStr[str]], + model: Literal["pplx-embed-v1-0.6b", "pplx-embed-v1-4b"], + dimensions: int | Omit = omit, + encoding_format: Literal["base64_int8", "base64_binary"] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> EmbeddingCreateResponse: + """Generate embeddings for a list of texts. + + Use these embeddings for semantic + search, clustering, and other machine learning applications. + + Args: + input: Input text to embed, encoded as a string or array of strings. Maximum 512 texts + per request. Each input must not exceed 32K tokens. All inputs in a single + request must not exceed 120,000 tokens combined. Empty strings are not allowed. + + model: The embedding model to use + + dimensions: Number of dimensions for output embeddings (Matryoshka). Range: 128-1024 for + pplx-embed-v1-0.6b, 128-2560 for pplx-embed-v1-4b. Defaults to full dimensions + (1024 or 2560). + + encoding_format: Output encoding format for embeddings. base64_int8 returns base64-encoded signed + int8 values. base64_binary returns base64-encoded packed binary (1 bit per + dimension). 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v1/embeddings", + body=maybe_transform( + { + "input": input, + "model": model, + "dimensions": dimensions, + "encoding_format": encoding_format, + }, + embedding_create_params.EmbeddingCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EmbeddingCreateResponse, + ) + + +class AsyncEmbeddingsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncEmbeddingsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/perplexityai/perplexity-py#accessing-raw-response-data-eg-headers + """ + return AsyncEmbeddingsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncEmbeddingsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/perplexityai/perplexity-py#with_streaming_response + """ + return AsyncEmbeddingsResourceWithStreamingResponse(self) + + async def create( + self, + *, + input: Union[str, SequenceNotStr[str]], + model: Literal["pplx-embed-v1-0.6b", "pplx-embed-v1-4b"], + dimensions: int | Omit = omit, + encoding_format: Literal["base64_int8", "base64_binary"] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> EmbeddingCreateResponse: + """Generate embeddings for a list of texts. + + Use these embeddings for semantic + search, clustering, and other machine learning applications. + + Args: + input: Input text to embed, encoded as a string or array of strings. Maximum 512 texts + per request. Each input must not exceed 32K tokens. All inputs in a single + request must not exceed 120,000 tokens combined. Empty strings are not allowed. + + model: The embedding model to use + + dimensions: Number of dimensions for output embeddings (Matryoshka). Range: 128-1024 for + pplx-embed-v1-0.6b, 128-2560 for pplx-embed-v1-4b. Defaults to full dimensions + (1024 or 2560). + + encoding_format: Output encoding format for embeddings. base64_int8 returns base64-encoded signed + int8 values. base64_binary returns base64-encoded packed binary (1 bit per + dimension). 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v1/embeddings", + body=await async_maybe_transform( + { + "input": input, + "model": model, + "dimensions": dimensions, + "encoding_format": encoding_format, + }, + embedding_create_params.EmbeddingCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EmbeddingCreateResponse, + ) + + +class EmbeddingsResourceWithRawResponse: + def __init__(self, embeddings: EmbeddingsResource) -> None: + self._embeddings = embeddings + + self.create = to_raw_response_wrapper( + embeddings.create, + ) + + +class AsyncEmbeddingsResourceWithRawResponse: + def __init__(self, embeddings: AsyncEmbeddingsResource) -> None: + self._embeddings = embeddings + + self.create = async_to_raw_response_wrapper( + embeddings.create, + ) + + +class EmbeddingsResourceWithStreamingResponse: + def __init__(self, embeddings: EmbeddingsResource) -> None: + self._embeddings = embeddings + + self.create = to_streamed_response_wrapper( + embeddings.create, + ) + + +class AsyncEmbeddingsResourceWithStreamingResponse: + def __init__(self, embeddings: AsyncEmbeddingsResource) -> None: + self._embeddings = embeddings + + self.create = async_to_streamed_response_wrapper( + embeddings.create, + ) diff --git a/src/perplexity/types/__init__.py b/src/perplexity/types/__init__.py index 3e57b9f..a9b252f 100644 --- a/src/perplexity/types/__init__.py +++ b/src/perplexity/types/__init__.py @@ -8,11 +8,14 @@ SearchResult as SearchResult, UserLocation as UserLocation, ResponseFormat as ResponseFormat, + EmbeddingObject as EmbeddingObject, + EmbeddingsUsage as EmbeddingsUsage, ChatMessageInput as ChatMessageInput, JsonSchemaFormat as 
JsonSchemaFormat, WebSearchOptions as WebSearchOptions, ChatMessageOutput as ChatMessageOutput, APIPublicSearchResult as APIPublicSearchResult, + ContextualizedEmbeddingObject as ContextualizedEmbeddingObject, ) from .annotation import Annotation as Annotation from .error_info import ErrorInfo as ErrorInfo @@ -26,5 +29,13 @@ from .response_stream_chunk import ResponseStreamChunk as ResponseStreamChunk from .response_create_params import ResponseCreateParams as ResponseCreateParams from .search_create_response import SearchCreateResponse as SearchCreateResponse +from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams from .response_create_response import ResponseCreateResponse as ResponseCreateResponse +from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse from .function_call_output_item import FunctionCallOutputItem as FunctionCallOutputItem +from .contextualized_embedding_create_params import ( + ContextualizedEmbeddingCreateParams as ContextualizedEmbeddingCreateParams, +) +from .contextualized_embedding_create_response import ( + ContextualizedEmbeddingCreateResponse as ContextualizedEmbeddingCreateResponse, +) diff --git a/src/perplexity/types/contextualized_embedding_create_params.py b/src/perplexity/types/contextualized_embedding_create_params.py new file mode 100644 index 0000000..ecfcc24 --- /dev/null +++ b/src/perplexity/types/contextualized_embedding_create_params.py @@ -0,0 +1,39 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Literal, Required, TypedDict + +from .._types import SequenceNotStr + +__all__ = ["ContextualizedEmbeddingCreateParams"] + + +class ContextualizedEmbeddingCreateParams(TypedDict, total=False): + input: Required[Iterable[SequenceNotStr[str]]] + """ + Nested array structure where each inner array contains chunks from a single + document. Chunks within the same document are encoded with document-level + context awareness. Maximum 512 documents. Total chunks across all documents must + not exceed 16,000. Total tokens per document must not exceed 32K. All chunks in + a single request must not exceed 120,000 tokens combined. Empty strings are not + allowed. + """ + + model: Required[Literal["pplx-embed-context-v1-0.6b", "pplx-embed-context-v1-4b"]] + """The contextualized embedding model to use""" + + dimensions: int + """Number of dimensions for output embeddings (Matryoshka). + + Range: 128-1024 for pplx-embed-context-v1-0.6b, 128-2560 for + pplx-embed-context-v1-4b. Defaults to full dimensions (1024 or 2560). + """ + + encoding_format: Literal["base64_int8", "base64_binary"] + """Output encoding format for embeddings. + + base64_int8 returns base64-encoded signed int8 values. base64_binary returns + base64-encoded packed binary (1 bit per dimension). + """ diff --git a/src/perplexity/types/contextualized_embedding_create_response.py b/src/perplexity/types/contextualized_embedding_create_response.py new file mode 100644 index 0000000..af84469 --- /dev/null +++ b/src/perplexity/types/contextualized_embedding_create_response.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
# ---- src/perplexity/types/contextualized_embedding_create_response.py ----
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Optional

from .._models import BaseModel
from .shared.embeddings_usage import EmbeddingsUsage
from .shared.contextualized_embedding_object import ContextualizedEmbeddingObject

__all__ = ["ContextualizedEmbeddingCreateResponse"]


class ContextualizedEmbeddingCreateResponse(BaseModel):
    """Response body for contextualized embeddings request"""

    data: Optional[List[ContextualizedEmbeddingObject]] = None
    """List of contextualized embedding objects"""

    model: Optional[str] = None
    """The model used to generate embeddings"""

    object: Optional[str] = None
    """The object type"""

    usage: Optional[EmbeddingsUsage] = None
    """Token usage for the embeddings request"""


# ---- src/perplexity/types/embedding_create_params.py ----
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Union
from typing_extensions import Literal, Required, TypedDict

from .._types import SequenceNotStr

__all__ = ["EmbeddingCreateParams"]


class EmbeddingCreateParams(TypedDict, total=False):
    """Request body for ``client.embeddings.create`` (all keys optional unless Required)."""

    input: Required[Union[str, SequenceNotStr[str]]]
    """Input text to embed, encoded as a string or array of strings.

    Maximum 512 texts per request. Each input must not exceed 32K tokens. All inputs
    in a single request must not exceed 120,000 tokens combined. Empty strings are
    not allowed.
    """

    model: Required[Literal["pplx-embed-v1-0.6b", "pplx-embed-v1-4b"]]
    """The embedding model to use"""

    dimensions: int
    """Number of dimensions for output embeddings (Matryoshka).

    Range: 128-1024 for pplx-embed-v1-0.6b, 128-2560 for pplx-embed-v1-4b. Defaults
    to full dimensions (1024 or 2560).
    """

    encoding_format: Literal["base64_int8", "base64_binary"]
    """Output encoding format for embeddings.

    base64_int8 returns base64-encoded signed int8 values. base64_binary returns
    base64-encoded packed binary (1 bit per dimension).
    """


# ---- src/perplexity/types/embedding_create_response.py ----
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Optional

from .._models import BaseModel
from .shared.embedding_object import EmbeddingObject
from .shared.embeddings_usage import EmbeddingsUsage

__all__ = ["EmbeddingCreateResponse"]


class EmbeddingCreateResponse(BaseModel):
    """Response body for embeddings request"""

    data: Optional[List[EmbeddingObject]] = None
    """List of embedding objects"""

    model: Optional[str] = None
    """The model used to generate embeddings"""

    object: Optional[str] = None
    """The object type"""

    usage: Optional[EmbeddingsUsage] = None
    """Token usage for the embeddings request"""


# ---- src/perplexity/types/shared/__init__.py (visible hunk only; earlier lines
# of this file are outside this chunk) ----
from .search_result import SearchResult as SearchResult
from .user_location import UserLocation as UserLocation
from .response_format import ResponseFormat as ResponseFormat
from .embedding_object import EmbeddingObject as EmbeddingObject
from .embeddings_usage import EmbeddingsUsage as EmbeddingsUsage
from .chat_message_input import ChatMessageInput as ChatMessageInput
from .json_schema_format import JsonSchemaFormat as JsonSchemaFormat
from .web_search_options import WebSearchOptions as WebSearchOptions
from .chat_message_output import ChatMessageOutput as ChatMessageOutput
from .api_public_search_result import APIPublicSearchResult as APIPublicSearchResult
from .contextualized_embedding_object import ContextualizedEmbeddingObject as ContextualizedEmbeddingObject


# ---- src/perplexity/types/shared/contextualized_embedding_object.py ----
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Optional

from ..._models import BaseModel
from .embedding_object import EmbeddingObject

__all__ = ["ContextualizedEmbeddingObject"]


class ContextualizedEmbeddingObject(BaseModel):
    """A single contextualized embedding result"""

    data: Optional[List[EmbeddingObject]] = None
    """List of embedding objects for chunks in this document"""

    index: Optional[int] = None
    """The index of the document this chunk belongs to"""

    object: Optional[str] = None
    """The object type"""


# ---- src/perplexity/types/shared/embedding_object.py ----
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional

from ..._models import BaseModel

__all__ = ["EmbeddingObject"]


class EmbeddingObject(BaseModel):
    """A single embedding result"""

    embedding: Optional[str] = None
    """Base64-encoded embedding vector.

    For base64_int8: decode to signed int8 array (length = dimensions). For
    base64_binary: decode to packed bits (length = dimensions / 8 bytes).
    """

    index: Optional[int] = None
    """The index of the input text this embedding corresponds to"""

    object: Optional[str] = None
    """The object type"""


# ---- src/perplexity/types/shared/embeddings_usage.py ----
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional
from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["EmbeddingsUsage", "Cost"]


class Cost(BaseModel):
    """Cost breakdown for the request"""

    currency: Optional[Literal["USD"]] = None
    """Currency of the cost values"""

    input_cost: Optional[float] = None
    """Cost for input tokens in USD"""

    total_cost: Optional[float] = None
    """Total cost for the request in USD"""


class EmbeddingsUsage(BaseModel):
    """Token usage for the embeddings request"""

    cost: Optional[Cost] = None
    """Cost breakdown for the request"""

    prompt_tokens: Optional[int] = None
    """Number of tokens in the input texts"""

    total_tokens: Optional[int] = None
    """Total number of tokens processed"""
# ---- tests/api_resources/test_contextualized_embeddings.py ----
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

import os
from typing import Any, cast

import pytest

from perplexity import Perplexity, AsyncPerplexity
from tests.utils import assert_matches_type
from perplexity.types import ContextualizedEmbeddingCreateResponse

# Default target is 127.0.0.1:4010 — presumably a local Prism mock server
# (the skip reasons below mention Prism); override via TEST_API_BASE_URL.
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")


class TestContextualizedEmbeddings:
    # Runs each test against both a "loose" and a "strict" client fixture.
    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

    @pytest.mark.skip(reason="Prism tests are disabled")
    @parametrize
    def test_method_create(self, client: Perplexity) -> None:
        # Minimal call: only the required `input` and `model` arguments.
        contextualized_embedding = client.contextualized_embeddings.create(
            input=[["x"]],
            model="pplx-embed-context-v1-0.6b",
        )
        assert_matches_type(ContextualizedEmbeddingCreateResponse, contextualized_embedding, path=["response"])

    @pytest.mark.skip(reason="Prism tests are disabled")
    @parametrize
    def test_method_create_with_all_params(self, client: Perplexity) -> None:
        # Exercises every optional parameter alongside the required ones.
        contextualized_embedding = client.contextualized_embeddings.create(
            input=[["x"]],
            model="pplx-embed-context-v1-0.6b",
            dimensions=128,
            encoding_format="base64_int8",
        )
        assert_matches_type(ContextualizedEmbeddingCreateResponse, contextualized_embedding, path=["response"])

    @pytest.mark.skip(reason="Prism tests are disabled")
    @parametrize
    def test_raw_response_create(self, client: Perplexity) -> None:
        response = client.contextualized_embeddings.with_raw_response.create(
            input=[["x"]],
            model="pplx-embed-context-v1-0.6b",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        contextualized_embedding = response.parse()
        assert_matches_type(ContextualizedEmbeddingCreateResponse, contextualized_embedding, path=["response"])

    @pytest.mark.skip(reason="Prism tests are disabled")
    @parametrize
    def test_streaming_response_create(self, client: Perplexity) -> None:
        with client.contextualized_embeddings.with_streaming_response.create(
            input=[["x"]],
            model="pplx-embed-context-v1-0.6b",
        ) as response:
            # Stream must stay open inside the context manager...
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            contextualized_embedding = response.parse()
            assert_matches_type(ContextualizedEmbeddingCreateResponse, contextualized_embedding, path=["response"])

        # ...and be closed once the context manager exits.
        assert cast(Any, response.is_closed) is True


class TestAsyncContextualizedEmbeddings:
    # Same matrix as the sync class, plus an aiohttp-backed client variant.
    parametrize = pytest.mark.parametrize(
        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
    )

    @pytest.mark.skip(reason="Prism tests are disabled")
    @parametrize
    async def test_method_create(self, async_client: AsyncPerplexity) -> None:
        contextualized_embedding = await async_client.contextualized_embeddings.create(
            input=[["x"]],
            model="pplx-embed-context-v1-0.6b",
        )
        assert_matches_type(ContextualizedEmbeddingCreateResponse, contextualized_embedding, path=["response"])

    @pytest.mark.skip(reason="Prism tests are disabled")
    @parametrize
    async def test_method_create_with_all_params(self, async_client: AsyncPerplexity) -> None:
        contextualized_embedding = await async_client.contextualized_embeddings.create(
            input=[["x"]],
            model="pplx-embed-context-v1-0.6b",
            dimensions=128,
            encoding_format="base64_int8",
        )
        assert_matches_type(ContextualizedEmbeddingCreateResponse, contextualized_embedding, path=["response"])

    @pytest.mark.skip(reason="Prism tests are disabled")
    @parametrize
    async def test_raw_response_create(self, async_client: AsyncPerplexity) -> None:
        response = await async_client.contextualized_embeddings.with_raw_response.create(
            input=[["x"]],
            model="pplx-embed-context-v1-0.6b",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        contextualized_embedding = await response.parse()
        assert_matches_type(ContextualizedEmbeddingCreateResponse, contextualized_embedding, path=["response"])

    @pytest.mark.skip(reason="Prism tests are disabled")
    @parametrize
    async def test_streaming_response_create(self, async_client: AsyncPerplexity) -> None:
        async with async_client.contextualized_embeddings.with_streaming_response.create(
            input=[["x"]],
            model="pplx-embed-context-v1-0.6b",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            contextualized_embedding = await response.parse()
            assert_matches_type(ContextualizedEmbeddingCreateResponse, contextualized_embedding, path=["response"])

        assert cast(Any, response.is_closed) is True
# ---- tests/api_resources/test_embeddings.py ----
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
# NOTE(review): the trailing release-please metadata of the original patch
# (changelog/version bump to 0.29.0) followed this file and is diff metadata,
# not code.

from __future__ import annotations

import os
from typing import Any, cast

import pytest

from perplexity import Perplexity, AsyncPerplexity
from tests.utils import assert_matches_type
from perplexity.types import EmbeddingCreateResponse

# Default target is 127.0.0.1:4010 — presumably a local Prism mock server
# (the skip reasons below mention Prism); override via TEST_API_BASE_URL.
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")


class TestEmbeddings:
    # Runs each test against both a "loose" and a "strict" client fixture.
    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

    @pytest.mark.skip(reason="Prism tests are disabled")
    @parametrize
    def test_method_create(self, client: Perplexity) -> None:
        # Minimal call: only the required `input` and `model` arguments.
        embedding = client.embeddings.create(
            input="x",
            model="pplx-embed-v1-0.6b",
        )
        assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"])

    @pytest.mark.skip(reason="Prism tests are disabled")
    @parametrize
    def test_method_create_with_all_params(self, client: Perplexity) -> None:
        # Exercises every optional parameter alongside the required ones.
        embedding = client.embeddings.create(
            input="x",
            model="pplx-embed-v1-0.6b",
            dimensions=128,
            encoding_format="base64_int8",
        )
        assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"])

    @pytest.mark.skip(reason="Prism tests are disabled")
    @parametrize
    def test_raw_response_create(self, client: Perplexity) -> None:
        response = client.embeddings.with_raw_response.create(
            input="x",
            model="pplx-embed-v1-0.6b",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        embedding = response.parse()
        assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"])

    @pytest.mark.skip(reason="Prism tests are disabled")
    @parametrize
    def test_streaming_response_create(self, client: Perplexity) -> None:
        with client.embeddings.with_streaming_response.create(
            input="x",
            model="pplx-embed-v1-0.6b",
        ) as response:
            # Stream must stay open inside the context manager...
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            embedding = response.parse()
            assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"])

        # ...and be closed once the context manager exits.
        assert cast(Any, response.is_closed) is True


class TestAsyncEmbeddings:
    # Same matrix as the sync class, plus an aiohttp-backed client variant.
    parametrize = pytest.mark.parametrize(
        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
    )

    @pytest.mark.skip(reason="Prism tests are disabled")
    @parametrize
    async def test_method_create(self, async_client: AsyncPerplexity) -> None:
        embedding = await async_client.embeddings.create(
            input="x",
            model="pplx-embed-v1-0.6b",
        )
        assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"])

    @pytest.mark.skip(reason="Prism tests are disabled")
    @parametrize
    async def test_method_create_with_all_params(self, async_client: AsyncPerplexity) -> None:
        embedding = await async_client.embeddings.create(
            input="x",
            model="pplx-embed-v1-0.6b",
            dimensions=128,
            encoding_format="base64_int8",
        )
        assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"])

    @pytest.mark.skip(reason="Prism tests are disabled")
    @parametrize
    async def test_raw_response_create(self, async_client: AsyncPerplexity) -> None:
        response = await async_client.embeddings.with_raw_response.create(
            input="x",
            model="pplx-embed-v1-0.6b",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        embedding = await response.parse()
        assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"])

    @pytest.mark.skip(reason="Prism tests are disabled")
    @parametrize
    async def test_streaming_response_create(self, async_client: AsyncPerplexity) -> None:
        async with async_client.embeddings.with_streaming_response.create(
            input="x",
            model="pplx-embed-v1-0.6b",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            embedding = await response.parse()
            assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"])

        assert cast(Any, response.is_closed) is True
__title__ = "perplexity" -__version__ = "0.28.1" # x-release-please-version +__version__ = "0.29.0" # x-release-please-version