diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index ffd92fb9..42a31c2a 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -100,7 +100,7 @@ jobs:
   smoke:
     name: smoke
     # Only run smoke tests on pushes to main repo (not forks) so that secrets can be accessed
-    if: github.repository == 'stainless-sdks/gradient-python' && github.event_name == 'push'
+    if: github.repository == 'stainless-sdks/gradient-python' && (github.event_name == 'push' || github.event_name == 'pull_request')
     runs-on: ${{ github.repository == 'stainless-sdks/gradient-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
     timeout-minutes: 10
     steps:
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 9127b1bd..25709d1b 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "3.9.0"
+  ".": "3.10.0"
 }
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index b4763701..d321100e 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 189
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-ca993fd0bc66f703323a773c92da75207266f7f9d8c54ddac2fbd271a3cdaf86.yml
-openapi_spec_hash: 35d7edb04aab2ab28bc7e5851a54b4e3
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-0778b2e9d56c826f92ee69ef081d8d73fd94c139b85e11becaa88bf1cbe95fb9.yml
+openapi_spec_hash: 49daca0dd735cad7200ca1c741a5dd43
 config_hash: fad48c8ac796b240fe3b90181586d1a4
diff --git a/CHANGELOG.md b/CHANGELOG.md
index fc39fcc7..9bd90ccd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,25 @@
 # Changelog
 
+## 3.10.0 (2025-12-19)
+
+Full Changelog: [v3.9.0...v3.10.0](https://github.com/digitalocean/gradient-python/compare/v3.9.0...v3.10.0)
+
+### Features
+
+* **api:** manual updates ([f1c2eb2](https://github.com/digitalocean/gradient-python/commit/f1c2eb25ae1787b661ab1323528077074aa0cab6))
+* **api:** manual updates ([355e13f](https://github.com/digitalocean/gradient-python/commit/355e13f1a4b012e09bc2056179419ede57044b97))
+
+
+### Bug Fixes
+
+* restore inference endpoints ([#120](https://github.com/digitalocean/gradient-python/issues/120)) ([ee792a1](https://github.com/digitalocean/gradient-python/commit/ee792a181e819d8fa26712fe8bc96ffd4c02d2ed))
+
+
+### Chores
+
+* **internal:** add `--fix` argument to lint script ([2825cb7](https://github.com/digitalocean/gradient-python/commit/2825cb750edd261a324c2da28afc3cb6ee90f5e9))
+* run smoke tests on prs ([#121](https://github.com/digitalocean/gradient-python/issues/121)) ([719a5fb](https://github.com/digitalocean/gradient-python/commit/719a5fb4fcf418db9ede5659710377a47d41b6a8))
+
 ## 3.9.0 (2025-12-17)
 
 Full Changelog: [v3.8.0...v3.9.0](https://github.com/digitalocean/gradient-python/compare/v3.8.0...v3.9.0)
diff --git a/pyproject.toml b/pyproject.toml
index 9cce1fde..35e2247e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "gradient"
-version = "3.9.0"
+version = "3.10.0"
 description = "The official Python library for the Gradient API"
 dynamic = ["readme"]
 license = "Apache-2.0"
diff --git a/scripts/lint b/scripts/lint
index 9ccb6ae5..bc51411f 100755
--- a/scripts/lint
+++ b/scripts/lint
@@ -4,8 +4,13 @@ set -e
 
 cd "$(dirname "$0")/.."
 
-echo "==> Running lints" -rye run lint +if [ "$1" = "--fix" ]; then + echo "==> Running lints with --fix" + rye run fix:ruff +else + echo "==> Running lints" + rye run lint +fi echo "==> Making sure it imports" rye run python -c 'import gradient' diff --git a/src/gradient/_client.py b/src/gradient/_client.py index 47fae3cd..847121b1 100644 --- a/src/gradient/_client.py +++ b/src/gradient/_client.py @@ -142,10 +142,7 @@ def __init__( self._agent_endpoint = agent_endpoint if inference_endpoint is None: - inference_endpoint = ( - os.environ.get("GRADIENT_INFERENCE_ENDPOINT") - or "https://inference.do-ai.run" - ) + inference_endpoint = os.environ.get("GRADIENT_INFERENCE_ENDPOINT") or "https://inference.do-ai.run" self.inference_endpoint = inference_endpoint if kbass_endpoint is None: @@ -302,9 +299,7 @@ def default_headers(self) -> dict[str, str | Omit]: @override def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None: - if ( - self.access_token or self.agent_access_key or self.model_access_key - ) and headers.get("Authorization"): + if (self.access_token or self.agent_access_key or self.model_access_key) and headers.get("Authorization"): return if isinstance(custom_headers.get("Authorization"), Omit): return @@ -513,10 +508,7 @@ def __init__( self._agent_endpoint = agent_endpoint if inference_endpoint is None: - inference_endpoint = ( - os.environ.get("GRADIENT_INFERENCE_ENDPOINT") - or "https://inference.do-ai.run" - ) + inference_endpoint = os.environ.get("GRADIENT_INFERENCE_ENDPOINT") or "https://inference.do-ai.run" self.inference_endpoint = inference_endpoint if kbass_endpoint is None: @@ -673,9 +665,7 @@ def default_headers(self) -> dict[str, str | Omit]: @override def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None: - if ( - self.access_token or self.agent_access_key or self.model_access_key - ) and headers.get("Authorization"): + if (self.access_token or self.agent_access_key or self.model_access_key) and headers.get("Authorization"): return if isinstance(custom_headers.get("Authorization"), Omit): return diff --git a/src/gradient/_version.py b/src/gradient/_version.py index abe0382c..c633db37 100644 --- a/src/gradient/_version.py +++ b/src/gradient/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
 __title__ = "gradient"
-__version__ = "3.9.0"  # x-release-please-version
+__version__ = "3.10.0"  # x-release-please-version
diff --git a/src/gradient/resources/knowledge_bases/knowledge_bases.py b/src/gradient/resources/knowledge_bases/knowledge_bases.py
index 2378f7f4..04c19b32 100644
--- a/src/gradient/resources/knowledge_bases/knowledge_bases.py
+++ b/src/gradient/resources/knowledge_bases/knowledge_bases.py
@@ -434,9 +434,7 @@ def wait_for_database(
             KnowledgeBaseTimeoutError: If the timeout is exceeded before the database becomes ONLINE
         """
         if not uuid:
-            raise ValueError(
-                f"Expected a non-empty value for `uuid` but received {uuid!r}"
-            )
+            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
 
         start_time = time.time()
         failed_states = {"DECOMMISSIONED", "UNHEALTHY"}
@@ -462,9 +460,7 @@ def wait_for_database(
                 return response
 
             if status in failed_states:
-                raise KnowledgeBaseDatabaseError(
-                    f"Knowledge base database entered failed state: {status}"
-                )
+                raise KnowledgeBaseDatabaseError(f"Knowledge base database entered failed state: {status}")
 
             # Sleep before next poll, but don't exceed timeout
             remaining_time = timeout - elapsed
@@ -874,9 +870,7 @@ async def wait_for_database(
             KnowledgeBaseTimeoutError: If the timeout is exceeded before the database becomes ONLINE
         """
         if not uuid:
-            raise ValueError(
-                f"Expected a non-empty value for `uuid` but received {uuid!r}"
-            )
+            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
 
         start_time = time.time()
         failed_states = {"DECOMMISSIONED", "UNHEALTHY"}
@@ -902,9 +896,7 @@ async def wait_for_database(
                 return response
 
             if status in failed_states:
-                raise KnowledgeBaseDatabaseError(
-                    f"Knowledge base database entered failed state: {status}"
-                )
+                raise KnowledgeBaseDatabaseError(f"Knowledge base database entered failed state: {status}")
 
             # Sleep before next poll, but don't exceed timeout
             remaining_time = timeout - elapsed
diff --git a/src/gradient/resources/retrieve.py b/src/gradient/resources/retrieve.py
index f0768350..f8335ab3 100644
--- a/src/gradient/resources/retrieve.py
+++ b/src/gradient/resources/retrieve.py
@@ -67,12 +67,6 @@ def documents(
         3. Performs vector similarity search in the knowledge base
         4. Returns the most relevant document chunks
 
-        The search supports hybrid search combining:
-
-        - Vector similarity (semantic search)
-        - Keyword matching (BM25)
-        - Custom metadata filters
-
         Args:
           num_results: Number of results to return
 
@@ -98,9 +92,11 @@ def documents(
         if not knowledge_base_id:
             raise ValueError(f"Expected a non-empty value for `knowledge_base_id` but received {knowledge_base_id!r}")
         return self._post(
-            f"/{knowledge_base_id}/retrieve"
-            if self._client._base_url_overridden
-            else f"https://kbaas.do-ai.run/v1/{knowledge_base_id}/retrieve",
+            (
+                f"/{knowledge_base_id}/retrieve"
+                if self._client._base_url_overridden
+                else f"https://kbaas.do-ai.run/v1/{knowledge_base_id}/retrieve"
+            ),
             body=maybe_transform(
                 {
                     "num_results": num_results,
@@ -111,7 +107,10 @@
                 retrieve_documents_params.RetrieveDocumentsParams,
             ),
             options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
             ),
             cast_to=RetrieveDocumentsResponse,
         )
@@ -163,12 +162,6 @@ async def documents(
         3. Performs vector similarity search in the knowledge base
         4. Returns the most relevant document chunks
 
-        The search supports hybrid search combining:
-
-        - Vector similarity (semantic search)
-        - Keyword matching (BM25)
-        - Custom metadata filters
-
         Args:
           num_results: Number of results to return
 
@@ -194,9 +187,11 @@ async def documents(
         if not knowledge_base_id:
             raise ValueError(f"Expected a non-empty value for `knowledge_base_id` but received {knowledge_base_id!r}")
         return await self._post(
-            f"/{knowledge_base_id}/retrieve"
-            if self._client._base_url_overridden
-            else f"https://kbaas.do-ai.run/v1/{knowledge_base_id}/retrieve",
+            (
+                f"/{knowledge_base_id}/retrieve"
+                if self._client._base_url_overridden
+                else f"https://kbaas.do-ai.run/v1/{knowledge_base_id}/retrieve"
+            ),
             body=await async_maybe_transform(
                 {
                     "num_results": num_results,
@@ -207,7 +202,10 @@
                 retrieve_documents_params.RetrieveDocumentsParams,
            ),
             options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
             ),
             cast_to=RetrieveDocumentsResponse,
         )
diff --git a/src/gradient/types/retrieve_documents_params.py b/src/gradient/types/retrieve_documents_params.py
index 968d0211..bad99ad3 100644
--- a/src/gradient/types/retrieve_documents_params.py
+++ b/src/gradient/types/retrieve_documents_params.py
@@ -63,6 +63,8 @@ class FiltersShould(TypedDict, total=False):
 
 
 class Filters(TypedDict, total=False):
+    """Metadata filters to apply to the search"""
+
     must: Iterable[FiltersMust]
     """All conditions must match (AND)"""
 
@@ -71,3 +73,4 @@ class Filters(TypedDict, total=False):
 
     should: Iterable[FiltersShould]
     """At least one condition must match (OR)"""
+
diff --git a/tests/test_client.py b/tests/test_client.py
index c2406d77..4b645c08 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -945,9 +945,7 @@ class Model(BaseModel):
         ],
     )
     @mock.patch("time.time", mock.MagicMock(return_value=1696004797))
-    def test_parse_retry_after_header(
-        self, remaining_retries: int, retry_after: str, timeout: float
-    ) -> None:
+    def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None:
         client = Gradient(
             base_url=base_url,
             access_token=access_token,
@@ -958,9 +956,7 @@ def test_parse_retry_after_header(
         headers = httpx.Headers({"retry-after": retry_after})
         options = FinalRequestOptions(method="get", url="/foo", max_retries=3)
 
-        calculated = client._calculate_retry_timeout(
-            remaining_retries, options, headers
-        )
+        calculated = client._calculate_retry_timeout(remaining_retries, options, headers)
         assert calculated == pytest.approx(timeout, rel=0.5 * 0.875)  # type: ignore[misc]
 
     @mock.patch(
@@ -2085,9 +2081,7 @@ class Model(BaseModel):
     )
     @mock.patch("time.time", mock.MagicMock(return_value=1696004797))
     @pytest.mark.asyncio
-    async def test_parse_retry_after_header(
-        self, remaining_retries: int, retry_after: str, timeout: float
-    ) -> None:
+    async def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None:
         async_client = AsyncGradient(
             base_url=base_url,
             access_token=access_token,
@@ -2098,9 +2092,7 @@ async def test_parse_retry_after_header(
         headers = httpx.Headers({"retry-after": retry_after})
         options = FinalRequestOptions(method="get", url="/foo", max_retries=3)
 
-        calculated = async_client._calculate_retry_timeout(
-            remaining_retries, options, headers
-        )
+        calculated = async_client._calculate_retry_timeout(remaining_retries, options, headers)
         assert calculated == pytest.approx(timeout, rel=0.5 * 0.875)  # type: ignore[misc]
 
     @mock.patch(
diff --git a/tests/test_smoke_sdk_async.py b/tests/test_smoke_sdk_async.py
index 49e1ca78..8425fa77 100644
--- a/tests/test_smoke_sdk_async.py
+++ b/tests/test_smoke_sdk_async.py
@@ -25,31 +25,24 @@ async def test_async_smoke_environment_and_client_state() -> None:
     missing = [k for k in REQUIRED_ENV_VARS if not os.getenv(k)]
     if missing:
         pytest.fail(
-            "Missing required environment variables for async smoke tests: "
-            + ", ".join(missing),
+            "Missing required environment variables for async smoke tests: " + ", ".join(missing),
             pytrace=False,
         )
 
     async with AsyncGradient() as client:
         # Property assertions (auto-loaded from environment)
-        assert (
-            client.access_token == os.environ["DIGITALOCEAN_ACCESS_TOKEN"]
-        ), "access_token not loaded from env"
-        assert (
-            client.model_access_key == os.environ["GRADIENT_MODEL_ACCESS_KEY"]
-        ), "model_access_key not loaded from env"
-        assert (
-            client.agent_access_key == os.environ["GRADIENT_AGENT_ACCESS_KEY"]
-        ), "agent_access_key not loaded from env"
+        assert client.access_token == os.environ["DIGITALOCEAN_ACCESS_TOKEN"], "access_token not loaded from env"
+        assert client.model_access_key == os.environ["GRADIENT_MODEL_ACCESS_KEY"], (
+            "model_access_key not loaded from env"
+        )
+        assert client.agent_access_key == os.environ["GRADIENT_AGENT_ACCESS_KEY"], (
+            "agent_access_key not loaded from env"
+        )
 
         expected_endpoint = os.environ["GRADIENT_AGENT_ENDPOINT"]
         normalized_expected = (
-            expected_endpoint
-            if expected_endpoint.startswith("https://")
-            else f"https://{expected_endpoint}"
+            expected_endpoint if expected_endpoint.startswith("https://") else f"https://{expected_endpoint}"
         )
-        assert (
-            client.agent_endpoint == normalized_expected
-        ), "agent_endpoint not derived correctly from env"
+        assert client.agent_endpoint == normalized_expected, "agent_endpoint not derived correctly from env"
 
 
 @pytest.mark.smoke
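
A minimal sketch of how the retrieve endpoint restored in this release might be called (not part of the diff). Only the `documents` method, the `knowledge_base_id` path parameter, and the `num_results` body field appear verbatim in the hunks above; the `client.retrieve` accessor (named after the resource module) and all argument values are assumptions. Separately, the updated lint script's autofix mode is invoked as `./scripts/lint --fix`, which runs `rye run fix:ruff`.

    # Hypothetical usage sketch; see the assumptions noted above.
    from gradient import Gradient

    # Per the _client.py hunks, credentials such as DIGITALOCEAN_ACCESS_TOKEN are
    # read from the environment, and GRADIENT_INFERENCE_ENDPOINT overrides the
    # default https://inference.do-ai.run inference endpoint.
    client = Gradient()

    response = client.retrieve.documents(
        knowledge_base_id="example-kb-uuid",  # hypothetical knowledge base ID
        num_results=5,  # `num_results` appears in the retrieve.py hunks
    )
    print(response)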