diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index a84687f1e..4903f7374 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -22,11 +22,12 @@ env: WEAVIATE_128: 1.28.16 WEAVIATE_129: 1.29.11 WEAVIATE_130: 1.30.22 - WEAVIATE_131: 1.31.20 - WEAVIATE_132: 1.32.23 - WEAVIATE_133: 1.33.10 - WEAVIATE_134: 1.34.5 - WEAVIATE_135: 1.35.0 + WEAVIATE_131: 1.31.22 + WEAVIATE_132: 1.32.26 + WEAVIATE_133: 1.33.11 + WEAVIATE_134: 1.34.8 + WEAVIATE_135: 1.35.2 + WEAVIATE_136: 1.36.0-dev-0bbf31a jobs: lint-and-format: @@ -154,11 +155,11 @@ jobs: fail-fast: false matrix: versions: [ - { py: "3.10", weaviate: $WEAVIATE_132, grpc: "1.59.0"}, - { py: "3.11", weaviate: $WEAVIATE_132, grpc: "1.66.0"}, - { py: "3.12", weaviate: $WEAVIATE_132, grpc: "1.70.0"}, - { py: "3.13", weaviate: $WEAVIATE_132, grpc: "1.72.1"}, - { py: "3.14", weaviate: $WEAVIATE_132, grpc: "1.76.0"} + { py: "3.10", weaviate: $WEAVIATE_136, grpc: "1.59.0"}, + { py: "3.11", weaviate: $WEAVIATE_136, grpc: "1.66.0"}, + { py: "3.12", weaviate: $WEAVIATE_136, grpc: "1.70.0"}, + { py: "3.13", weaviate: $WEAVIATE_136, grpc: "1.72.1"}, + { py: "3.14", weaviate: $WEAVIATE_136, grpc: "1.76.0"} ] optional_dependencies: [false] steps: @@ -209,11 +210,11 @@ jobs: fail-fast: false matrix: versions: [ - { py: "3.10", weaviate: $WEAVIATE_132}, - { py: "3.11", weaviate: $WEAVIATE_132}, - { py: "3.12", weaviate: $WEAVIATE_132}, - { py: "3.13", weaviate: $WEAVIATE_132}, - { py: "3.14", weaviate: $WEAVIATE_132} + { py: "3.10", weaviate: $WEAVIATE_136}, + { py: "3.11", weaviate: $WEAVIATE_136}, + { py: "3.12", weaviate: $WEAVIATE_136}, + { py: "3.13", weaviate: $WEAVIATE_136}, + { py: "3.14", weaviate: $WEAVIATE_136} ] optional_dependencies: [false] steps: @@ -305,8 +306,9 @@ jobs: $WEAVIATE_131, $WEAVIATE_132, $WEAVIATE_133, - $WEAVIATE_134 - $WEAVIATE_135 + $WEAVIATE_134, + $WEAVIATE_135, + $WEAVIATE_136 ] steps: - name: Checkout diff --git a/integration/test_batch_v4.py 
b/integration/test_batch_v4.py index 4a55b0131..aac4f7e37 100644 --- a/integration/test_batch_v4.py +++ b/integration/test_batch_v4.py @@ -4,6 +4,7 @@ from typing import Callable, Generator, List, Optional, Protocol, Tuple import pytest +import pytest_asyncio from _pytest.fixtures import SubRequest import weaviate @@ -119,6 +120,53 @@ def _factory( client_fixture.close() +class AsyncClientFactory(Protocol): + """Typing for fixture.""" + + async def __call__( + self, name: str = "", ports: Tuple[int, int] = (8080, 50051), multi_tenant: bool = False + ) -> Tuple[weaviate.WeaviateAsyncClient, str]: + """Typing for fixture.""" + ... + + +@pytest_asyncio.fixture +async def async_client_factory(request: SubRequest): + name_fixtures: List[str] = [] + client_fixture: Optional[weaviate.WeaviateAsyncClient] = None + + async def _factory( + name: str = "", ports: Tuple[int, int] = (8080, 50051), multi_tenant: bool = False + ): + nonlocal client_fixture, name_fixtures # noqa: F824 + name_fixture = _sanitize_collection_name(request.node.name) + name + name_fixtures.append(name_fixture) + if client_fixture is None: + client_fixture = weaviate.use_async_with_local(grpc_port=ports[1], port=ports[0]) + await client_fixture.connect() + + if await client_fixture.collections.exists(name_fixture): + await client_fixture.collections.delete(name_fixture) + + await client_fixture.collections.create( + name=name_fixture, + properties=[ + Property(name="name", data_type=DataType.TEXT), + Property(name="age", data_type=DataType.INT), + ], + references=[ReferenceProperty(name="test", target_collection=name_fixture)], + multi_tenancy_config=Configure.multi_tenancy(multi_tenant), + vectorizer_config=Configure.Vectorizer.none(), + ) + return client_fixture, name_fixture + + try: + yield _factory + finally: + if client_fixture is not None: + await client_fixture.close() + + def test_add_objects_in_multiple_batches(client_factory: ClientFactory) -> None: client, name = client_factory() with 
client.batch.rate_limit(50) as batch: @@ -365,16 +413,14 @@ def test_add_ref_batch_with_tenant(client_factory: ClientFactory) -> None: @pytest.mark.parametrize( "batching_method", [ - # lambda client: client.batch.dynamic(), - # lambda client: client.batch.fixed_size(), - # lambda client: client.batch.rate_limit(9999), - lambda client: client.batch.experimental(concurrency=1), + lambda client: client.batch.dynamic(), + lambda client: client.batch.fixed_size(), + lambda client: client.batch.stream(concurrency=1), ], ids=[ - # "test_add_ten_thousand_data_objects_dynamic", - # "test_add_ten_thousand_data_objects_fixed_size", - # "test_add_ten_thousand_data_objects_rate_limit", - "test_add_ten_thousand_data_objects_experimental", + "test_add_ten_thousand_data_objects_dynamic", + "test_add_ten_thousand_data_objects_fixed_size", + "test_add_ten_thousand_data_objects_stream", ], ) def test_add_ten_thousand_data_objects( @@ -385,10 +431,10 @@ def test_add_ten_thousand_data_objects( """Test adding ten thousand data objects.""" client, name = client_factory() if ( - request.node.callspec.id == "test_add_ten_thousand_data_objects_experimental" - and client._connection._weaviate_version.is_lower_than(1, 34, 0) + request.node.callspec.id == "test_add_ten_thousand_data_objects_stream" + and client._connection._weaviate_version.is_lower_than(1, 36, 0) ): - pytest.skip("Server-side batching not supported in Weaviate < 1.34.0") + pytest.skip("Server-side batching not supported in Weaviate < 1.36.0") nr_objects = 100000 import time @@ -575,14 +621,12 @@ def test_add_1000_tenant_objects_with_async_indexing_and_wait_for_only_one( [ lambda client: client.batch.dynamic(), lambda client: client.batch.fixed_size(), - lambda client: client.batch.rate_limit(1000), - lambda client: client.batch.experimental(), + lambda client: client.batch.stream(), ], ids=[ - "test_add_one_hundred_objects_and_references_between_all_dynamic", - 
"test_add_one_hundred_objects_and_references_between_all_fixed_size", - "test_add_one_hundred_objects_and_references_between_all_rate_limit", - "test_add_one_hundred_objects_and_references_between_all_experimental", + "test_add_one_object_and_a_self_reference_dynamic", + "test_add_one_object_and_a_self_reference_fixed_size", + "test_add_one_object_and_a_self_reference_stream", ], ) def test_add_one_object_and_a_self_reference( @@ -593,11 +637,10 @@ def test_add_one_object_and_a_self_reference( """Test adding one object and a self reference.""" client, name = client_factory() if ( - request.node.callspec.id - == "test_add_one_hundred_objects_and_references_between_all_experimental" - and client._connection._weaviate_version.is_lower_than(1, 34, 0) + request.node.callspec.id == "test_add_one_object_and_a_self_reference_stream" + and client._connection._weaviate_version.is_lower_than(1, 36, 0) ): - pytest.skip("Server-side batching not supported in Weaviate < 1.34.0") + pytest.skip("Server-side batching not supported in Weaviate < 1.36.0") with batching_method(client) as batch: uuid = batch.add_object(collection=name, properties={}) batch.add_reference( @@ -768,3 +811,34 @@ def test_references_with_to_uuids(client_factory: ClientFactory) -> None: assert len(client.batch.failed_references) == 0, client.batch.failed_references client.collections.delete(["target", "source"]) + + +@pytest.mark.asyncio +async def test_add_one_hundred_thousand_objects_async_client( + async_client_factory: AsyncClientFactory, +) -> None: + """Test adding one hundred thousand data objects.""" + client, name = await async_client_factory() + if client._connection._weaviate_version.is_lower_than(1, 36, 0): + pytest.skip("Server-side batching not supported in Weaviate < 1.36.0") + nr_objects = 100000 + import time + + start = time.time() + async with client.batch.stream(concurrency=1) as batch: + for i in range(nr_objects): + await batch.add_object( + collection=name, + properties={"name": "test" 
+ str(i)}, + ) + end = time.time() + print(f"Time taken to add {nr_objects} objects: {end - start} seconds") + assert len(client.batch.results.objs.errors) == 0 + assert len(client.batch.results.objs.all_responses) == nr_objects + assert len(client.batch.results.objs.uuids) == nr_objects + assert await client.collections.use(name).length() == nr_objects + assert client.batch.results.objs.has_errors is False + assert len(client.batch.failed_objects) == 0, [ + obj.message for obj in client.batch.failed_objects + ] + await client.collections.delete(name) diff --git a/integration/test_collection_batch.py b/integration/test_collection_batch.py index 72b3b7c53..40683a26f 100644 --- a/integration/test_collection_batch.py +++ b/integration/test_collection_batch.py @@ -1,10 +1,10 @@ import uuid from dataclasses import dataclass -from typing import Any, Generator, Optional, Protocol, Union +from typing import Any, Awaitable, Generator, Optional, Protocol, Union import pytest -from integration.conftest import CollectionFactory, CollectionFactoryGet +from integration.conftest import AsyncCollectionFactory, CollectionFactory, CollectionFactoryGet from weaviate.collections import Collection from weaviate.collections.classes.config import ( Configure, @@ -17,6 +17,8 @@ from weaviate.collections.classes.tenants import Tenant from weaviate.types import VECTORS +from weaviate.collections.collection.async_ import CollectionAsync + UUID = Union[str, uuid.UUID] @@ -55,11 +57,21 @@ def __call__(self, name: str = "", multi_tenancy: bool = False) -> Collection[An ... +class BatchCollectionAsync(Protocol): + """Typing for fixture.""" + + def __call__( + self, name: str = "", multi_tenancy: bool = False + ) -> Awaitable[CollectionAsync[Any, Any]]: + """Typing for fixture.""" + ... 
+ + @pytest.fixture def batch_collection( collection_factory: CollectionFactory, ) -> Generator[BatchCollection, None, None]: - def _factory(name: str = "", multi_tenancy: bool = False) -> Collection[Any, Any]: + def _factory(name: str = "", multi_tenancy: bool = False): collection = collection_factory( name=name, vectorizer_config=Configure.Vectorizer.none(), @@ -78,6 +90,29 @@ def _factory(name: str = "", multi_tenancy: bool = False) -> Collection[Any, Any yield _factory +@pytest.fixture +def batch_collection_async( + async_collection_factory: AsyncCollectionFactory, +) -> Generator[BatchCollectionAsync, None, None]: + async def _factory(name: str = "", multi_tenancy: bool = False): + collection = await async_collection_factory( + name=name, + vectorizer_config=Configure.Vectorizer.none(), + properties=[ + Property(name="name", data_type=DataType.TEXT), + Property(name="age", data_type=DataType.INT), + ], + multi_tenancy_config=Configure.multi_tenancy(multi_tenancy), + ) + await collection.config.add_reference( + ReferenceProperty(name="test", target_collection=collection.name) + ) + + return collection + + yield _factory + + @pytest.mark.parametrize( "vector", [None, [1, 2, 3], MockNumpyTorch([1, 2, 3]), MockTensorFlow([1, 2, 3])], @@ -233,3 +268,73 @@ def test_non_existant_collection(collection_factory_get: CollectionFactoryGet) - # above should not throw - depending on the autoschema config this might create an error or # not, so we do not check for errors here + + +@pytest.mark.asyncio +async def test_batch_one_hundred_thousand_objects_async_collection( + batch_collection_async: BatchCollectionAsync, +) -> None: + """Test adding one hundred thousand data objects.""" + col = await batch_collection_async() + if col._connection._weaviate_version.is_lower_than(1, 36, 0): + pytest.skip("Server-side batching not supported in Weaviate < 1.36.0") + nr_objects = 100000 + import time + + start = time.time() + async with col.batch.stream() as batch: + for i in 
range(nr_objects): + await batch.add_object( + properties={"name": "test" + str(i)}, + ) + end = time.time() + print(f"Time taken to add {nr_objects} objects: {end - start} seconds") + assert len(col.batch.results.objs.errors) == 0 + assert len(col.batch.results.objs.all_responses) == nr_objects + assert len(col.batch.results.objs.uuids) == nr_objects + assert await col.length() == nr_objects + assert col.batch.results.objs.has_errors is False + assert len(col.batch.failed_objects) == 0, [obj.message for obj in col.batch.failed_objects] + + +@pytest.mark.asyncio +async def test_ingest_one_hundred_thousand_data_objects_async( + batch_collection_async: BatchCollectionAsync, +) -> None: + col = await batch_collection_async() + if col._connection._weaviate_version.is_lower_than(1, 36, 0): + pytest.skip("Server-side batching not supported in Weaviate < 1.36.0") + nr_objects = 100000 + import time + + start = time.time() + results = await col.data.ingest({"name": "test" + str(i)} for i in range(nr_objects)) + end = time.time() + print(f"Time taken to add {nr_objects} objects: {end - start} seconds") + assert len(results.errors) == 0 + assert len(results.all_responses) == nr_objects + assert len(results.uuids) == nr_objects + assert await col.length() == nr_objects + assert results.has_errors is False + assert len(results.errors) == 0, [obj.message for obj in results.errors.values()] + + +def test_ingest_one_hundred_thousand_data_objects( + batch_collection: BatchCollection, +) -> None: + col = batch_collection() + if col._connection._weaviate_version.is_lower_than(1, 36, 0): + pytest.skip("Server-side batching not supported in Weaviate < 1.36.0") + nr_objects = 100000 + import time + + start = time.time() + results = col.data.ingest({"name": "test" + str(i)} for i in range(nr_objects)) + end = time.time() + print(f"Time taken to add {nr_objects} objects: {end - start} seconds") + assert len(results.errors) == 0 + assert len(results.all_responses) == nr_objects + assert 
len(results.uuids) == nr_objects + assert len(col) == nr_objects + assert results.has_errors is False + assert len(results.errors) == 0, [obj.message for obj in results.errors.values()] diff --git a/integration/test_rbac.py b/integration/test_rbac.py index 3b3866986..93c930672 100644 --- a/integration/test_rbac.py +++ b/integration/test_rbac.py @@ -742,10 +742,10 @@ def test_server_side_batching_with_auth() -> None: with connect_to_local( port=RBAC_PORTS[0], grpc_port=RBAC_PORTS[1], auth_credentials=RBAC_AUTH_CREDS ) as client: - if client._connection._weaviate_version.is_lower_than(1, 34, 0): - pytest.skip("Server-side batching not supported in Weaviate < 1.34.0") + if client._connection._weaviate_version.is_lower_than(1, 36, 0): + pytest.skip("Server-side batching not supported in Weaviate < 1.36.0") collection = client.collections.create(collection_name) - with client.batch.experimental() as batch: + with client.batch.stream() as batch: batch.add_object(collection_name) batch.add_object(collection_name) batch.add_object(collection_name) diff --git a/profiling/conftest.py b/profiling/conftest.py index aac8f3048..f99cf6bc1 100644 --- a/profiling/conftest.py +++ b/profiling/conftest.py @@ -1,5 +1,6 @@ +import asyncio import os -from typing import Any, Dict, Generator, List, Optional, Protocol, Union +from typing import Any, Awaitable, Dict, List, Optional, Protocol, Union import pytest from _pytest.fixtures import SubRequest @@ -15,6 +16,8 @@ from weaviate.config import AdditionalConfig from weaviate.connect.integrations import _IntegrationConfig +from weaviate.collections.collection.async_ import CollectionAsync + def get_file_path(file_name: str) -> str: if not os.path.exists(file_name) and not os.path.exists("profiling/" + file_name): @@ -41,8 +44,25 @@ def __call__( ... 
+class CollectionFactoryAsync(Protocol): + """Typing for fixture.""" + + def __call__( + self, + properties: Optional[List[Property]] = None, + vectorizer_config: Optional[ + Union[_VectorizerConfigCreate, List[_NamedVectorConfigCreate]] + ] = None, + headers: Optional[Dict[str, str]] = None, + inverted_index_config: Optional[_InvertedIndexConfigCreate] = None, + integration_config: Optional[Union[_IntegrationConfig, List[_IntegrationConfig]]] = None, + ) -> Awaitable[CollectionAsync[Any, Any]]: + """Typing for fixture.""" + ... + + @pytest.fixture -def collection_factory(request: SubRequest) -> Generator[CollectionFactory, None, None]: +def collection_factory(request: SubRequest): name_fixture: Optional[str] = None client_fixture: Optional[weaviate.WeaviateClient] = None @@ -70,6 +90,9 @@ def _factory( vectorizer_config=vectorizer_config, properties=properties, inverted_index_config=inverted_index_config, + replication_config=weaviate.classes.config.Configure.replication( + factor=3, async_enabled=True + ), ) return collection @@ -81,6 +104,50 @@ def _factory( client_fixture.close() +@pytest.fixture +def collection_factory_async(request: SubRequest): + name_fixture: Optional[str] = None + client_fixture: Optional[weaviate.WeaviateAsyncClient] = None + + async def _factory( + properties: Optional[List[Property]] = None, + vectorizer_config: Optional[ + Union[_VectorizerConfigCreate, List[_NamedVectorConfigCreate]] + ] = None, + headers: Optional[Dict[str, str]] = None, + inverted_index_config: Optional[_InvertedIndexConfigCreate] = None, + integration_config: Optional[Union[_IntegrationConfig, List[_IntegrationConfig]]] = None, + ) -> CollectionAsync[Any, Any]: + nonlocal client_fixture, name_fixture + name_fixture = _sanitize_collection_name(request.node.name) + client_fixture = weaviate.use_async_with_local( + headers=headers, + additional_config=AdditionalConfig(timeout=(60, 120)), # for image tests + ) + await client_fixture.connect() + await 
client_fixture.collections.delete(name_fixture) + if integration_config is not None: + client_fixture.integrations.configure(integration_config) + + collection: CollectionAsync[Any, Any] = await client_fixture.collections.create( + name=name_fixture, + vectorizer_config=vectorizer_config, + properties=properties, + inverted_index_config=inverted_index_config, + replication_config=weaviate.classes.config.Configure.replication( + factor=3, async_enabled=True + ), + ) + return collection + + try: + yield _factory + finally: + if client_fixture is not None and name_fixture is not None: + asyncio.run(client_fixture.collections.delete(name_fixture)) + asyncio.run(client_fixture.close()) + + def _sanitize_collection_name(name: str) -> str: name = name.replace("[", "").replace("]", "").replace("-", "").replace(" ", "").replace(".", "") return name[0].upper() + name[1:] diff --git a/profiling/test_shutdown.py b/profiling/test_shutdown.py index 11b9264c1..c41437553 100644 --- a/profiling/test_shutdown.py +++ b/profiling/test_shutdown.py @@ -27,7 +27,7 @@ def setup(client: weaviate.WeaviateClient, collection: str) -> weaviate.collecti def import_(client: weaviate.WeaviateClient, collection: str, how_many: int = 1_000_000) -> None: uuids: dict[str, int] = {} - with client.batch.experimental(concurrency=1) as batch: + with client.batch.stream(concurrency=1) as batch: for i in range(how_many): uuid = batch.add_object( collection=collection, diff --git a/profiling/test_sphere.py b/profiling/test_sphere.py index a2c225954..7520b85d3 100644 --- a/profiling/test_sphere.py +++ b/profiling/test_sphere.py @@ -1,15 +1,16 @@ import json import time +import pytest import weaviate.classes as wvc -from .conftest import CollectionFactory, get_file_path +from .conftest import CollectionFactory, CollectionFactoryAsync, get_file_path # download sphere dataset from https://weaviate.io/blog/sphere-dataset-in-weaviate#importing-sphere-with-python # place file in profiling folder -def 
test_sphere(collection_factory: CollectionFactory) -> None: +def test_sphere_sync(collection_factory: CollectionFactory) -> None: sphere_file = get_file_path("sphere.1m.jsonl") collection = collection_factory( @@ -28,10 +29,13 @@ def test_sphere(collection_factory: CollectionFactory) -> None: start = time.time() import_objects = 1000000 - with collection.batch.dynamic() as batch: + with collection.batch.stream() as batch: with open(sphere_file) as jsonl_file: for i, jsonl in enumerate(jsonl_file): - if i == import_objects or batch.number_errors > 10: + if i == import_objects: + break + if batch.number_errors > 10: + print("Too many errors, stopping import") break json_parsed = json.loads(jsonl) @@ -46,7 +50,58 @@ def test_sphere(collection_factory: CollectionFactory) -> None: vector=json_parsed["vector"], ) if i % 1000 == 0: - print(f"Imported {len(collection)} objects") + print( + f"Imported {len(collection)} objects after processing {i} lines in {time.time() - start} seconds" + ) assert len(collection.batch.failed_objects) == 0 assert len(collection) == import_objects print(f"Imported {import_objects} objects in {time.time() - start}") + + +@pytest.mark.asyncio +async def test_sphere_async(collection_factory_async: CollectionFactoryAsync) -> None: + sphere_file = get_file_path("sphere.1m.jsonl") + + collection = await collection_factory_async( + properties=[ + wvc.config.Property(name="url", data_type=wvc.config.DataType.TEXT), + wvc.config.Property(name="title", data_type=wvc.config.DataType.TEXT), + wvc.config.Property(name="raw", data_type=wvc.config.DataType.TEXT), + wvc.config.Property(name="sha", data_type=wvc.config.DataType.TEXT), + ], + vectorizer_config=wvc.config.Configure.Vectorizer.none(), + # headers={ + # "X-Cohere-Api-Key": "YOUR_KEY", + # "X-OpenAI-Api-Key": "YOUR_KEY", + # }, + ) + start = time.time() + + import_objects = 1000000 + async with collection.batch.stream() as batch: + with open(sphere_file) as jsonl_file: + for i, jsonl in 
enumerate(jsonl_file): + if i == import_objects: + break + if batch.number_errors > 10: + print("Too many errors, stopping import") + break + + json_parsed = json.loads(jsonl) + await batch.add_object( + properties={ + "url": json_parsed["url"], + "title": json_parsed["title"], + "raw": json_parsed["raw"], + "sha": json_parsed["sha"], + }, + uuid=json_parsed["id"], + vector=json_parsed["vector"], + ) + if i % 1000 == 0: + print( + f"Imported {await collection.length()} objects after processing {i} lines in {time.time() - start} seconds" + ) + assert len(collection.batch.failed_objects) == 0 + assert await collection.length() == import_objects + print(f"Imported {import_objects} objects in {time.time() - start}") diff --git a/weaviate/client.py b/weaviate/client.py index 8cf856c51..d7f9080f4 100644 --- a/weaviate/client.py +++ b/weaviate/client.py @@ -10,7 +10,7 @@ from .auth import AuthCredentials from .backup import _Backup, _BackupAsync from .cluster import _Cluster, _ClusterAsync -from .collections.batch.client import _BatchClientWrapper +from .collections.batch.client import _BatchClientWrapper, _BatchClientWrapperAsync from .collections.collections import _Collections, _CollectionsAsync from .config import AdditionalConfig from .connect import executor @@ -76,6 +76,7 @@ def __init__( ) self.alias = _AliasAsync(self._connection) self.backup = _BackupAsync(self._connection) + self.batch = _BatchClientWrapperAsync(self._connection) self.cluster = _ClusterAsync(self._connection) self.collections = _CollectionsAsync(self._connection) self.debug = _DebugAsync(self._connection) diff --git a/weaviate/client.pyi b/weaviate/client.pyi index 205a34b4e..9b32af15f 100644 --- a/weaviate/client.pyi +++ b/weaviate/client.pyi @@ -18,7 +18,7 @@ from weaviate.users.sync import _Users from .backup import _Backup, _BackupAsync from .cluster import _Cluster, _ClusterAsync -from .collections.batch.client import _BatchClientWrapper +from .collections.batch.client import 
_BatchClientWrapper, _BatchClientWrapperAsync from .debug import _Debug, _DebugAsync from .rbac import _Roles, _RolesAsync from .types import NUMBER @@ -29,6 +29,7 @@ class WeaviateAsyncClient(_WeaviateClientExecutor[ConnectionAsync]): _connection: ConnectionAsync alias: _AliasAsync backup: _BackupAsync + batch: _BatchClientWrapperAsync collections: _CollectionsAsync cluster: _ClusterAsync debug: _DebugAsync diff --git a/weaviate/collections/batch/async_.py b/weaviate/collections/batch/async_.py new file mode 100644 index 000000000..7178f2caa --- /dev/null +++ b/weaviate/collections/batch/async_.py @@ -0,0 +1,604 @@ +import asyncio +import time +import uuid as uuid_package +from typing import ( + AsyncGenerator, + Generator, + List, + Optional, + Set, + Union, +) + +from pydantic import ValidationError + +from weaviate.collections.batch.base import ( + GCP_STREAM_TIMEOUT, + ObjectsBatchRequest, + ReferencesBatchRequest, + _BatchDataWrapper, + _ClusterBatchAsync, +) +from weaviate.collections.batch.grpc_batch import _BatchGRPC +from weaviate.collections.classes.batch import ( + BatchObject, + BatchObjectReturn, + BatchReference, + BatchReferenceReturn, + ErrorObject, + ErrorReference, + Shard, +) +from weaviate.collections.classes.config import ConsistencyLevel +from weaviate.collections.classes.internal import ( + ReferenceInput, + ReferenceInputs, + ReferenceToMulti, +) +from weaviate.collections.classes.types import WeaviateProperties +from weaviate.connect.executor import aresult +from weaviate.connect.v4 import ConnectionAsync +from weaviate.exceptions import ( + WeaviateBatchFailedToReestablishStreamError, + WeaviateBatchStreamError, + WeaviateBatchValidationError, + WeaviateGRPCUnavailableError, + WeaviateStartUpError, +) +from weaviate.logger import logger +from weaviate.proto.v1 import batch_pb2 +from weaviate.types import UUID, VECTORS + + +class _BgTasks: + def __init__(self, recv: asyncio.Task[None], loop: asyncio.Task[None]) -> None: + self.recv = recv 
+ self.loop = loop + self.send_started = False + + def all_alive(self) -> bool: + return all([not self.recv.done(), not self.loop.done()]) + + async def gather(self) -> None: + tasks = [self.recv, self.loop] + await asyncio.gather(*tasks) + + +class _BatchBaseAsync: + def __init__( + self, + connection: ConnectionAsync, + consistency_level: Optional[ConsistencyLevel], + results: _BatchDataWrapper, + objects: Optional[ObjectsBatchRequest[BatchObject]] = None, + references: Optional[ReferencesBatchRequest[BatchReference]] = None, + ) -> None: + self.__batch_objects = objects or ObjectsBatchRequest[BatchObject]() + self.__batch_references = references or ReferencesBatchRequest[BatchReference]() + + self.__connection = connection + self.__is_gcp_on_wcd = connection._connection_params.is_gcp_on_wcd() + self.__is_renewing_stream = asyncio.Event() + self.__consistency_level: ConsistencyLevel = consistency_level or ConsistencyLevel.QUORUM + self.__batch_size = 100 + + self.__batch_grpc = _BatchGRPC( + connection._weaviate_version, self.__consistency_level, connection._grpc_max_msg_size + ) + self.__cluster = _ClusterBatchAsync(self.__connection) + + # lookup table for objects that are currently being processed - is used to not send references from objects that have not been added yet + self.__uuid_lookup_lock = asyncio.Lock() + self.__uuid_lookup: Set[str] = set() + + # we do not want that users can access the results directly as they are not thread-safe + self.__results_for_wrapper_backup = results + self.__results_for_wrapper = _BatchDataWrapper() + + self.__objs_count = 0 + self.__refs_count = 0 + + self.__is_oom = asyncio.Event() + self.__is_shutting_down = asyncio.Event() + self.__is_shutdown = asyncio.Event() + + self.__objs_cache_lock = asyncio.Lock() + self.__objs_cache: dict[str, BatchObject] = {} + self.__refs_cache_lock = asyncio.Lock() + self.__refs_cache: dict[str, BatchReference] = {} + + self.__inflight_objs: set[str] = set() + self.__inflight_refs: set[str] 
= set() + + # maxsize=1 so that __send does not run faster than generator for __recv + # thereby using too much buffer in case of server-side shutdown + self.__reqs: asyncio.Queue[Optional[batch_pb2.BatchStreamRequest]] = asyncio.Queue( + maxsize=1 + ) + + self.__stop = False + self.__bg_exception: Optional[Exception] = None + self.__bg_tasks: Optional[_BgTasks] = None + + @property + def number_errors(self) -> int: + """Return the number of errors in the batch.""" + return len(self.__results_for_wrapper.failed_objects) + len( + self.__results_for_wrapper.failed_references + ) + + def __all_tasks_alive(self) -> bool: + return self.__bg_tasks is not None and self.__bg_tasks.all_alive() + + async def _start(self): + self.__number_of_nodes = await self.__cluster.get_number_of_nodes() + + async def loop_wrapper() -> None: + try: + await self.__loop() + logger.info("exited batch loop task") + except Exception as e: + logger.error(e) + self.__bg_exception = e + + async def recv_wrapper() -> None: + socket_hung_up = False + try: + await self.__recv() + logger.info("exited batch recv task") + except Exception as e: + if isinstance(e, WeaviateBatchStreamError) and ( + "Socket closed" in e.message or "context canceled" in e.message + ): + logger.warning(e) + socket_hung_up = True + else: + logger.error(e) + self.__bg_exception = e + if socket_hung_up: + # this happens during ungraceful shutdown of the coordinator + # lets restart the stream and add the cached objects again + logger.warning("Stream closed unexpectedly, restarting...") + await self.__reconnect() + # server sets this whenever it restarts, gracefully or unexpectedly, so need to clear it now + self.__is_shutting_down.clear() + async with self.__objs_cache_lock: + await self.__batch_objects.aprepend(list(self.__objs_cache.values())) + async with self.__refs_cache_lock: + await self.__batch_references.aprepend(list(self.__refs_cache.values())) + # start a new stream with a newly reconnected channel + return await 
recv_wrapper() + + recv = asyncio.create_task(recv_wrapper()) + loop = asyncio.create_task(loop_wrapper()) + + self.__bg_tasks = _BgTasks( + recv=recv, + loop=loop, + ) + + async def _wait(self): + assert self.__bg_tasks is not None + await self.__bg_tasks.gather() + + # copy the results to the public results + self.__results_for_wrapper_backup.results = self.__results_for_wrapper.results + self.__results_for_wrapper_backup.failed_objects = self.__results_for_wrapper.failed_objects + self.__results_for_wrapper_backup.failed_references = ( + self.__results_for_wrapper.failed_references + ) + self.__results_for_wrapper_backup.imported_shards = ( + self.__results_for_wrapper.imported_shards + ) + + async def _shutdown(self) -> None: + self.__stop = True + + async def __loop(self) -> None: + refresh_time: float = 0.01 + while self.__bg_exception is None: + if len(self.__batch_objects) + len(self.__batch_references) > 0: + start = time.time() + while (len_o := len(self.__batch_objects)) + ( + len_r := len(self.__batch_references) + ) < self.__batch_size: + # wait for more objects to be added up to the batch size + await asyncio.sleep(refresh_time) + if time.time() - start >= 1 and ( + len_o == len(self.__batch_objects) or len_r == len(self.__batch_references) + ): + # no new objects were added in the last second, exit the loop + break + + objs = self.__batch_objects.pop_items(self.__batch_size) + async with self.__uuid_lookup_lock: + refs = self.__batch_references.pop_items( + self.__batch_size - len(objs), + uuid_lookup=self.__uuid_lookup, + ) + + for req in self.__generate_stream_requests(objs, refs): + start, paused = time.time(), False + while ( + self.__is_shutting_down.is_set() + or self.__is_shutdown.is_set() + or self.__is_oom.is_set() + ): + if not paused: + logger.info("Server is shutting down, pausing batching loop...") + await self.__reqs.put(None) + paused = True + await asyncio.sleep(1) + if time.time() - start > 300: + raise 
WeaviateBatchFailedToReestablishStreamError( + "Batch stream was not re-established within 5 minutes. Terminating batch." + ) + try: + await asyncio.wait_for(self.__reqs.put(req), timeout=60) + except asyncio.TimeoutError as e: + logger.warning( + "Batch queue is blocked for more than 60 seconds. Exiting the loop" + ) + self.__bg_exception = e + return + elif self.__stop: + # we are done, send the sentinel into our queue to be consumed by the batch sender + await self.__reqs.put(None) # signal the end of the stream + logger.info("Batching finished, sent stop signal to batch stream") + return + await asyncio.sleep(refresh_time) + + def __generate_stream_requests( + self, + objects: List[BatchObject], + references: List[BatchReference], + ) -> Generator[batch_pb2.BatchStreamRequest, None, None]: + per_object_overhead = 4 # extra overhead bytes per object in the request + + def request_maker(): + return batch_pb2.BatchStreamRequest() + + request = request_maker() + total_size = request.ByteSize() + + inflight_objs = set() + inflight_refs = set() + for object_ in objects: + obj = self.__batch_grpc.grpc_object(object_._to_internal()) + obj_size = obj.ByteSize() + per_object_overhead + + if total_size + obj_size >= self.__batch_grpc.grpc_max_msg_size: + self.__inflight_objs.update(inflight_objs) + self.__inflight_refs.update(inflight_refs) + yield request + request = request_maker() + total_size = request.ByteSize() + + request.data.objects.values.append(obj) + total_size += obj_size + inflight_objs.add(obj.uuid) + + for reference in references: + ref = self.__batch_grpc.grpc_reference(reference._to_internal()) + ref_size = ref.ByteSize() + per_object_overhead + + if total_size + ref_size >= self.__batch_grpc.grpc_max_msg_size: + self.__inflight_objs.update(inflight_objs) + self.__inflight_refs.update(inflight_refs) + yield request + request = request_maker() + total_size = request.ByteSize() + + request.data.references.values.append(ref) + total_size += ref_size + 
inflight_refs.add(reference._to_beacon()) + + if len(request.data.objects.values) > 0 or len(request.data.references.values) > 0: + self.__inflight_objs.update(inflight_objs) + self.__inflight_refs.update(inflight_refs) + yield request + + async def __send( + self, + ) -> AsyncGenerator[batch_pb2.BatchStreamRequest, None]: + yield batch_pb2.BatchStreamRequest( + start=batch_pb2.BatchStreamRequest.Start( + consistency_level=self.__batch_grpc._consistency_level, + ), + ) + stream_start = time.time() + while self.__bg_exception is None: + if self.__is_gcp_on_wcd: + assert stream_start is not None + if time.time() - stream_start > GCP_STREAM_TIMEOUT: + logger.info( + "GCP connections have a maximum lifetime. Re-establishing the batch stream to avoid timeout errors." + ) + self.__is_renewing_stream.set() + yield batch_pb2.BatchStreamRequest(stop=batch_pb2.BatchStreamRequest.Stop()) + return + try: + req = await asyncio.wait_for(self.__reqs.get(), timeout=1) + except asyncio.TimeoutError: + continue + if req is not None: + yield req + continue + if self.__stop and not ( + self.__is_shutting_down.is_set() or self.__is_shutdown.is_set() + ): + logger.info("Batching finished, closing the client-side of the stream") + yield batch_pb2.BatchStreamRequest(stop=batch_pb2.BatchStreamRequest.Stop()) + return + if self.__is_shutting_down.is_set(): + logger.info("Server shutting down, closing the client-side of the stream") + return + if self.__is_oom.is_set(): + logger.info("Server out-of-memory, closing the client-side of the stream") + return + logger.info("Received sentinel, but not stopping, continuing...") + logger.info("Batch send thread exiting due to exception...") + + async def __recv(self) -> None: + stream = self.__batch_grpc.astream( + connection=self.__connection, + requests=self.__send(), + ) + self.__is_shutdown.clear() + async for message in stream: + if message.HasField("started"): + logger.info("Batch stream started successfully") + + if 
message.HasField("backoff"): + if ( + message.backoff.batch_size != self.__batch_size + and not self.__is_shutting_down.is_set() + and not self.__is_shutdown.is_set() + and not self.__is_oom.is_set() + and not self.__stop + ): + self.__batch_size = message.backoff.batch_size + logger.info(f"Updated batch size to {self.__batch_size} as per server request") + + if message.HasField("acks"): + self.__inflight_objs.difference_update(message.acks.uuids) + async with self.__uuid_lookup_lock: + self.__uuid_lookup.difference_update(message.acks.uuids) + self.__inflight_refs.difference_update(message.acks.beacons) + + if message.HasField("results"): + result_objs = BatchObjectReturn() + result_refs = BatchReferenceReturn() + failed_objs: List[ErrorObject] = [] + failed_refs: List[ErrorReference] = [] + for error in message.results.errors: + if error.HasField("uuid"): + try: + async with self.__objs_cache_lock: + cached = self.__objs_cache.pop(error.uuid) + except KeyError: + continue + err = ErrorObject( + message=error.error, + object_=cached, + ) + result_objs += BatchObjectReturn( + _all_responses=[err], + errors={cached.index: err}, + ) + failed_objs.append(err) + logger.warning( + { + "error": error.error, + "object": error.uuid, + "action": "use {client,collection}.batch.failed_objects to access this error", + } + ) + if error.HasField("beacon"): + try: + async with self.__refs_cache_lock: + cached = self.__refs_cache.pop(error.beacon) + except KeyError: + continue + err = ErrorReference( + message=error.error, + reference=cached, + ) + result_refs += BatchReferenceReturn( + errors={cached.index: err}, + ) + failed_refs.append(err) + logger.warning( + { + "error": error.error, + "reference": error.beacon, + "action": "use {client,collection}.batch.failed_references to access this error", + } + ) + for success in message.results.successes: + if success.HasField("uuid"): + try: + async with self.__objs_cache_lock: + cached = self.__objs_cache.pop(success.uuid) + except 
KeyError: + continue + uuid = uuid_package.UUID(success.uuid) + result_objs += BatchObjectReturn( + _all_responses=[uuid], + uuids={cached.index: uuid}, + ) + if success.HasField("beacon"): + try: + async with self.__refs_cache_lock: + self.__refs_cache.pop(success.beacon) + except KeyError: + continue + self.__results_for_wrapper.results.objs += result_objs + self.__results_for_wrapper.results.refs += result_refs + self.__results_for_wrapper.failed_objects.extend(failed_objs) + self.__results_for_wrapper.failed_references.extend(failed_refs) + + if message.HasField("out_of_memory"): + logger.info( + "Server reported out-of-memory. Batching will wait at most 10 minutes for the server to scale-up. If the server does not recover within this time, the batch will terminate with an error." + ) + self.__is_oom.set() + await self.__batch_objects.aprepend( + [self.__objs_cache[uuid] for uuid in message.out_of_memory.uuids] + ) + await self.__batch_references.aprepend( + [self.__refs_cache[beacon] for beacon in message.out_of_memory.beacons] + ) + + if message.HasField("shutting_down"): + logger.info("Received shutting down message from server") + self.__is_shutting_down.set() + self.__is_oom.clear() + + if message.HasField("shutdown"): + logger.info("Received shutdown finished message from server") + self.__is_shutdown.set() + self.__is_shutting_down.clear() + + if self.__is_shutdown.is_set(): + await self.__reconnect() + logger.info("Restarting batch recv after shutdown...") + return await self.__recv() + + elif self.__is_renewing_stream.is_set(): + # restart the stream if we are renewing it (GCP connections have a max lifetime) + logger.info("Restarting batch recv after renewing stream...") + self.__is_renewing_stream.clear() + return await self.__recv() + + logger.info("Server closed the stream from its side, shutting down batch") + + async def __reconnect(self, retry: int = 0) -> None: + if self.__consistency_level == ConsistencyLevel.ALL or self.__number_of_nodes == 
1: + # check that all nodes are available before reconnecting + up_nodes = await self.__cluster.get_nodes_status() + while len(up_nodes) != self.__number_of_nodes or any( + node["status"] != "HEALTHY" for node in up_nodes + ): + logger.info( + "Waiting for all nodes to be HEALTHY before reconnecting to batch stream..." + ) + await asyncio.sleep(5) + up_nodes = await self.__cluster.get_nodes_status() + try: + logger.info(f"Trying to reconnect after shutdown... {retry + 1}/{5}") + await aresult(self.__connection.close("async")) + await self.__connection.connect(force=True) + logger.info("Reconnected successfully") + except (WeaviateStartUpError, WeaviateGRPCUnavailableError) as e: + if retry < 5: + logger.warning(f"Failed to reconnect, after {retry} attempts. Retrying...") + await asyncio.sleep(2**retry) + await self.__reconnect(retry + 1) + else: + logger.error("Failed to reconnect after 5 attempts following server shutdown") + self.__bg_exception = e + + async def flush(self) -> None: + """Flush the batch queue and wait for all requests to be finished.""" + # bg thread is sending objs+refs automatically, so simply wait for everything to be done + while len(self.__batch_objects) > 0 or len(self.__batch_references) > 0: + await asyncio.sleep(0.01) + + async def _add_object( + self, + collection: str, + properties: Optional[WeaviateProperties] = None, + references: Optional[ReferenceInputs] = None, + uuid: Optional[UUID] = None, + vector: Optional[VECTORS] = None, + tenant: Optional[str] = None, + ) -> UUID: + self.__check_bg_tasks_alive() + await asyncio.sleep(0) + try: + batch_object = BatchObject( + collection=collection, + properties=properties, + references=references, + uuid=uuid, + vector=vector, + tenant=tenant, + index=self.__objs_count, + ) + self.__results_for_wrapper.imported_shards.add( + Shard(collection=collection, tenant=tenant) + ) + except ValidationError as e: + raise WeaviateBatchValidationError(repr(e)) + uuid = str(batch_object.uuid) + async with 
self.__uuid_lookup_lock: + self.__uuid_lookup.add(uuid) + await self.__batch_objects.aadd(batch_object) + async with self.__objs_cache_lock: + self.__objs_cache[uuid] = batch_object + self.__objs_count += 1 + + while self.__is_blocked(): + self.__check_bg_tasks_alive() + await asyncio.sleep(0.01) + + assert batch_object.uuid is not None + await asyncio.sleep(0) + return batch_object.uuid + + async def _add_reference( + self, + from_object_uuid: UUID, + from_object_collection: str, + from_property_name: str, + to: ReferenceInput, + tenant: Optional[str] = None, + ) -> None: + self.__check_bg_tasks_alive() + await asyncio.sleep(0) + if isinstance(to, ReferenceToMulti): + to_strs: Union[List[str], List[UUID]] = to.uuids_str + elif isinstance(to, str) or isinstance(to, uuid_package.UUID): + to_strs = [to] + else: + to_strs = list(to) + + for uid in to_strs: + try: + batch_reference = BatchReference( + from_object_collection=from_object_collection, + from_object_uuid=from_object_uuid, + from_property_name=from_property_name, + to_object_collection=( + to.target_collection if isinstance(to, ReferenceToMulti) else None + ), + to_object_uuid=uid, + tenant=tenant, + index=self.__refs_count, + ) + except ValidationError as e: + raise WeaviateBatchValidationError(repr(e)) + await self.__batch_references.aadd(batch_reference) + async with self.__refs_cache_lock: + self.__refs_cache[batch_reference._to_beacon()] = batch_reference + self.__refs_count += 1 + while self.__is_blocked(): + self.__check_bg_tasks_alive() + await asyncio.sleep(0.01) + + def __is_blocked(self): + return ( + len(self.__inflight_objs) >= self.__batch_size + or len(self.__inflight_refs) >= self.__batch_size * 2 + or self.__is_renewing_stream.is_set() + or self.__is_shutting_down.is_set() + or self.__is_shutdown.is_set() + or self.__is_oom.is_set() + ) + + def __check_bg_tasks_alive(self) -> None: + if self.__all_tasks_alive(): + return + + raise self.__bg_exception or Exception("Batch tasks died 
unexpectedly") diff --git a/weaviate/collections/batch/base.py b/weaviate/collections/batch/base.py index efc7dd163..14a0c0768 100644 --- a/weaviate/collections/batch/base.py +++ b/weaviate/collections/batch/base.py @@ -1,3 +1,4 @@ +import asyncio import contextvars import functools import math @@ -10,10 +11,8 @@ from concurrent.futures import ThreadPoolExecutor from copy import copy from dataclasses import dataclass, field -from queue import Queue -from typing import Any, Dict, Generator, Generic, List, Optional, Set, TypeVar, Union, cast +from typing import Any, Dict, Generic, List, Optional, Set, TypeVar, Union, cast -from httpx import ConnectError from pydantic import ValidationError from typing_extensions import TypeAlias @@ -29,8 +28,6 @@ ErrorObject, ErrorReference, Shard, - _BatchObject, - _BatchReference, ) from weaviate.collections.classes.config import ConsistencyLevel from weaviate.collections.classes.internal import ( @@ -40,16 +37,12 @@ ) from weaviate.collections.classes.types import WeaviateProperties from weaviate.connect import executor -from weaviate.connect.v4 import ConnectionSync +from weaviate.connect.v4 import ConnectionAsync, ConnectionSync from weaviate.exceptions import ( EmptyResponseException, - WeaviateBatchStreamError, WeaviateBatchValidationError, - WeaviateGRPCUnavailableError, - WeaviateStartUpError, ) from weaviate.logger import logger -from weaviate.proto.v1 import batch_pb2 from weaviate.types import UUID, VECTORS from weaviate.util import _decode_json_response_dict from weaviate.warnings import _Warnings @@ -67,6 +60,9 @@ MAX_RETRIES = float( os.getenv("WEAVIATE_BATCH_MAX_RETRIES", "9.299") ) # approximately 10m30s of waiting in worst case, e.g. 
server scale up event +GCP_STREAM_TIMEOUT = ( + 160 # GCP connections have a max lifetime of 180s, leave 20s of buffer as safety +) class BatchRequest(ABC, Generic[TBatchInput, TBatchReturn]): @@ -75,96 +71,160 @@ class BatchRequest(ABC, Generic[TBatchInput, TBatchReturn]): def __init__(self) -> None: self._items: List[TBatchInput] = [] self._lock = threading.Lock() + self._alock = asyncio.Lock() def __len__(self) -> int: - return len(self._items) + with self._lock: + return len(self._items) + + async def alen(self) -> int: + """Asynchronously get the length of the BatchRequest.""" + async with self._alock: + return len(self._items) def add(self, item: TBatchInput) -> None: """Add an item to the BatchRequest.""" - self._lock.acquire() - self._items.append(item) - self._lock.release() + with self._lock: + self._items.append(item) + + async def aadd(self, item: TBatchInput) -> None: + """Asynchronously add an item to the BatchRequest.""" + async with self._alock: + self._items.append(item) def prepend(self, item: List[TBatchInput]) -> None: """Add items to the front of the BatchRequest. This is intended to be used when objects should be retries, eg. after a temporary error. """ - self._lock.acquire() - self._items = item + self._items - self._lock.release() + with self._lock: + self._items = item + self._items + + async def aprepend(self, item: List[TBatchInput]) -> None: + """Asynchronously add items to the front of the BatchRequest. + + This is intended to be used when objects should be retried, e.g. after a temporary error. 
+ """ + async with self._alock: + self._items = item + self._items -Ref = TypeVar("Ref", bound=Union[_BatchReference, batch_pb2.BatchReference]) +Ref = TypeVar("Ref", bound=BatchReference) class ReferencesBatchRequest(BatchRequest[Ref, BatchReferenceReturn]): """Collect Weaviate-object references to add them in one request to Weaviate.""" - def pop_items(self, pop_amount: int, uuid_lookup: Set[str]) -> List[Ref]: - """Pop the given number of items from the BatchRequest queue. - - Returns: - A list of items from the BatchRequest. - """ + def __pop_items(self, pop_amount: int, uuid_lookup: Set[str]) -> List[Ref]: ret: List[Ref] = [] i = 0 - self._lock.acquire() while len(ret) < pop_amount and len(self._items) > 0 and i < len(self._items): - if self._items[i].from_uuid not in uuid_lookup and ( - self._items[i].to_uuid is None or self._items[i].to_uuid not in uuid_lookup + if self._items[i].from_object_uuid not in uuid_lookup and ( + self._items[i].to_object_uuid is None + or self._items[i].to_object_uuid not in uuid_lookup ): ret.append(self._items.pop(i)) else: i += 1 - self._lock.release() return ret + def pop_items(self, pop_amount: int, uuid_lookup: Set[str]) -> List[Ref]: + """Pop the given number of items from the BatchRequest queue. + + Returns: + A list of items from the BatchRequest. + """ + with self._lock: + return self.__pop_items(pop_amount, uuid_lookup) + + async def apop_items(self, pop_amount: int, uuid_lookup: Set[str]) -> List[Ref]: + """Asynchronously pop the given number of items from the BatchRequest queue. + + Returns: + A list of items from the BatchRequest. + """ + async with self._alock: + return self.__pop_items(pop_amount, uuid_lookup) + + def __head(self) -> Optional[Ref]: + if len(self._items) > 0: + return self._items[0] + return None + def head(self) -> Optional[Ref]: """Get the first item from the BatchRequest queue without removing it. Returns: The first item from the BatchRequest or None if the queue is empty. 
""" - self._lock.acquire() - item = self._items[0] if len(self._items) > 0 else None - self._lock.release() - return item + with self._lock: + return self.__head() + + async def ahead(self) -> Optional[Ref]: + """Asynchronously get the first item from the BatchRequest queue without removing it. + Returns: + The first item from the BatchRequest or None if the queue is empty. + """ + async with self._alock: + return self.__head() -Obj = TypeVar("Obj", bound=Union[_BatchObject, batch_pb2.BatchObject]) + +Obj = TypeVar("Obj", bound=BatchObject) class ObjectsBatchRequest(Generic[Obj], BatchRequest[Obj, BatchObjectReturn]): """Collect objects for one batch request to weaviate.""" - def pop_items(self, pop_amount: int) -> List[Obj]: - """Pop the given number of items from the BatchRequest queue. - - Returns: - A list of items from the BatchRequest. - """ - self._lock.acquire() + def __pop_items(self, pop_amount: int) -> List[Obj]: if pop_amount >= len(self._items): ret = copy(self._items) self._items.clear() else: ret = copy(self._items[:pop_amount]) self._items = self._items[pop_amount:] - - self._lock.release() return ret + def pop_items(self, pop_amount: int) -> List[Obj]: + """Pop the given number of items from the BatchRequest queue. + + Returns: + A list of items from the BatchRequest. + """ + with self._lock: + return self.__pop_items(pop_amount) + + async def apop_items(self, pop_amount: int) -> List[Obj]: + """Asynchronously pop the given number of items from the BatchRequest queue. + + Returns: + A list of items from the BatchRequest. + """ + async with self._alock: + return self.__pop_items(pop_amount) + + def __head(self) -> Optional[Obj]: + if len(self._items) > 0: + return self._items[0] + return None + def head(self) -> Optional[Obj]: """Get the first item from the BatchRequest queue without removing it. Returns: The first item from the BatchRequest or None if the queue is empty. 
""" - self._lock.acquire() - item = self._items[0] if len(self._items) > 0 else None - self._lock.release() - return item + with self._lock: + return self.__head() + + async def ahead(self) -> Optional[Obj]: + """Asynchronously get the first item from the BatchRequest queue without removing it. + + Returns: + The first item from the BatchRequest or None if the queue is empty. + """ + async with self._alock: + return self.__head() @dataclass @@ -210,11 +270,11 @@ def __init__( batch_mode: _BatchMode, executor: ThreadPoolExecutor, vectorizer_batching: bool, - objects: Optional[ObjectsBatchRequest] = None, - references: Optional[ReferencesBatchRequest] = None, + objects: Optional[ObjectsBatchRequest[BatchObject]] = None, + references: Optional[ReferencesBatchRequest[BatchReference]] = None, ) -> None: - self.__batch_objects = objects or ObjectsBatchRequest() - self.__batch_references = references or ReferencesBatchRequest() + self.__batch_objects = objects or ObjectsBatchRequest[BatchObject]() + self.__batch_references = references or ReferencesBatchRequest[BatchReference]() self.__connection = connection self.__consistency_level: Optional[ConsistencyLevel] = consistency_level @@ -286,7 +346,7 @@ def __init__( self.__uuid_lookup_lock = threading.Lock() self.__results_lock = threading.Lock() - self.__bg_thread = self.__start_bg_threads() + self.__bg_threads = self.__start_bg_threads() self.__bg_thread_exception: Optional[Exception] = None @property @@ -299,13 +359,16 @@ def number_errors(self) -> int: def _start(self): pass + def _wait(self): + pass + def _shutdown(self) -> None: """Shutdown the current batch and wait for all requests to be finished.""" self.flush() # we are done, shut bg threads down and end the event loop self.__shut_background_thread_down.set() - while self.__bg_thread.is_alive(): + while self.__bg_threads.is_alive(): time.sleep(0.01) # copy the results to the public results @@ -530,8 +593,8 @@ def __dynamic_batching(self) -> None: def __send_batch( 
self, - objs: List[_BatchObject], - refs: List[_BatchReference], + objs: List[BatchObject], + refs: List[BatchReference], readd_rate_limit: bool, ) -> None: if (n_objs := len(objs)) > 0: @@ -540,7 +603,7 @@ def __send_batch( response_obj = executor.result( self.__batch_grpc.objects( connection=self.__connection, - objects=objs, + objects=[obj._to_internal() for obj in objs], timeout=DEFAULT_REQUEST_TIMEOUT, max_retries=MAX_RETRIES, ) @@ -554,8 +617,7 @@ def __send_batch( ) except Exception as e: errors_obj = { - idx: ErrorObject(message=repr(e), object_=BatchObject._from_internal(obj)) - for idx, obj in enumerate(objs) + idx: ErrorObject(message=repr(e), object_=obj) for idx, obj in enumerate(objs) } logger.error( { @@ -609,9 +671,7 @@ def __send_batch( ) readd_objects = [ - err.object_._to_internal() - for i, err in response_obj.errors.items() - if i in readded_objects + err.object_ for i, err in response_obj.errors.items() if i in readded_objects ] readded_uuids = {obj.uuid for obj in readd_objects} @@ -646,7 +706,7 @@ def __send_batch( time.sleep(2**highest_retry_count) with self.__uuid_lookup_lock: self.__uuid_lookup.difference_update( - obj.uuid for obj in objs if obj.uuid not in readded_uuids + str(obj.uuid) for obj in objs if obj.uuid not in readded_uuids ) if (n_obj_errs := len(response_obj.errors)) > 0 and self.__objs_logs_count < 30: @@ -671,13 +731,14 @@ def __send_batch( start = time.time() try: response_ref = executor.result( - self.__batch_rest.references(connection=self.__connection, references=refs) + self.__batch_rest.references( + connection=self.__connection, + references=[ref._to_internal() for ref in refs], + ) ) except Exception as e: errors_ref = { - idx: ErrorReference( - message=repr(e), reference=BatchReference._from_internal(ref) - ) + idx: ErrorReference(message=repr(e), reference=ref) for idx, ref in enumerate(refs) } response_ref = BatchReferenceReturn( @@ -715,7 +776,7 @@ def flush(self) -> None: or len(self.__batch_references) > 0 ): 
time.sleep(0.01) - self.__check_bg_thread_alive() + self.__check_bg_threads_alive() def _add_object( self, @@ -726,7 +787,7 @@ def _add_object( vector: Optional[VECTORS] = None, tenant: Optional[str] = None, ) -> UUID: - self.__check_bg_thread_alive() + self.__check_bg_threads_alive() try: batch_object = BatchObject( collection=collection, @@ -744,7 +805,7 @@ def _add_object( except ValidationError as e: raise WeaviateBatchValidationError(repr(e)) self.__uuid_lookup.add(str(batch_object.uuid)) - self.__batch_objects.add(batch_object._to_internal()) + self.__batch_objects.add(batch_object) # block if queue gets too long or weaviate is overloaded - reading files is faster them sending them so we do # not need a long queue @@ -752,7 +813,7 @@ def _add_object( self.__recommended_num_objects == 0 or len(self.__batch_objects) >= self.__recommended_num_objects * 2 ): - self.__check_bg_thread_alive() + self.__check_bg_threads_alive() time.sleep(0.01) assert batch_object.uuid is not None @@ -766,7 +827,7 @@ def _add_reference( to: ReferenceInput, tenant: Optional[str] = None, ) -> None: - self.__check_bg_thread_alive() + self.__check_bg_threads_alive() if isinstance(to, ReferenceToMulti): to_strs: Union[List[str], List[UUID]] = to.uuids_str elif isinstance(to, str) or isinstance(to, uuid_package.UUID): @@ -790,593 +851,93 @@ def _add_reference( self.__refs_count += 1 except ValidationError as e: raise WeaviateBatchValidationError(repr(e)) - self.__batch_references.add(batch_reference._to_internal()) + self.__batch_references.add(batch_reference) # block if queue gets too long or weaviate is overloaded while self.__recommended_num_objects == 0: time.sleep(0.01) # block if weaviate is overloaded, also do not send any refs - self.__check_bg_thread_alive() + self.__check_bg_threads_alive() - def __check_bg_thread_alive(self) -> None: - if self.__bg_thread.is_alive(): + def __check_bg_threads_alive(self) -> None: + if self.__bg_threads.is_alive(): return raise 
self.__bg_thread_exception or Exception("Batch thread died unexpectedly") class _BgThreads: - def __init__(self, send: threading.Thread, recv: threading.Thread): - self.send = send + def __init__(self, loop: threading.Thread, recv: threading.Thread): + self.loop = loop self.recv = recv self.__started_recv = False - self.__started_send = False + self.__started_loop = False def start_recv(self) -> None: if not self.__started_recv: self.recv.start() self.__started_recv = True - def start_send(self) -> None: - if not self.__started_send: - self.send.start() - self.__started_send = True + def start_loop(self) -> None: + if not self.__started_loop: + self.loop.start() + self.__started_loop = True def is_alive(self) -> bool: """Check if the background threads are still alive.""" - return self.send_alive() or self.recv_alive() + return self.loop_alive() and self.recv_alive() - def send_alive(self) -> bool: - """Check if the send background thread is still alive.""" - return self.send.is_alive() + def loop_alive(self) -> bool: + """Check if the loop background thread is still alive.""" + if self.__started_loop: + return self.loop.is_alive() + return True # not started yet so considered alive def recv_alive(self) -> bool: """Check if the recv background thread is still alive.""" - return self.recv.is_alive() - - -class _BatchBaseNew: - def __init__( - self, - connection: ConnectionSync, - consistency_level: Optional[ConsistencyLevel], - results: _BatchDataWrapper, - batch_mode: _BatchMode, - executor: ThreadPoolExecutor, - vectorizer_batching: bool, - objects: Optional[ObjectsBatchRequest[batch_pb2.BatchObject]] = None, - references: Optional[ReferencesBatchRequest] = None, - ) -> None: - self.__batch_objects = objects or ObjectsBatchRequest[batch_pb2.BatchObject]() - self.__batch_references = references or ReferencesBatchRequest[batch_pb2.BatchReference]() - - self.__connection = connection - self.__consistency_level: ConsistencyLevel = consistency_level or 
ConsistencyLevel.QUORUM - self.__batch_size = 100 - - self.__batch_grpc = _BatchGRPC( - connection._weaviate_version, self.__consistency_level, connection._grpc_max_msg_size - ) - - # lookup table for objects that are currently being processed - is used to not send references from objects that have not been added yet - self.__uuid_lookup: Set[str] = set() - - # we do not want that users can access the results directly as they are not thread-safe - self.__results_for_wrapper_backup = results - self.__results_for_wrapper = _BatchDataWrapper() - - self.__objs_count = 0 - self.__refs_count = 0 - - self.__uuid_lookup_lock = threading.Lock() - self.__results_lock = threading.Lock() - - self.__bg_thread_exception: Optional[Exception] = None - self.__is_shutting_down = threading.Event() - self.__is_shutdown = threading.Event() - - self.__objs_cache_lock = threading.Lock() - self.__refs_cache_lock = threading.Lock() - self.__objs_cache: dict[str, BatchObject] = {} - self.__refs_cache: dict[str, BatchReference] = {} - - # maxsize=1 so that __batch_send does not run faster than generator for __batch_recv - # thereby using too much buffer in case of server-side shutdown - self.__reqs: Queue[Optional[batch_pb2.BatchStreamRequest]] = Queue(maxsize=1) - - self.__stop = False - - self.__batch_mode = batch_mode - - self.__total = 0 - - @property - def number_errors(self) -> int: - """Return the number of errors in the batch.""" - return len(self.__results_for_wrapper.failed_objects) + len( - self.__results_for_wrapper.failed_references - ) - - def __all_threads_alive(self) -> bool: - return self.__bg_threads is not None and all( - thread.is_alive() for thread in self.__bg_threads - ) - - def __any_threads_alive(self) -> bool: - return self.__bg_threads is not None and any( - thread.is_alive() for thread in self.__bg_threads - ) - - def _start(self) -> None: - assert isinstance(self.__batch_mode, _ServerSideBatching), ( - "Only server-side batching is supported in this mode" - ) - 
self.__bg_threads = [ - self.__start_bg_threads() for _ in range(self.__batch_mode.concurrency) - ] - logger.warning( - f"Provisioned {len(self.__bg_threads)} stream(s) to the server for batch processing" - ) - now = time.time() - while not self.__all_threads_alive(): - # wait for the stream to be started by __batch_stream - time.sleep(0.01) - if time.time() - now > 10: - raise WeaviateBatchValidationError( - "Batch stream was not started within 10 seconds. Please check your connection." - ) - - def _shutdown(self) -> None: - # Shutdown the current batch and wait for all requests to be finished - self.flush() - self.__stop = True - - # we are done, wait for bg threads to finish - # self.__batch_stream will set the shutdown event when it receives - # the stop message from the server - while self.__any_threads_alive(): - time.sleep(0.05) - logger.warning("Send & receive threads finished.") - - # copy the results to the public results - self.__results_for_wrapper_backup.results = self.__results_for_wrapper.results - self.__results_for_wrapper_backup.failed_objects = self.__results_for_wrapper.failed_objects - self.__results_for_wrapper_backup.failed_references = ( - self.__results_for_wrapper.failed_references - ) - self.__results_for_wrapper_backup.imported_shards = ( - self.__results_for_wrapper.imported_shards - ) - - def __batch_send(self) -> None: - refresh_time: float = 0.01 - while ( - self.__shut_background_thread_down is not None - and not self.__shut_background_thread_down.is_set() - ): - if len(self.__batch_objects) + len(self.__batch_references) > 0: - self._batch_send = True - start = time.time() - while (len_o := len(self.__batch_objects)) + ( - len_r := len(self.__batch_references) - ) < self.__batch_size: - # wait for more objects to be added up to the batch size - time.sleep(0.01) - if ( - self.__shut_background_thread_down is not None - and self.__shut_background_thread_down.is_set() - ): - logger.warning("Threads were shutdown, exiting batch send 
loop") - # shutdown was requested, exit early - self.__reqs.put(None) - return - if time.time() - start >= 1 and ( - len_o == len(self.__batch_objects) or len_r == len(self.__batch_references) - ): - # no new objects were added in the last second, exit the loop - break + if self.__started_recv: + return self.recv.is_alive() + return True # not started yet so considered alive - objs = self.__batch_objects.pop_items(self.__batch_size) - refs = self.__batch_references.pop_items( - self.__batch_size - len(objs), - uuid_lookup=self.__uuid_lookup, - ) - with self.__uuid_lookup_lock: - self.__uuid_lookup.difference_update(obj.uuid for obj in objs) - - for req in self.__generate_stream_requests(objs, refs): - logged = False - while self.__is_shutting_down.is_set() or self.__is_shutdown.is_set(): - # if we were shutdown by the node we were connected to, we need to wait for the stream to be restarted - # so that the connection is refreshed to a new node where the objects can be accepted - # otherwise, we wait until the stream has been started by __batch_stream to send the first batch - if not logged: - logger.warning("Waiting for stream to be re-established...") - logged = True - # put sentinel into our queue to signal the end of the current stream - self.__reqs.put(None) - time.sleep(1) - if logged: - logger.warning("Stream re-established, resuming sending batches") - self.__reqs.put(req) - elif self.__stop: - # we are done, send the sentinel into our queue to be consumed by the batch sender - self.__reqs.put(None) # signal the end of the stream - logger.warning("Batching finished, sent stop signal to batch stream") - return - time.sleep(refresh_time) - - def __generate_stream_requests( - self, - objs: List[batch_pb2.BatchObject], - refs: List[batch_pb2.BatchReference], - ) -> Generator[batch_pb2.BatchStreamRequest, None, None]: - per_object_overhead = 4 # extra overhead bytes per object in the request - - def request_maker(): - return batch_pb2.BatchStreamRequest() - - 
request = request_maker() - total_size = request.ByteSize() + def join(self) -> None: + """Join the background threads.""" + self.loop.join() + self.recv.join() - for obj in objs: - obj_size = obj.ByteSize() + per_object_overhead - if total_size + obj_size >= self.__batch_grpc.grpc_max_msg_size: - yield request - request = request_maker() - total_size = request.ByteSize() - - request.data.objects.values.append(obj) - total_size += obj_size - - for ref in refs: - ref_size = ref.ByteSize() + per_object_overhead - - if total_size + ref_size >= self.__batch_grpc.grpc_max_msg_size: - yield request - request = request_maker() - total_size = request.ByteSize() - - request.data.references.values.append(ref) - total_size += ref_size - - if len(request.data.objects.values) > 0 or len(request.data.references.values) > 0: - yield request - - def __generate_stream_requests_for_grpc( - self, - ) -> Generator[batch_pb2.BatchStreamRequest, None, None]: - yield batch_pb2.BatchStreamRequest( - start=batch_pb2.BatchStreamRequest.Start( - consistency_level=self.__batch_grpc._consistency_level, - ), - ) - while ( - self.__shut_background_thread_down is not None - and not self.__shut_background_thread_down.is_set() - ): - req = self.__reqs.get() - if req is not None: - self.__total += len(req.data.objects.values) + len(req.data.references.values) - yield req - continue - if self.__stop and not ( - self.__is_shutting_down.is_set() or self.__is_shutdown.is_set() - ): - logger.warning("Batching finished, closing the client-side of the stream") - yield batch_pb2.BatchStreamRequest(stop=batch_pb2.BatchStreamRequest.Stop()) - return - if self.__is_shutting_down.is_set(): - logger.warning("Server shutting down, closing the client-side of the stream") - return - logger.warning("Received sentinel, but not stopping, continuing...") - - def __batch_recv(self) -> None: - for message in self.__batch_grpc.stream( - connection=self.__connection, - requests=self.__generate_stream_requests_for_grpc(), - 
): - if message.HasField("started"): - logger.warning("Batch stream started successfully") - for threads in self.__bg_threads: - threads.start_send() - if message.HasField("backoff"): - if ( - message.backoff.batch_size != self.__batch_size - and not self.__is_shutting_down.is_set() - and not self.__is_shutdown.is_set() - and not self.__stop - ): - self.__batch_size = message.backoff.batch_size - logger.warning( - f"Updated batch size to {self.__batch_size} as per server request" - ) - if message.HasField("results"): - result_objs = BatchObjectReturn() - result_refs = BatchReferenceReturn() - failed_objs: List[ErrorObject] = [] - failed_refs: List[ErrorReference] = [] - for error in message.results.errors: - if error.HasField("uuid"): - try: - cached = self.__objs_cache.pop(error.uuid) - except KeyError: - continue - err = ErrorObject( - message=error.error, - object_=cached, - ) - result_objs += BatchObjectReturn( - _all_responses=[err], - errors={cached.index: err}, - ) - failed_objs.append(err) - logger.warning( - { - "error": error.error, - "object": error.uuid, - "action": "use {client,collection}.batch.failed_objects to access this error", - } - ) - if error.HasField("beacon"): - try: - cached = self.__refs_cache.pop(error.beacon) - except KeyError: - continue - err = ErrorReference( - message=error.error, - reference=error.beacon, # pyright: ignore - ) - failed_refs.append(err) - result_refs += BatchReferenceReturn( - errors={cached.index: err}, - ) - logger.warning( - { - "error": error.error, - "reference": error.beacon, - "action": "use {client,collection}.batch.failed_references to access this error", - } - ) - for success in message.results.successes: - if success.HasField("uuid"): - try: - cached = self.__objs_cache.pop(success.uuid) - except KeyError: - continue - uuid = uuid_package.UUID(success.uuid) - result_objs += BatchObjectReturn( - _all_responses=[uuid], - uuids={cached.index: uuid}, - ) - if success.HasField("beacon"): - try: - 
self.__refs_cache.pop(success.beacon, None) - except KeyError: - continue - with self.__results_lock: - self.__results_for_wrapper.results.objs += result_objs - self.__results_for_wrapper.results.refs += result_refs - self.__results_for_wrapper.failed_objects.extend(failed_objs) - self.__results_for_wrapper.failed_references.extend(failed_refs) - elif message.HasField("shutting_down"): - logger.warning( - "Received shutting down message from server, pausing sending until stream is re-established" - ) - self.__is_shutting_down.set() - elif message.HasField("shutdown"): - logger.warning("Received shutdown finished message from server") - self.__is_shutdown.set() - self.__is_shutting_down.clear() - self.__reconnect() - - # restart the stream if we were shutdown by the node we were connected to ensuring that the index is - # propagated properly from it to the new one - if self.__is_shutdown.is_set(): - logger.warning("Restarting batch recv after shutdown...") - self.__is_shutdown.clear() - return self.__batch_recv() - else: - logger.warning("Server closed the stream from its side, shutting down batch") - return - - def __reconnect(self, retry: int = 0) -> None: - if self.__consistency_level == ConsistencyLevel.ALL: - # check that all nodes are available before reconnecting - cluster = _ClusterBatch(self.__connection) - while len(nodes := cluster.get_nodes_status()) != 3 or any( - node["status"] != "HEALTHY" for node in nodes - ): - logger.warning( - "Waiting for all nodes to be HEALTHY before reconnecting to batch stream due to CL=ALL..." - ) - time.sleep(5) - try: - logger.warning(f"Trying to reconnect after shutdown... 
{retry + 1}/{5}") - self.__connection.close("sync") - self.__connection.connect(force=True) - logger.warning("Reconnected successfully") - except (WeaviateStartUpError, WeaviateGRPCUnavailableError) as e: - if retry < 5: - time.sleep(2**retry) - self.__reconnect(retry + 1) - else: - logger.error("Failed to reconnect after 5 attempts") - self.__bg_thread_exception = e - - def __start_bg_threads(self) -> _BgThreads: - """Create a background thread that periodically checks how congested the batch queue is.""" - self.__shut_background_thread_down = threading.Event() - - def batch_send_wrapper() -> None: - try: - self.__batch_send() - logger.warning("exited batch send thread") - except Exception as e: - logger.error(e) - self.__bg_thread_exception = e - - def batch_recv_wrapper() -> None: - socket_hung_up = False - try: - self.__batch_recv() - logger.warning("exited batch receive thread") - except Exception as e: - if isinstance(e, WeaviateBatchStreamError) and ( - "Socket closed" in e.message or "context canceled" in e.message - ): - socket_hung_up = True - else: - logger.error(e) - logger.error(type(e)) - self.__bg_thread_exception = e - if socket_hung_up: - # this happens during ungraceful shutdown of the coordinator - # lets restart the stream and add the cached objects again - logger.warning("Stream closed unexpectedly, restarting...") - self.__reconnect() - # server sets this whenever it restarts, gracefully or unexpectedly, so need to clear it now - self.__is_shutting_down.clear() - with self.__objs_cache_lock: - logger.warning( - f"Re-adding {len(self.__objs_cache)} cached objects to the batch" - ) - self.__batch_objects.prepend( - [ - self.__batch_grpc.grpc_object(o._to_internal()) - for o in self.__objs_cache.values() - ] - ) - with self.__refs_cache_lock: - self.__batch_references.prepend( - [ - self.__batch_grpc.grpc_reference(o._to_internal()) - for o in self.__refs_cache.values() - ] - ) - # start a new stream with a newly reconnected channel - return 
batch_recv_wrapper() - - threads = _BgThreads( - send=threading.Thread( - target=batch_send_wrapper, - daemon=True, - name="BgBatchSend", - ), - recv=threading.Thread( - target=batch_recv_wrapper, - daemon=True, - name="BgBatchRecv", - ), - ) - threads.start_recv() - return threads - - def flush(self) -> None: - """Flush the batch queue and wait for all requests to be finished.""" - # bg thread is sending objs+refs automatically, so simply wait for everything to be done - while len(self.__batch_objects) > 0 or len(self.__batch_references) > 0: - time.sleep(0.01) - self.__check_bg_threads_alive() +class _ClusterBatch: + def __init__(self, connection: ConnectionSync): + self._connection = connection - def _add_object( + def get_nodes_status( self, - collection: str, - properties: Optional[WeaviateProperties] = None, - references: Optional[ReferenceInputs] = None, - uuid: Optional[UUID] = None, - vector: Optional[VECTORS] = None, - tenant: Optional[str] = None, - ) -> UUID: - self.__check_bg_threads_alive() + ) -> List[Node]: try: - batch_object = BatchObject( - collection=collection, - properties=properties, - references=references, - uuid=uuid, - vector=vector, - tenant=tenant, - index=self.__objs_count, - ) - self.__results_for_wrapper.imported_shards.add( - Shard(collection=collection, tenant=tenant) - ) - except ValidationError as e: - raise WeaviateBatchValidationError(repr(e)) - uuid = str(batch_object.uuid) - with self.__uuid_lookup_lock: - self.__uuid_lookup.add(uuid) - self.__batch_objects.add(self.__batch_grpc.grpc_object(batch_object._to_internal())) - with self.__objs_cache_lock: - self.__objs_cache[uuid] = batch_object - self.__objs_count += 1 - - # block if queue gets too long or weaviate is overloaded - reading files is faster them sending them so we do - # not need a long queue - while len(self.__batch_objects) >= self.__batch_size * 2: - self.__check_bg_threads_alive() - time.sleep(0.01) - - assert batch_object.uuid is not None - return 
batch_object.uuid - - def _add_reference( - self, - from_object_uuid: UUID, - from_object_collection: str, - from_property_name: str, - to: ReferenceInput, - tenant: Optional[str] = None, - ) -> None: - self.__check_bg_threads_alive() - if isinstance(to, ReferenceToMulti): - to_strs: Union[List[str], List[UUID]] = to.uuids_str - elif isinstance(to, str) or isinstance(to, uuid_package.UUID): - to_strs = [to] - else: - to_strs = list(to) - - for uid in to_strs: - try: - batch_reference = BatchReference( - from_object_collection=from_object_collection, - from_object_uuid=from_object_uuid, - from_property_name=from_property_name, - to_object_collection=( - to.target_collection if isinstance(to, ReferenceToMulti) else None - ), - to_object_uuid=uid, - tenant=tenant, - index=self.__refs_count, - ) - except ValidationError as e: - raise WeaviateBatchValidationError(repr(e)) - self.__batch_references.add( - self.__batch_grpc.grpc_reference(batch_reference._to_internal()) - ) - with self.__refs_cache_lock: - self.__refs_cache[batch_reference._to_beacon()] = batch_reference - self.__refs_count += 1 + response = executor.result(self._connection.get(path="/nodes")) + except Exception: + return [] - def __check_bg_threads_alive(self) -> None: - if self.__any_threads_alive(): - return + response_typed = _decode_json_response_dict(response, "Nodes status") + assert response_typed is not None + nodes = response_typed.get("nodes") + if nodes is None: + return [] + return cast(List[Node], nodes) - raise self.__bg_thread_exception or Exception("Batch thread died unexpectedly") + def get_number_of_nodes(self) -> int: + return len(self.get_nodes_status()) -class _ClusterBatch: - def __init__(self, connection: ConnectionSync): +class _ClusterBatchAsync: + def __init__(self, connection: ConnectionAsync): self._connection = connection - def get_nodes_status( + async def get_nodes_status( self, ) -> List[Node]: try: - response = executor.result(self._connection.get(path="/nodes")) - except 
ConnectError as conn_err: - raise ConnectError("Get nodes status failed due to connection error") from conn_err + response = await executor.aresult(self._connection.get(path="/nodes")) + except Exception: + return [] response_typed = _decode_json_response_dict(response, "Nodes status") assert response_typed is not None @@ -1384,3 +945,6 @@ def get_nodes_status( if nodes is None or nodes == []: raise EmptyResponseException("Nodes status response returned empty") return cast(List[Node], nodes) + + async def get_number_of_nodes(self) -> int: + return len(await self.get_nodes_status()) diff --git a/weaviate/collections/batch/batch_wrapper.py b/weaviate/collections/batch/batch_wrapper.py index a64f267ca..a3a3598d6 100644 --- a/weaviate/collections/batch/batch_wrapper.py +++ b/weaviate/collections/batch/batch_wrapper.py @@ -1,14 +1,17 @@ +import asyncio import time from typing import Any, Generic, List, Optional, Protocol, TypeVar, Union, cast +from weaviate.collections.batch.async_ import _BatchBaseAsync from weaviate.collections.batch.base import ( _BatchBase, - _BatchBaseNew, _BatchDataWrapper, _BatchMode, _ClusterBatch, + _ClusterBatchAsync, _DynamicBatching, ) +from weaviate.collections.batch.sync import _BatchBaseSync from weaviate.collections.classes.batch import ( BatchResult, ErrorObject, @@ -20,7 +23,7 @@ from weaviate.collections.classes.tenants import Tenant from weaviate.collections.classes.types import Properties, WeaviateProperties from weaviate.connect import executor -from weaviate.connect.v4 import ConnectionSync +from weaviate.connect.v4 import ConnectionAsync, ConnectionSync from weaviate.logger import logger from weaviate.types import UUID, VECTORS from weaviate.util import _capitalize_first_letter, _decode_json_response_list @@ -34,7 +37,7 @@ def __init__( ): self._connection = connection self._consistency_level = consistency_level - self._current_batch: Optional[Union[_BatchBase, _BatchBaseNew]] = None + self._current_batch: 
Optional[Union[_BatchBase, _BatchBaseSync]] = None # config options self._batch_mode: _BatchMode = _DynamicBatching() @@ -127,6 +130,107 @@ def results(self) -> BatchResult: return self._batch_data.results +class _BatchWrapperAsync: + def __init__( + self, + connection: ConnectionAsync, + consistency_level: Optional[ConsistencyLevel], + ): + self._connection = connection + self._consistency_level = consistency_level + self._current_batch: Optional[_BatchBaseAsync] = None + + self._batch_data = _BatchDataWrapper() + self._cluster = _ClusterBatchAsync(connection) + + async def __is_ready( + self, max_count: int, shards: Optional[List[Shard]], backoff_count: int = 0 + ) -> bool: + try: + readinesses = await asyncio.gather( + *[ + self.__get_shards_readiness(shard) + for shard in shards or self._batch_data.imported_shards + ] + ) + return all(all(readiness) for readiness in readinesses) + except Exception as e: + logger.warning( + f"Error while getting class shards statuses: {e}, trying again with 2**n={2**backoff_count}s exponential backoff with n={backoff_count}" + ) + if backoff_count >= max_count: + raise e + await asyncio.sleep(2**backoff_count) + return await self.__is_ready(max_count, shards, backoff_count + 1) + + async def wait_for_vector_indexing( + self, shards: Optional[List[Shard]] = None, how_many_failures: int = 5 + ) -> None: + """Wait for the all the vectors of the batch imported objects to be indexed. + + Upon network error, it will retry to get the shards' status for `how_many_failures` times + with exponential backoff (2**n seconds with n=0,1,2,...,how_many_failures). + + Args: + shards: The shards to check the status of. If `None` it will check the status of all the shards of the imported objects in the batch. + how_many_failures: How many times to try to get the shards' status before raising an exception. Default 5. + """ + if shards is not None and not isinstance(shards, list): + raise TypeError(f"'shards' must be of type List[Shard]. 
Given type: {type(shards)}.") + if shards is not None and not isinstance(shards[0], Shard): + raise TypeError(f"'shards' must be of type List[Shard]. Given type: {type(shards)}.") + + waiting_count = 0 + while not await self.__is_ready(how_many_failures, shards): + if waiting_count % 20 == 0: # print every 5s + logger.debug("Waiting for async indexing to finish...") + await asyncio.sleep(0.25) + waiting_count += 1 + logger.debug("Async indexing finished!") + + async def __get_shards_readiness(self, shard: Shard) -> List[bool]: + path = f"/schema/{_capitalize_first_letter(shard.collection)}/shards{'' if shard.tenant is None else f'?tenant={shard.tenant}'}" + response = await executor.aresult(self._connection.get(path=path)) + + res = _decode_json_response_list(response, "Get shards' status") + assert res is not None + return [ + (cast(str, shard.get("status")) == "READY") + & (cast(int, shard.get("vectorQueueSize")) == 0) + for shard in res + ] + + async def _get_shards_readiness(self, shard: Shard) -> List[bool]: + return await self.__get_shards_readiness(shard) + + @property + def failed_objects(self) -> List[ErrorObject]: + """Get all failed objects from the batch manager. + + Returns: + A list of all the failed objects from the batch. + """ + return self._batch_data.failed_objects + + @property + def failed_references(self) -> List[ErrorReference]: + """Get all failed references from the batch manager. + + Returns: + A list of all the failed references from the batch. + """ + return self._batch_data.failed_references + + @property + def results(self) -> BatchResult: + """Get the results of the batch operation. + + Returns: + The results of the batch operation. + """ + return self._batch_data.results + + class BatchClientProtocol(Protocol): def add_object( self, @@ -204,6 +308,83 @@ def number_errors(self) -> int: ... 
+class BatchClientProtocolAsync(Protocol): + async def add_object( + self, + collection: str, + properties: Optional[WeaviateProperties] = None, + references: Optional[ReferenceInputs] = None, + uuid: Optional[UUID] = None, + vector: Optional[VECTORS] = None, + tenant: Optional[Union[str, Tenant]] = None, + ) -> UUID: + """Add one object to this batch. + + NOTE: If the UUID of one of the objects already exists then the existing object will be + replaced by the new object. + + Args: + collection: The name of the collection this object belongs to. + properties: The data properties of the object to be added as a dictionary. + references: The references of the object to be added as a dictionary. + uuid: The UUID of the object as an uuid.UUID object or str. It can be a Weaviate beacon or Weaviate href. + If it is None an UUIDv4 will generated, by default None + vector: The embedding of the object. Can be used when a collection does not have a vectorization module or the given + vector was generated using the _identical_ vectorization module that is configured for the class. In this + case this vector takes precedence. + Supported types are: + - for single vectors: `list`, 'numpy.ndarray`, `torch.Tensor` and `tf.Tensor`, by default None. + - for named vectors: Dict[str, *list above*], where the string is the name of the vector. + tenant: The tenant name or Tenant object to be used for this request. + + Returns: + The UUID of the added object. If one was not provided a UUIDv4 will be auto-generated for you and returned here. + + Raises: + WeaviateBatchValidationError: If the provided options are in the format required by Weaviate. + """ + ... + + async def add_reference( + self, + from_uuid: UUID, + from_collection: str, + from_property: str, + to: ReferenceInput, + tenant: Optional[Union[str, Tenant]] = None, + ) -> None: + """Add one reference to this batch. 
+ + Args: + from_uuid: The UUID of the object, as an uuid.UUID object or str, that should reference another object. + from_collection: The name of the collection that should reference another object. + from_property: The name of the property that contains the reference. + to: The UUID of the referenced object, as an uuid.UUID object or str, that is actually referenced. + For multi-target references use wvc.Reference.to_multi_target(). + tenant: The tenant name or Tenant object to be used for this request. + + Raises: + WeaviateBatchValidationError: If the provided options are in the format required by Weaviate. + """ + ... + + def flush(self) -> None: + """Flush the current batch. + + This will send all the objects and references in the current batch to Weaviate. + """ + ... + + @property + def number_errors(self) -> int: + """Get the number of errors in the current batch. + + Returns: + The number of errors in the current batch. + """ + ... + + class BatchCollectionProtocol(Generic[Properties], Protocol[Properties]): def add_object( self, @@ -260,17 +441,88 @@ def number_errors(self) -> int: ... -T = TypeVar("T", bound=Union[_BatchBase, _BatchBaseNew]) +class BatchCollectionProtocolAsync(Generic[Properties], Protocol[Properties]): + async def add_object( + self, + properties: Optional[Properties] = None, + references: Optional[ReferenceInputs] = None, + uuid: Optional[UUID] = None, + vector: Optional[VECTORS] = None, + ) -> UUID: + """Add one object to this batch. + + NOTE: If the UUID of one of the objects already exists then the existing object will be replaced by the new object. + + Args: + properties: The data properties of the object to be added as a dictionary. + references: The references of the object to be added as a dictionary. + uuid: The UUID of the object as an uuid.UUID object or str. If it is None an UUIDv4 will generated, by default None + vector: The embedding of the object. 
Can be used when a collection does not have a vectorization module or the given + vector was generated using the _identical_ vectorization module that is configured for the class. In this + case this vector takes precedence. Supported types are: + - for single vectors: `list`, 'numpy.ndarray`, `torch.Tensor` and `tf.Tensor`, by default None. + - for named vectors: Dict[str, *list above*], where the string is the name of the vector. + + Returns: + The UUID of the added object. If one was not provided a UUIDv4 will be auto-generated for you and returned here. + + Raises: + WeaviateBatchValidationError: If the provided options are in the format required by Weaviate. + """ + ... + + async def add_reference( + self, from_uuid: UUID, from_property: str, to: Union[ReferenceInput, List[UUID]] + ) -> None: + """Add a reference to this batch. + + Args: + from_uuid: The UUID of the object, as an uuid.UUID object or str, that should reference another object. + from_property: The name of the property that contains the reference. + to: The UUID of the referenced object, as an uuid.UUID object or str, that is actually referenced. + For multi-target references use wvc.Reference.to_multi_target(). + + Raises: + WeaviateBatchValidationError: If the provided options are in the format required by Weaviate. + """ + ... + + @property + def number_errors(self) -> int: + """Get the number of errors in the current batch. + + Returns: + The number of errors in the current batch. + """ + ... 
+ + +T = TypeVar("T", bound=Union[_BatchBase, _BatchBaseSync]) P = TypeVar("P", bound=Union[BatchClientProtocol, BatchCollectionProtocol[Properties]]) +Q = TypeVar("Q", bound=Union[BatchClientProtocolAsync, BatchCollectionProtocolAsync[Properties]]) -class _ContextManagerWrapper(Generic[T, P]): +class _ContextManagerSync(Generic[T, P]): def __init__(self, current_batch: T): self.__current_batch: T = current_batch def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.__current_batch._shutdown() + self.__current_batch._wait() def __enter__(self) -> P: self.__current_batch._start() return self.__current_batch # pyright: ignore[reportReturnType] + + +class _ContextManagerAsync(Generic[Q]): + def __init__(self, current_batch: _BatchBaseAsync): + self.__current_batch = current_batch + + async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + await self.__current_batch._shutdown() + await self.__current_batch._wait() + + async def __aenter__(self) -> Q: + await self.__current_batch._start() + return self.__current_batch # pyright: ignore[reportReturnType] diff --git a/weaviate/collections/batch/client.py b/weaviate/collections/batch/client.py index a86a3be10..d28834c66 100644 --- a/weaviate/collections/batch/client.py +++ b/weaviate/collections/batch/client.py @@ -1,9 +1,12 @@ from concurrent.futures import ThreadPoolExecutor from typing import TYPE_CHECKING, Optional, Type, Union +from deprecation import deprecated as docstring_deprecated +from typing_extensions import deprecated as typing_deprecated + +from weaviate.collections.batch.async_ import _BatchBaseAsync from weaviate.collections.batch.base import ( _BatchBase, - _BatchBaseNew, _BatchDataWrapper, _DynamicBatching, _FixedSizeBatching, @@ -12,15 +15,19 @@ ) from weaviate.collections.batch.batch_wrapper import ( BatchClientProtocol, + BatchClientProtocolAsync, _BatchMode, _BatchWrapper, - _ContextManagerWrapper, + _BatchWrapperAsync, + _ContextManagerAsync, + 
_ContextManagerSync, ) +from weaviate.collections.batch.sync import _BatchBaseSync from weaviate.collections.classes.config import ConsistencyLevel, Vectorizers from weaviate.collections.classes.internal import ReferenceInput, ReferenceInputs from weaviate.collections.classes.tenants import Tenant from weaviate.collections.classes.types import WeaviateProperties -from weaviate.connect.v4 import ConnectionSync +from weaviate.connect.v4 import ConnectionAsync, ConnectionSync from weaviate.exceptions import UnexpectedStatusCodeError, WeaviateUnsupportedFeatureError from weaviate.types import UUID, VECTORS @@ -38,31 +45,6 @@ def add_object( vector: Optional[VECTORS] = None, tenant: Optional[Union[str, Tenant]] = None, ) -> UUID: - """Add one object to this batch. - - NOTE: If the UUID of one of the objects already exists then the existing object will be - replaced by the new object. - - Args: - collection: The name of the collection this object belongs to. - properties: The data properties of the object to be added as a dictionary. - references: The references of the object to be added as a dictionary. - uuid: The UUID of the object as an uuid.UUID object or str. It can be a Weaviate beacon or Weaviate href. - If it is None an UUIDv4 will generated, by default None - vector: The embedding of the object. Can be used when a collection does not have a vectorization module or the given - vector was generated using the _identical_ vectorization module that is configured for the class. In this - case this vector takes precedence. - Supported types are: - - for single vectors: `list`, 'numpy.ndarray`, `torch.Tensor` and `tf.Tensor`, by default None. - - for named vectors: Dict[str, *list above*], where the string is the name of the vector. - tenant: The tenant name or Tenant object to be used for this request. - - Returns: - The UUID of the added object. If one was not provided a UUIDv4 will be auto-generated for you and returned here. 
- - Raises: - WeaviateBatchValidationError: If the provided options are in the format required by Weaviate. - """ return super()._add_object( collection=collection, properties=properties, @@ -80,19 +62,6 @@ def add_reference( to: ReferenceInput, tenant: Optional[Union[str, Tenant]] = None, ) -> None: - """Add one reference to this batch. - - Args: - from_uuid: The UUID of the object, as an uuid.UUID object or str, that should reference another object. - from_collection: The name of the collection that should reference another object. - from_property: The name of the property that contains the reference. - to: The UUID of the referenced object, as an uuid.UUID object or str, that is actually referenced. - For multi-target references use wvc.Reference.to_multi_target(). - tenant: The tenant name or Tenant object to be used for this request. - - Raises: - WeaviateBatchValidationError: If the provided options are in the format required by Weaviate. - """ super()._add_reference( from_object_uuid=from_uuid, from_object_collection=from_collection, @@ -102,7 +71,7 @@ def add_reference( ) -class _BatchClientNew(_BatchBaseNew): +class _BatchClientSync(_BatchBaseSync): def add_object( self, collection: str, @@ -112,31 +81,6 @@ def add_object( vector: Optional[VECTORS] = None, tenant: Optional[Union[str, Tenant]] = None, ) -> UUID: - """Add one object to this batch. - - NOTE: If the UUID of one of the objects already exists then the existing object will be - replaced by the new object. - - Args: - collection: The name of the collection this object belongs to. - properties: The data properties of the object to be added as a dictionary. - references: The references of the object to be added as a dictionary. - uuid: The UUID of the object as an uuid.UUID object or str. It can be a Weaviate beacon or Weaviate href. - If it is None an UUIDv4 will generated, by default None - vector: The embedding of the object. 
Can be used when a collection does not have a vectorization module or the given - vector was generated using the _identical_ vectorization module that is configured for the class. In this - case this vector takes precedence. - Supported types are: - - for single vectors: `list`, 'numpy.ndarray`, `torch.Tensor` and `tf.Tensor`, by default None. - - for named vectors: Dict[str, *list above*], where the string is the name of the vector. - tenant: The tenant name or Tenant object to be used for this request. - - Returns: - The UUID of the added object. If one was not provided a UUIDv4 will be auto-generated for you and returned here. - - Raises: - WeaviateBatchValidationError: If the provided options are in the format required by Weaviate. - """ return super()._add_object( collection=collection, properties=properties, @@ -154,19 +98,6 @@ def add_reference( to: ReferenceInput, tenant: Optional[Union[str, Tenant]] = None, ) -> None: - """Add one reference to this batch. - - Args: - from_uuid: The UUID of the object, as an uuid.UUID object or str, that should reference another object. - from_collection: The name of the collection that should reference another object. - from_property: The name of the property that contains the reference. - to: The UUID of the referenced object, as an uuid.UUID object or str, that is actually referenced. - For multi-target references use wvc.Reference.to_multi_target(). - tenant: The tenant name or Tenant object to be used for this request. - - Raises: - WeaviateBatchValidationError: If the provided options are in the format required by Weaviate. 
- """ super()._add_reference( from_object_uuid=from_uuid, from_object_collection=from_collection, @@ -176,11 +107,49 @@ def add_reference( ) +class _BatchClientAsync(_BatchBaseAsync): + async def add_object( + self, + collection: str, + properties: Optional[WeaviateProperties] = None, + references: Optional[ReferenceInputs] = None, + uuid: Optional[UUID] = None, + vector: Optional[VECTORS] = None, + tenant: Optional[Union[str, Tenant]] = None, + ) -> UUID: + return await super()._add_object( + collection=collection, + properties=properties, + references=references, + uuid=uuid, + vector=vector, + tenant=tenant.name if isinstance(tenant, Tenant) else tenant, + ) + + async def add_reference( + self, + from_uuid: UUID, + from_collection: str, + from_property: str, + to: ReferenceInput, + tenant: Optional[Union[str, Tenant]] = None, + ) -> None: + await super()._add_reference( + from_object_uuid=from_uuid, + from_object_collection=from_collection, + from_property_name=from_property, + to=to, + tenant=tenant.name if isinstance(tenant, Tenant) else tenant, + ) + + BatchClient = _BatchClient -BatchClientNew = _BatchClientNew -ClientBatchingContextManager = _ContextManagerWrapper[ - Union[BatchClient, BatchClientNew], BatchClientProtocol +BatchClientSync = _BatchClientSync +BatchClientAsync = _BatchClientAsync +ClientBatchingContextManager = _ContextManagerSync[ + Union[BatchClient, BatchClientSync], BatchClientProtocol ] +ClientBatchingContextManagerAsync = _ContextManagerAsync[BatchClientProtocolAsync] class _BatchClientWrapper(_BatchWrapper): @@ -197,7 +166,7 @@ def __init__( # define one executor per client with it shared between all child batch contexts def __create_batch_and_reset( - self, batch_client: Union[Type[_BatchClient], Type[_BatchClientNew]] + self, batch_client: Union[Type[_BatchClient], Type[_BatchClientSync]] ): if self._vectorizer_batching is None or not self._vectorizer_batching: try: @@ -227,7 +196,7 @@ def __create_batch_and_reset( self._batch_data = 
_BatchDataWrapper() # clear old data - return _ContextManagerWrapper( + return _ContextManagerSync( batch_client( connection=self._connection, consistency_level=self._consistency_level, @@ -290,19 +259,95 @@ def rate_limit( self._consistency_level = consistency_level return self.__create_batch_and_reset(_BatchClient) + @docstring_deprecated( + details="Use the 'stream' method instead. This method will be removed in 4.21.0", + deprecated_in="4.20.0", + ) + @typing_deprecated("Use the 'stream' method instead. This method will be removed in 4.21.0") def experimental( self, *, concurrency: Optional[int] = None, consistency_level: Optional[ConsistencyLevel] = None, ) -> ClientBatchingContextManager: - """Configure the batching context manager using the experimental server-side batching mode. + return self.stream(concurrency=concurrency, consistency_level=consistency_level) + + def stream( + self, + *, + concurrency: Optional[int] = None, + consistency_level: Optional[ConsistencyLevel] = None, + ) -> ClientBatchingContextManager: + """Configure the batching context manager to use batch streaming. + + When you exit the context manager, the final batch will be sent automatically. + + Args: + concurrency: The number of concurrent streams to use when sending batches. If not provided, the default will be one. + consistency_level: The consistency level to be used when inserting data. If not provided, the default value is `None`. 
+ """ + if self._connection._weaviate_version.is_lower_than(1, 36, 0): + raise WeaviateUnsupportedFeatureError( + "Server-side batching", str(self._connection._weaviate_version), "1.36.0" + ) + self._batch_mode = _ServerSideBatching( + # concurrency=concurrency + # if concurrency is not None + # else len(self._cluster.get_nodes_status()) + concurrency=1, # hard-code until client-side multi-threading is fixed + ) + self._consistency_level = consistency_level + return self.__create_batch_and_reset(_BatchClientSync) + + +class _BatchClientWrapperAsync(_BatchWrapperAsync): + def __init__( + self, + connection: ConnectionAsync, + ): + super().__init__(connection, None) + self._vectorizer_batching: Optional[bool] = None + + def __create_batch_and_reset(self): + self._batch_data = _BatchDataWrapper() # clear old data + return _ContextManagerAsync( + BatchClientAsync( + connection=self._connection, + consistency_level=self._consistency_level, + results=self._batch_data, + ) + ) + + @docstring_deprecated( + details="Use the 'stream' method instead. This method will be removed in 4.21.0", + deprecated_in="4.20.0", + ) + @typing_deprecated("Use the 'stream' method instead. This method will be removed in 4.21.0") + def experimental( + self, + *, + concurrency: Optional[int] = None, + consistency_level: Optional[ConsistencyLevel] = None, + ) -> ClientBatchingContextManagerAsync: + return self.stream(concurrency=concurrency, consistency_level=consistency_level) + + def stream( + self, + *, + concurrency: Optional[int] = None, + consistency_level: Optional[ConsistencyLevel] = None, + ) -> ClientBatchingContextManagerAsync: + """Configure the batching context manager to use batch streaming. When you exit the context manager, the final batch will be sent automatically. + + Args: + concurrency: The number of concurrent streams to use when sending batches. If not provided, the default will be one. + consistency_level: The consistency level to be used when inserting data. 
If not provided, the default value is `None`. """ - if self._connection._weaviate_version.is_lower_than(1, 34, 0): + if self._connection._weaviate_version.is_lower_than(1, 36, 0): raise WeaviateUnsupportedFeatureError( - "Server-side batching", str(self._connection._weaviate_version), "1.34.0" + "Server-side batching", str(self._connection._weaviate_version), "1.36.0" ) self._batch_mode = _ServerSideBatching( # concurrency=concurrency @@ -311,4 +356,4 @@ def experimental( concurrency=1, # hard-code until client-side multi-threading is fixed ) self._consistency_level = consistency_level - return self.__create_batch_and_reset(_BatchClientNew) + return self.__create_batch_and_reset() diff --git a/weaviate/collections/batch/collection.py b/weaviate/collections/batch/collection.py index 6abe4aaac..7889db335 100644 --- a/weaviate/collections/batch/collection.py +++ b/weaviate/collections/batch/collection.py @@ -1,9 +1,12 @@ from concurrent.futures import ThreadPoolExecutor from typing import TYPE_CHECKING, Generic, List, Optional, Type, Union +from deprecation import deprecated as docstring_deprecated +from typing_extensions import deprecated as typing_deprecated + +from weaviate.collections.batch.async_ import _BatchBaseAsync from weaviate.collections.batch.base import ( _BatchBase, - _BatchBaseNew, _BatchDataWrapper, _BatchMode, _DynamicBatching, @@ -13,13 +16,17 @@ ) from weaviate.collections.batch.batch_wrapper import ( BatchCollectionProtocol, + BatchCollectionProtocolAsync, _BatchWrapper, - _ContextManagerWrapper, + _BatchWrapperAsync, + _ContextManagerAsync, + _ContextManagerSync, ) +from weaviate.collections.batch.sync import _BatchBaseSync from weaviate.collections.classes.config import ConsistencyLevel, Vectorizers from weaviate.collections.classes.internal import ReferenceInput, ReferenceInputs from weaviate.collections.classes.types import Properties -from weaviate.connect.v4 import ConnectionSync +from weaviate.connect.v4 import ConnectionAsync, 
ConnectionSync from weaviate.exceptions import UnexpectedStatusCodeError, WeaviateUnsupportedFeatureError from weaviate.types import UUID, VECTORS @@ -78,17 +85,17 @@ def add_reference( ) -class _BatchCollectionNew(Generic[Properties], _BatchBaseNew): +class _BatchCollectionSync(Generic[Properties], _BatchBaseSync): def __init__( self, - executor: ThreadPoolExecutor, connection: ConnectionSync, consistency_level: Optional[ConsistencyLevel], results: _BatchDataWrapper, - batch_mode: _BatchMode, name: str, tenant: Optional[str], - vectorizer_batching: bool, + executor: Optional[ThreadPoolExecutor] = None, + batch_mode: Optional[_BatchMode] = None, + vectorizer_batching: bool = False, ) -> None: super().__init__( connection=connection, @@ -108,26 +115,6 @@ def add_object( uuid: Optional[UUID] = None, vector: Optional[VECTORS] = None, ) -> UUID: - """Add one object to this batch. - - NOTE: If the UUID of one of the objects already exists then the existing object will be replaced by the new object. - - Args: - properties: The data properties of the object to be added as a dictionary. - references: The references of the object to be added as a dictionary. - uuid: The UUID of the object as an uuid.UUID object or str. If it is None an UUIDv4 will generated, by default None - vector: The embedding of the object. Can be used when a collection does not have a vectorization module or the given - vector was generated using the _identical_ vectorization module that is configured for the class. In this - case this vector takes precedence. Supported types are: - - for single vectors: `list`, 'numpy.ndarray`, `torch.Tensor` and `tf.Tensor`, by default None. - - for named vectors: Dict[str, *list above*], where the string is the name of the vector. - - Returns: - The UUID of the added object. If one was not provided a UUIDv4 will be auto-generated for you and returned here. - - Raises: - WeaviateBatchValidationError: If the provided options are in the format required by Weaviate. 
- """ return self._add_object( collection=self.__name, properties=properties, @@ -140,18 +127,52 @@ def add_object( def add_reference( self, from_uuid: UUID, from_property: str, to: Union[ReferenceInput, List[UUID]] ) -> None: - """Add a reference to this batch. + self._add_reference( + from_uuid, + self.__name, + from_property, + to, + self.__tenant, + ) - Args: - from_uuid: The UUID of the object, as an uuid.UUID object or str, that should reference another object. - from_property: The name of the property that contains the reference. - to: The UUID of the referenced object, as an uuid.UUID object or str, that is actually referenced. - For multi-target references use wvc.Reference.to_multi_target(). - Raises: - WeaviateBatchValidationError: If the provided options are in the format required by Weaviate. - """ - self._add_reference( +class _BatchCollectionAsync(Generic[Properties], _BatchBaseAsync): + def __init__( + self, + connection: ConnectionAsync, + consistency_level: Optional[ConsistencyLevel], + results: _BatchDataWrapper, + name: str, + tenant: Optional[str], + ) -> None: + super().__init__( + connection=connection, + consistency_level=consistency_level, + results=results, + ) + self.__name = name + self.__tenant = tenant + + async def add_object( + self, + properties: Optional[Properties] = None, + references: Optional[ReferenceInputs] = None, + uuid: Optional[UUID] = None, + vector: Optional[VECTORS] = None, + ) -> UUID: + return await self._add_object( + collection=self.__name, + properties=properties, + references=references, + uuid=uuid, + vector=vector, + tenant=self.__tenant, + ) + + async def add_reference( + self, from_uuid: UUID, from_property: str, to: Union[ReferenceInput, List[UUID]] + ) -> None: + await self._add_reference( from_uuid, self.__name, from_property, @@ -161,11 +182,15 @@ def add_reference( BatchCollection = _BatchCollection -BatchCollectionNew = _BatchCollectionNew -CollectionBatchingContextManager = _ContextManagerWrapper[ - 
Union[BatchCollection[Properties], BatchCollectionNew[Properties]], +BatchCollectionSync = _BatchCollectionSync +BatchCollectionAsync = _BatchCollectionAsync +CollectionBatchingContextManager = _ContextManagerSync[ + Union[BatchCollection[Properties], BatchCollectionSync[Properties]], BatchCollectionProtocol[Properties], ] +CollectionBatchingContextManagerAsync = _ContextManagerAsync[ + BatchCollectionProtocolAsync[Properties] +] class _BatchCollectionWrapper(Generic[Properties], _BatchWrapper): @@ -177,7 +202,7 @@ def __init__( tenant: Optional[str], config: "_ConfigCollection", batch_client: Union[ - Type[_BatchCollection[Properties]], Type[_BatchCollectionNew[Properties]] + Type[_BatchCollection[Properties]], Type[_BatchCollectionSync[Properties]] ], ) -> None: super().__init__(connection, consistency_level) @@ -192,7 +217,7 @@ def __init__( def __create_batch_and_reset( self, batch_client: Union[ - Type[_BatchCollection[Properties]], Type[_BatchCollectionNew[Properties]] + Type[_BatchCollection[Properties]], Type[_BatchCollectionSync[Properties]] ], ): if self._vectorizer_batching is None: @@ -214,7 +239,7 @@ def __create_batch_and_reset( self._vectorizer_batching = False self._batch_data = _BatchDataWrapper() # clear old data - return _ContextManagerWrapper( + return _ContextManagerSync( batch_client( connection=self._connection, consistency_level=self._consistency_level, @@ -261,21 +286,99 @@ def rate_limit(self, requests_per_minute: int) -> CollectionBatchingContextManag self._batch_mode = _RateLimitedBatching(requests_per_minute) return self.__create_batch_and_reset(_BatchCollection) + @docstring_deprecated( + details="Use the 'stream' method instead. This method will be removed in 4.21.0", + deprecated_in="4.20.0", + ) + @typing_deprecated("Use the 'stream' method instead. 
This method will be removed in 4.21.0") def experimental( self, + *, + concurrency: Optional[int] = None, + ) -> CollectionBatchingContextManager[Properties]: + return self.stream(concurrency=concurrency) + + def stream( + self, + *, + concurrency: Optional[int] = None, ) -> CollectionBatchingContextManager[Properties]: - """Configure the batching context manager using the experimental server-side batching mode. + """Configure the batching context manager to use batch streaming. + + When you exit the context manager, the final batch will be sent automatically. + + Args: + concurrency: The number of concurrent requests when sending batches. This controls the number of concurrent requests + made to Weaviate. If not provided, the default value is 1. + """ + if self._connection._weaviate_version.is_lower_than(1, 36, 0): + raise WeaviateUnsupportedFeatureError( + "Server-side batching", str(self._connection._weaviate_version), "1.36.0" + ) + self._batch_mode = _ServerSideBatching( + # concurrency=concurrency + # if concurrency is not None + # else len(self._cluster.get_nodes_status()) + concurrency=concurrency or 1, + ) + return self.__create_batch_and_reset(_BatchCollectionSync) + + +class _BatchCollectionWrapperAsync(Generic[Properties], _BatchWrapperAsync): + def __init__( + self, + connection: ConnectionAsync, + consistency_level: Optional[ConsistencyLevel], + name: str, + tenant: Optional[str], + ) -> None: + super().__init__(connection, consistency_level) + self.__name = name + self.__tenant = tenant + + def __create_batch_and_reset(self): + self._batch_data = _BatchDataWrapper() # clear old data + return _ContextManagerAsync( + BatchCollectionAsync( + connection=self._connection, + consistency_level=self._consistency_level, + results=self._batch_data, + name=self.__name, + tenant=self.__tenant, + ) + ) + + @docstring_deprecated( + details="Use the 'stream' method instead. 
This method will be removed in 4.21.0", + deprecated_in="4.20.0", + ) + @typing_deprecated("Use the 'stream' method instead. This method will be removed in 4.21.0") + def experimental( + self, + ) -> CollectionBatchingContextManagerAsync[Properties]: + return self.stream() + + def stream( + self, + *, + concurrency: Optional[int] = None, + ) -> CollectionBatchingContextManagerAsync[Properties]: + """Configure the batching context manager to use batch streaming. When you exit the context manager, the final batch will be sent automatically. + + Args: + concurrency: The number of concurrent requests when sending batches. This controls the number of concurrent requests + made to Weaviate. If not provided, the default value is 1. """ - if self._connection._weaviate_version.is_lower_than(1, 34, 0): + if self._connection._weaviate_version.is_lower_than(1, 36, 0): raise WeaviateUnsupportedFeatureError( - "Server-side batching", str(self._connection._weaviate_version), "1.34.0" + "Server-side batching", str(self._connection._weaviate_version), "1.36.0" ) self._batch_mode = _ServerSideBatching( # concurrency=concurrency # if concurrency is not None # else len(self._cluster.get_nodes_status()) - concurrency=1, # hard-code until client-side multi-threading is fixed + concurrency=concurrency or 1, ) - return self.__create_batch_and_reset(_BatchCollectionNew) + return self.__create_batch_and_reset() diff --git a/weaviate/collections/batch/grpc_batch.py b/weaviate/collections/batch/grpc_batch.py index 7384dcb49..666943bef 100644 --- a/weaviate/collections/batch/grpc_batch.py +++ b/weaviate/collections/batch/grpc_batch.py @@ -2,7 +2,18 @@ import struct import time import uuid as uuid_package -from typing import Any, Dict, Generator, List, Mapping, Optional, Sequence, Union, cast +from typing import ( + Any, + AsyncGenerator, + Dict, + Generator, + List, + Mapping, + Optional, + Sequence, + Union, + cast, +) from google.protobuf.struct_pb2 import Struct @@ -20,7 +31,7 @@ from 
weaviate.collections.grpc.shared import _BaseGRPC, _is_1d_vector, _Pack from weaviate.connect import executor from weaviate.connect.base import MAX_GRPC_MESSAGE_LENGTH -from weaviate.connect.v4 import Connection, ConnectionSync +from weaviate.connect.v4 import Connection, ConnectionAsync, ConnectionSync from weaviate.exceptions import ( WeaviateInsertInvalidPropertyError, WeaviateInsertManyAllFailedError, @@ -203,8 +214,8 @@ def stream( connection: ConnectionSync, *, requests: Generator[batch_pb2.BatchStreamRequest, None, None], - ) -> Generator[batch_pb2.BatchStreamReply, None, None]: - """Start a new stream for receiving messages about the ongoing server-side batching from Weaviate. + ): + """Start a new sync stream for send/recv messages about the ongoing server-side batching from Weaviate. Args: connection: The connection to the Weaviate instance. @@ -212,6 +223,20 @@ def stream( """ return connection.grpc_batch_stream(requests=requests) + def astream( + self, + connection: ConnectionAsync, + *, + requests: AsyncGenerator[batch_pb2.BatchStreamRequest, None], + ): + """Start a new async stream for send/recv messages about the ongoing server-side batching from Weaviate. + + Args: + connection: The connection to the Weaviate instance. + requests: An async generator that yields `BatchStreamRequest` messages to be sent to the server. 
+ """ + return connection.grpc_batch_stream(requests=requests) + def __translate_properties_from_python_to_grpc( self, data: Dict[str, Any], refs: ReferenceInputs ) -> batch_pb2.BatchObject.Properties: diff --git a/weaviate/collections/batch/sync.py b/weaviate/collections/batch/sync.py new file mode 100644 index 000000000..44a4fa1ba --- /dev/null +++ b/weaviate/collections/batch/sync.py @@ -0,0 +1,620 @@ +import threading +import time +import uuid as uuid_package +from concurrent.futures import ThreadPoolExecutor +from queue import Empty, Full, Queue +from typing import Generator, List, Optional, Set, Union + +from pydantic import ValidationError + +from weaviate.collections.batch.base import ( + GCP_STREAM_TIMEOUT, + ObjectsBatchRequest, + ReferencesBatchRequest, + _BatchDataWrapper, + _BatchMode, + _BgThreads, + _ClusterBatch, +) +from weaviate.collections.batch.grpc_batch import _BatchGRPC +from weaviate.collections.classes.batch import ( + BatchObject, + BatchObjectReturn, + BatchReference, + BatchReferenceReturn, + ErrorObject, + ErrorReference, + Shard, +) +from weaviate.collections.classes.config import ConsistencyLevel +from weaviate.collections.classes.internal import ( + ReferenceInput, + ReferenceInputs, + ReferenceToMulti, +) +from weaviate.collections.classes.types import WeaviateProperties +from weaviate.connect.executor import result +from weaviate.connect.v4 import ConnectionSync +from weaviate.exceptions import ( + WeaviateBatchFailedToReestablishStreamError, + WeaviateBatchStreamError, + WeaviateBatchValidationError, + WeaviateGRPCUnavailableError, + WeaviateStartUpError, +) +from weaviate.logger import logger +from weaviate.proto.v1 import batch_pb2 +from weaviate.types import UUID, VECTORS + + +class _BatchBaseSync: + def __init__( + self, + connection: ConnectionSync, + consistency_level: Optional[ConsistencyLevel], + results: _BatchDataWrapper, + batch_mode: Optional[_BatchMode] = None, + executor: Optional[ThreadPoolExecutor] = None, + 
vectorizer_batching: bool = False, + objects: Optional[ObjectsBatchRequest[BatchObject]] = None, + references: Optional[ReferencesBatchRequest[BatchReference]] = None, + ) -> None: + self.__batch_objects = objects or ObjectsBatchRequest[BatchObject]() + self.__batch_references = references or ReferencesBatchRequest[BatchReference]() + + self.__connection = connection + self.__is_gcp_on_wcd = connection._connection_params.is_gcp_on_wcd() + self.__stream_start: Optional[float] = None + self.__is_renewing_stream = threading.Event() + self.__consistency_level: ConsistencyLevel = consistency_level or ConsistencyLevel.QUORUM + self.__batch_size = 100 + + self.__batch_grpc = _BatchGRPC( + connection._weaviate_version, self.__consistency_level, connection._grpc_max_msg_size + ) + self.__cluster = _ClusterBatch(self.__connection) + self.__number_of_nodes = self.__cluster.get_number_of_nodes() + + # lookup table for objects that are currently being processed - is used to not send references from objects that have not been added yet + self.__uuid_lookup: Set[str] = set() + + # we do not want that users can access the results directly as they are not thread-safe + self.__results_for_wrapper_backup = results + self.__results_for_wrapper = _BatchDataWrapper() + + self.__objs_count = 0 + self.__refs_count = 0 + + self.__uuid_lookup_lock = threading.Lock() + self.__results_lock = threading.Lock() + + self.__bg_exception: Optional[Exception] = None + self.__is_oom = threading.Event() + self.__is_shutting_down = threading.Event() + self.__is_shutdown = threading.Event() + + self.__objs_cache_lock = threading.Lock() + self.__refs_cache_lock = threading.Lock() + self.__objs_cache: dict[str, BatchObject] = {} + self.__refs_cache: dict[str, BatchReference] = {} + + self.__acks_lock = threading.Lock() + self.__inflight_objs: set[str] = set() + self.__inflight_refs: set[str] = set() + + # maxsize=1 so that __loop does not run faster than generator for __recv + # thereby using too much 
buffer in case of server-side shutdown + self.__reqs: Queue[Optional[batch_pb2.BatchStreamRequest]] = Queue(maxsize=1) + self.__stop = False + + @property + def number_errors(self) -> int: + """Return the number of errors in the batch.""" + return len(self.__results_for_wrapper.failed_objects) + len( + self.__results_for_wrapper.failed_references + ) + + def __all_threads_alive(self) -> bool: + return self.__bg_threads is not None and all( + thread.is_alive() for thread in self.__bg_threads + ) + + def _start(self) -> None: + self.__bg_threads = [self.__start_bg_threads() for _ in range(1)] + logger.info( + f"Provisioned {len(self.__bg_threads)} stream(s) to the server for batch processing" + ) + now = time.time() + while not self.__all_threads_alive(): + # wait for the recv threads to be started + time.sleep(0.01) + if time.time() - now > 60: + raise WeaviateBatchStreamError( + "Batch stream was not started within 60 seconds. Please check your connection." + ) + + def _wait(self) -> None: + for bg_thread in self.__bg_threads: + bg_thread.join() + + # copy the results to the public results + self.__results_for_wrapper_backup.results = self.__results_for_wrapper.results + self.__results_for_wrapper_backup.failed_objects = self.__results_for_wrapper.failed_objects + self.__results_for_wrapper_backup.failed_references = ( + self.__results_for_wrapper.failed_references + ) + self.__results_for_wrapper_backup.imported_shards = ( + self.__results_for_wrapper.imported_shards + ) + + def _shutdown(self) -> None: + # Shutdown the current batch and wait for all requests to be finished + self.__stop = True + + def __loop(self) -> None: + refresh_time: float = 0.01 + while self.__bg_exception is None: + if len(self.__batch_objects) + len(self.__batch_references) > 0: + start = time.time() + while (len_o := len(self.__batch_objects)) + ( + len_r := len(self.__batch_references) + ) < self.__batch_size: + # wait for more objects to be added up to the batch size + 
time.sleep(refresh_time) + if time.time() - start >= 1 and ( + len_o == len(self.__batch_objects) or len_r == len(self.__batch_references) + ): + # no new objects were added in the last second, exit the loop + break + + objs = self.__batch_objects.pop_items(self.__batch_size) + refs = self.__batch_references.pop_items( + self.__batch_size - len(objs), + uuid_lookup=self.__uuid_lookup, + ) + with self.__uuid_lookup_lock: + self.__uuid_lookup.difference_update(obj.uuid for obj in objs) + + for req in self.__generate_stream_requests(objs, refs): + start, paused = time.time(), False + while ( + self.__is_shutting_down.is_set() + or self.__is_shutdown.is_set() + or self.__is_oom.is_set() + ): + if not paused: + logger.info("Server is shutting down, pausing batching loop...") + self.__reqs.put(None) + paused = True + time.sleep(1) + if time.time() - start > 300: + raise WeaviateBatchFailedToReestablishStreamError( + "Batch stream was not re-established within 5 minutes. Terminating batch." + ) + try: + self.__reqs.put(req, timeout=60) + except Full as e: + logger.warning( + "Batch queue is blocked for more than 60 seconds. 
Exiting the loop" + ) + self.__bg_exception = e + return + elif self.__stop: + # we are done, send the sentinel into our queue to be consumed by the batch sender + self.__reqs.put(None) # signal the end of the stream + logger.info("Batching finished, sent stop signal to batch stream") + return + time.sleep(refresh_time) + + def __generate_stream_requests( + self, + objects: List[BatchObject], + references: List[BatchReference], + ) -> Generator[batch_pb2.BatchStreamRequest, None, None]: + per_object_overhead = 4 # extra overhead bytes per object in the request + + def request_maker(): + return batch_pb2.BatchStreamRequest() + + request = request_maker() + total_size = request.ByteSize() + + inflight_objs = set() + inflight_refs = set() + for object_ in objects: + obj = self.__batch_grpc.grpc_object(object_._to_internal()) + obj_size = obj.ByteSize() + per_object_overhead + + if total_size + obj_size >= self.__batch_grpc.grpc_max_msg_size: + yield request + request = request_maker() + total_size = request.ByteSize() + + request.data.objects.values.append(obj) + total_size += obj_size + inflight_objs.add(obj.uuid) + + for reference in references: + ref = self.__batch_grpc.grpc_reference(reference._to_internal()) + ref_size = ref.ByteSize() + per_object_overhead + + if total_size + ref_size >= self.__batch_grpc.grpc_max_msg_size: + yield request + request = request_maker() + total_size = request.ByteSize() + + request.data.references.values.append(ref) + total_size += ref_size + inflight_refs.add(reference._to_beacon()) + + with self.__acks_lock: + self.__inflight_objs.update(inflight_objs) + self.__inflight_refs.update(inflight_refs) + + if len(request.data.objects.values) > 0 or len(request.data.references.values) > 0: + yield request + + def __send( + self, + ) -> Generator[batch_pb2.BatchStreamRequest, None, None]: + yield batch_pb2.BatchStreamRequest( + start=batch_pb2.BatchStreamRequest.Start( + consistency_level=self.__batch_grpc._consistency_level, + ), + ) + 
stream_start = time.time() + while self.__bg_exception is None: + if self.__is_gcp_on_wcd: + assert stream_start is not None + if time.time() - stream_start > GCP_STREAM_TIMEOUT: + logger.info( + "GCP connections have a maximum lifetime. Re-establishing the batch stream to avoid timeout errors." + ) + self.__is_renewing_stream.set() + yield batch_pb2.BatchStreamRequest(stop=batch_pb2.BatchStreamRequest.Stop()) + return + try: + req = self.__reqs.get(timeout=1) + except Empty: + continue + if req is not None: + yield req + continue + if self.__stop and not ( + self.__is_shutting_down.is_set() or self.__is_shutdown.is_set() + ): + logger.info("Batching finished, closing the client-side of the stream") + yield batch_pb2.BatchStreamRequest(stop=batch_pb2.BatchStreamRequest.Stop()) + return + if self.__is_shutting_down.is_set(): + logger.info("Server shutting down, closing the client-side of the stream") + return + if self.__is_oom.is_set(): + logger.info("Server out-of-memory, closing the client-side of the stream") + return + logger.info("Received sentinel, but not stopping, continuing...") + logger.info("Batch send thread exiting due to exception...") + + def __recv(self) -> None: + stream = self.__batch_grpc.stream( + connection=self.__connection, + requests=self.__send(), + ) + self.__is_shutdown.clear() + for message in stream: + if message.HasField("started"): + logger.info("Batch stream started successfully") + + if message.HasField("backoff"): + if ( + message.backoff.batch_size != self.__batch_size + and not self.__is_shutting_down.is_set() + and not self.__is_shutdown.is_set() + and not self.__is_oom.is_set() + and not self.__stop + ): + self.__batch_size = message.backoff.batch_size + logger.info(f"Updated batch size to {self.__batch_size} as per server request") + + if message.HasField("acks"): + with self.__acks_lock: + self.__inflight_objs.difference_update(message.acks.uuids) + self.__uuid_lookup.difference_update(message.acks.uuids) + 
self.__inflight_refs.difference_update(message.acks.beacons) + + if message.HasField("results"): + result_objs = BatchObjectReturn() + result_refs = BatchReferenceReturn() + failed_objs: List[ErrorObject] = [] + failed_refs: List[ErrorReference] = [] + for error in message.results.errors: + if error.HasField("uuid"): + try: + with self.__objs_cache_lock: + cached = self.__objs_cache.pop(error.uuid) + except KeyError: + continue + err = ErrorObject( + message=error.error, + object_=cached, + ) + result_objs += BatchObjectReturn( + _all_responses=[err], + errors={cached.index: err}, + ) + failed_objs.append(err) + logger.warning( + { + "error": error.error, + "object": error.uuid, + "action": "use {client,collection}.batch.failed_objects to access this error", + } + ) + if error.HasField("beacon"): + try: + with self.__refs_cache_lock: + cached = self.__refs_cache.pop(error.beacon) + except KeyError: + continue + err = ErrorReference( + message=error.error, + reference=cached, + ) + failed_refs.append(err) + result_refs += BatchReferenceReturn( + errors={cached.index: err}, + ) + logger.warning( + { + "error": error.error, + "reference": error.beacon, + "action": "use {client,collection}.batch.failed_references to access this error", + } + ) + for success in message.results.successes: + if success.HasField("uuid"): + try: + with self.__objs_cache_lock: + cached = self.__objs_cache.pop(success.uuid) + except KeyError: + continue + uuid = uuid_package.UUID(success.uuid) + result_objs += BatchObjectReturn( + _all_responses=[uuid], + uuids={cached.index: uuid}, + ) + if success.HasField("beacon"): + try: + with self.__refs_cache_lock: + self.__refs_cache.pop(success.beacon, None) + except KeyError: + continue + with self.__results_lock: + self.__results_for_wrapper.results.objs += result_objs + self.__results_for_wrapper.results.refs += result_refs + self.__results_for_wrapper.failed_objects.extend(failed_objs) + 
self.__results_for_wrapper.failed_references.extend(failed_refs) + + if message.HasField("out_of_memory"): + logger.info( + "Server reported out-of-memory. Batching will wait at most 10 minutes for the server to scale-up. If the server does not recover within this time, the batch will terminate with an error." + ) + self.__is_oom.set() + with self.__objs_cache_lock: + self.__batch_objects.prepend( + [self.__objs_cache[uuid] for uuid in message.out_of_memory.uuids] + ) + with self.__refs_cache_lock: + self.__batch_references.prepend( + [self.__refs_cache[beacon] for beacon in message.out_of_memory.beacons] + ) + + if message.HasField("shutting_down"): + logger.info("Received shutting down message from server") + self.__is_shutting_down.set() + self.__is_oom.clear() + + if message.HasField("shutdown"): + logger.info("Received shutdown finished message from server") + self.__is_shutdown.set() + self.__is_shutting_down.clear() + + # restart the stream if we were shutdown by the node we were connected to ensuring that the index is + # propagated properly from it to the new one + if self.__is_shutdown.is_set(): + self.__reconnect() + logger.info("Restarting batch recv after shutdown...") + return self.__recv() + elif self.__is_renewing_stream.is_set(): + # restart the stream if we are renewing it (GCP connections have a max lifetime) + logger.info("Restarting batch recv after renewing stream...") + self.__is_renewing_stream.clear() + return self.__recv() + + logger.info("Server closed the stream from its side, shutting down batch") + return + + def __reconnect(self, retry: int = 0) -> None: + if self.__consistency_level == ConsistencyLevel.ALL or self.__number_of_nodes == 1: + # check that all nodes are available before reconnecting + up_nodes = self.__cluster.get_nodes_status() + while len(up_nodes) != self.__number_of_nodes or any( + node["status"] != "HEALTHY" for node in up_nodes + ): + logger.info( + "Waiting for all nodes to be HEALTHY before reconnecting to batch 
stream..." + ) + time.sleep(5) + up_nodes = self.__cluster.get_nodes_status() + try: + logger.info(f"Trying to reconnect after shutdown... {retry + 1}/{5}") + result(self.__connection.close("sync")) + self.__connection.connect(force=True) + logger.info("Reconnected successfully") + except (WeaviateStartUpError, WeaviateGRPCUnavailableError) as e: + if retry < 5: + logger.warning(f"Failed to reconnect, after {retry} attempts. Retrying...") + time.sleep(2**retry) + self.__reconnect(retry + 1) + else: + logger.error("Failed to reconnect after 5 attempts") + self.__bg_exception = e + + def __start_bg_threads(self) -> _BgThreads: + def loop_wrapper() -> None: + try: + self.__loop() + logger.info("exited batch requests loop thread") + except Exception as e: + logger.error(e) + self.__bg_exception = e + + def recv_wrapper() -> None: + socket_hung_up = False + try: + self.__recv() + logger.info("exited batch receive thread") + except Exception as e: + if isinstance(e, WeaviateBatchStreamError) and ( + "Socket closed" in e.message + or "context canceled" in e.message + or "Connection reset" in e.message + or "Received RST_STREAM with error code 2" in e.message + ): + logger.error(f"Socket hung up detected in batch receive thread: {e.message}") + socket_hung_up = True + else: + logger.error(e) + logger.error(type(e)) + self.__bg_exception = e + if socket_hung_up: + # this happens during ungraceful shutdown of the coordinator + # lets restart the stream and add the cached objects again + logger.warning("Stream closed unexpectedly, restarting...") + self.__reconnect() + # server sets this whenever it restarts, gracefully or unexpectedly, so need to clear it now + self.__is_shutting_down.clear() + with self.__objs_cache_lock: + self.__batch_objects.prepend(list(self.__objs_cache.values())) + with self.__refs_cache_lock: + self.__batch_references.prepend(list(self.__refs_cache.values())) + # start a new stream with a newly reconnected channel + return recv_wrapper() + + threads 
= _BgThreads( + loop=threading.Thread( + target=loop_wrapper, + daemon=True, + name="BgBatchLoop", + ), + recv=threading.Thread( + target=recv_wrapper, + daemon=True, + name="BgBatchRecv", + ), + ) + threads.start_recv() + threads.start_loop() + return threads + + def flush(self) -> None: + """Flush the batch queue and wait for all requests to be finished.""" + # bg thread is sending objs+refs automatically, so simply wait for everything to be done + while len(self.__batch_objects) > 0 or len(self.__batch_references) > 0: + time.sleep(0.01) + self.__check_bg_threads_alive() + + def _add_object( + self, + collection: str, + properties: Optional[WeaviateProperties] = None, + references: Optional[ReferenceInputs] = None, + uuid: Optional[UUID] = None, + vector: Optional[VECTORS] = None, + tenant: Optional[str] = None, + ) -> UUID: + self.__check_bg_threads_alive() + try: + batch_object = BatchObject( + collection=collection, + properties=properties, + references=references, + uuid=uuid, + vector=vector, + tenant=tenant, + index=self.__objs_count, + ) + self.__results_for_wrapper.imported_shards.add( + Shard(collection=collection, tenant=tenant) + ) + except ValidationError as e: + raise WeaviateBatchValidationError(repr(e)) + uuid = str(batch_object.uuid) + with self.__uuid_lookup_lock: + self.__uuid_lookup.add(uuid) + self.__batch_objects.add(batch_object) + with self.__objs_cache_lock: + self.__objs_cache[uuid] = batch_object + self.__objs_count += 1 + + while self.__is_blocked(): + self.__check_bg_threads_alive() + time.sleep(0.01) + + assert batch_object.uuid is not None + return batch_object.uuid + + def _add_reference( + self, + from_object_uuid: UUID, + from_object_collection: str, + from_property_name: str, + to: ReferenceInput, + tenant: Optional[str] = None, + ) -> None: + self.__check_bg_threads_alive() + if isinstance(to, ReferenceToMulti): + to_strs: Union[List[str], List[UUID]] = to.uuids_str + elif isinstance(to, str) or isinstance(to, 
uuid_package.UUID): + to_strs = [to] + else: + to_strs = list(to) + + for uid in to_strs: + try: + batch_reference = BatchReference( + from_object_collection=from_object_collection, + from_object_uuid=from_object_uuid, + from_property_name=from_property_name, + to_object_collection=( + to.target_collection if isinstance(to, ReferenceToMulti) else None + ), + to_object_uuid=uid, + tenant=tenant, + index=self.__refs_count, + ) + except ValidationError as e: + raise WeaviateBatchValidationError(repr(e)) + self.__batch_references.add(batch_reference) + with self.__refs_cache_lock: + self.__refs_cache[batch_reference._to_beacon()] = batch_reference + self.__refs_count += 1 + while self.__is_blocked(): + self.__check_bg_threads_alive() + time.sleep(0.01) + + def __is_blocked(self): + return ( + len(self.__inflight_objs) >= self.__batch_size + or len(self.__inflight_refs) >= self.__batch_size * 2 + or self.__is_renewing_stream.is_set() + or self.__is_shutting_down.is_set() + or self.__is_shutdown.is_set() + or self.__is_oom.is_set() + ) + + def __check_bg_threads_alive(self) -> None: + if self.__all_threads_alive(): + return + + raise self.__bg_exception or Exception("Batch thread died unexpectedly") diff --git a/weaviate/collections/collection/async_.py b/weaviate/collections/collection/async_.py index 47ff6d2d1..2c0cea5b0 100644 --- a/weaviate/collections/collection/async_.py +++ b/weaviate/collections/collection/async_.py @@ -5,6 +5,9 @@ from weaviate.cluster import _ClusterAsync from weaviate.collections.aggregate import _AggregateCollectionAsync from weaviate.collections.backups import _CollectionBackupAsync +from weaviate.collections.batch.collection import ( + _BatchCollectionWrapperAsync, +) from weaviate.collections.classes.cluster import Shard from weaviate.collections.classes.config import ConsistencyLevel from weaviate.collections.classes.grpc import METADATA, PROPERTIES, REFERENCES @@ -77,6 +80,15 @@ def __init__( """This namespace includes all the querying 
methods available to you when using Weaviate's standard aggregation capabilities.""" self.backup: _CollectionBackupAsync = _CollectionBackupAsync(connection, name) """This namespace includes all the backup methods available to you when backing up a collection in Weaviate.""" + self.batch: _BatchCollectionWrapperAsync[Properties] = _BatchCollectionWrapperAsync[ + Properties + ]( + connection, + consistency_level, + name, + tenant, + ) + """This namespace contains all the functionality to upload data in batches to Weaviate for this specific collection.""" self.config = _ConfigCollectionAsync(connection, name, tenant) """This namespace includes all the CRUD methods available to you when modifying the configuration of the collection in Weaviate.""" self.data = _DataCollectionAsync[Properties]( diff --git a/weaviate/collections/collection/sync.py b/weaviate/collections/collection/sync.py index 88f728b30..d50c0c2b5 100644 --- a/weaviate/collections/collection/sync.py +++ b/weaviate/collections/collection/sync.py @@ -7,7 +7,7 @@ from weaviate.collections.backups import _CollectionBackup from weaviate.collections.batch.collection import ( _BatchCollection, - _BatchCollectionNew, + _BatchCollectionSync, _BatchCollectionWrapper, ) from weaviate.collections.classes.cluster import Shard @@ -101,10 +101,8 @@ def __init__( name, tenant, config, - batch_client=_BatchCollectionNew[Properties] - if connection._weaviate_version.is_at_least( - 1, 32, 0 - ) # todo: change to 1.33.0 when it lands + batch_client=_BatchCollectionSync[Properties] + if connection._weaviate_version.is_at_least(1, 36, 0) else _BatchCollection[Properties], ) """This namespace contains all the functionality to upload data in batches to Weaviate for this specific collection.""" diff --git a/weaviate/collections/data/async_.pyi b/weaviate/collections/data/async_.pyi index 28dd4e2e4..8dd092bf3 100644 --- a/weaviate/collections/data/async_.pyi +++ b/weaviate/collections/data/async_.pyi @@ -1,6 +1,9 @@ import uuid 
as uuid_package -from typing import Generic, List, Literal, Optional, Sequence, Union, overload +from typing import Generic, Iterable, List, Literal, Optional, Sequence, Union, overload +from weaviate.collections.batch.grpc_batch import _BatchGRPC +from weaviate.collections.batch.grpc_batch_delete import _BatchDeleteGRPC +from weaviate.collections.batch.rest import _BatchREST from weaviate.collections.classes.batch import ( BatchObjectReturn, BatchReferenceReturn, @@ -23,6 +26,10 @@ from .executor import _DataCollectionExecutor class _DataCollectionAsync( Generic[Properties,], _DataCollectionExecutor[ConnectionAsync, Properties] ): + __batch_delete: _BatchDeleteGRPC + __batch_grpc: _BatchGRPC + __batch_rest: _BatchREST + async def insert( self, properties: Properties, @@ -72,3 +79,6 @@ class _DataCollectionAsync( async def delete_many( self, where: _Filters, *, verbose: bool = False, dry_run: bool = False ) -> Union[DeleteManyReturn[List[DeleteManyObject]], DeleteManyReturn[None]]: ... + async def ingest( + self, objs: Iterable[Union[Properties, DataObject[Properties, Optional[ReferenceInputs]]]] + ) -> BatchObjectReturn: ... 
diff --git a/weaviate/collections/data/executor.py b/weaviate/collections/data/executor.py index eb63a744d..73a07b1f9 100644 --- a/weaviate/collections/data/executor.py +++ b/weaviate/collections/data/executor.py @@ -5,6 +5,7 @@ Any, Dict, Generic, + Iterable, List, Literal, Mapping, @@ -19,6 +20,13 @@ from httpx import Response +from weaviate.collections.batch.base import _BatchDataWrapper +from weaviate.collections.batch.collection import ( + BatchCollectionAsync, + BatchCollectionSync, + CollectionBatchingContextManager, + CollectionBatchingContextManagerAsync, +) from weaviate.collections.batch.grpc_batch import _BatchGRPC from weaviate.collections.batch.grpc_batch_delete import _BatchDeleteGRPC from weaviate.collections.batch.rest import _BatchREST @@ -57,6 +65,10 @@ class _DataCollectionExecutor(Generic[ConnectionType, Properties]): + __batch_delete: _BatchDeleteGRPC + __batch_grpc: _BatchGRPC + __batch_rest: _BatchREST + def __init__( self, connection: ConnectionType, @@ -698,3 +710,78 @@ def __parse_vector(self, obj: Dict[str, Any], vector: VECTORS) -> Dict[str, Any] else: obj["vector"] = _get_vector_v4(vector) return obj + + def ingest( + self, objs: Iterable[Union[Properties, DataObject[Properties, Optional[ReferenceInputs]]]] + ) -> executor.Result[BatchObjectReturn]: + """Ingest multiple objects into the collection in batches. The batching is handled automatically for you by Weaviate. + + This is different from `insert_many` which sends all objects in a single batch request. Use this method when you want to insert a large number of objects without worrying about batch sizes + and whether they will fit into the maximum allowed batch size of your Weaviate instance. In addition, use this instead of `client.batch.dynamic()` or `collection.batch.dynamic()` for a more + performant dynamic batching algorithm that utilizes server-side batching. + + Args: + objs: An iterable of objects to insert. 
This can be either a sequence of `Properties` or `DataObject[Properties, ReferenceInputs]` + If you didn't set `data_model` then `Properties` will be `Data[str, Any]` in which case you can insert simple dictionaries here. + """ + if isinstance(self._connection, ConnectionAsync): + con = self._connection + + async def execute() -> BatchObjectReturn: + results = _BatchDataWrapper() + ctx = CollectionBatchingContextManagerAsync( + BatchCollectionAsync( + connection=con, + results=results, + consistency_level=self._consistency_level, + name=self.name, + tenant=self._tenant, + ) + ) + async with ctx as batch: + for obj in objs: + if isinstance(obj, DataObject): + await batch.add_object( + properties=cast(dict, obj.properties), + references=obj.references, + uuid=obj.uuid, + vector=obj.vector, + ) + else: + await batch.add_object( + properties=cast(dict, obj), + references=None, + uuid=None, + vector=None, + ) + return results.results.objs + + return execute() + + results = _BatchDataWrapper() + ctx = CollectionBatchingContextManager( + BatchCollectionSync( + connection=self._connection, + results=results, + consistency_level=self._consistency_level, + name=self.name, + tenant=self._tenant, + ) + ) + with ctx as batch: + for obj in objs: + if isinstance(obj, DataObject): + batch.add_object( + properties=cast(dict, obj.properties), + references=obj.references, + uuid=obj.uuid, + vector=obj.vector, + ) + else: + batch.add_object( + properties=cast(dict, obj), + references=None, + uuid=None, + vector=None, + ) + return results.results.objs diff --git a/weaviate/collections/data/sync.pyi b/weaviate/collections/data/sync.pyi index 3fa145a4e..ab1eb3f39 100644 --- a/weaviate/collections/data/sync.pyi +++ b/weaviate/collections/data/sync.pyi @@ -1,6 +1,9 @@ import uuid as uuid_package -from typing import Generic, List, Literal, Optional, Sequence, Union, overload +from typing import Generic, Iterable, List, Literal, Optional, Sequence, Union, overload +from 
weaviate.collections.batch.grpc_batch import _BatchGRPC +from weaviate.collections.batch.grpc_batch_delete import _BatchDeleteGRPC +from weaviate.collections.batch.rest import _BatchREST from weaviate.collections.classes.batch import ( BatchObjectReturn, BatchReferenceReturn, @@ -21,6 +24,10 @@ from weaviate.types import UUID, VECTORS from .executor import _DataCollectionExecutor class _DataCollection(Generic[Properties,], _DataCollectionExecutor[ConnectionSync, Properties]): + __batch_delete: _BatchDeleteGRPC + __batch_grpc: _BatchGRPC + __batch_rest: _BatchREST + def insert( self, properties: Properties, @@ -70,3 +77,6 @@ class _DataCollection(Generic[Properties,], _DataCollectionExecutor[ConnectionSy def delete_many( self, where: _Filters, *, verbose: bool = False, dry_run: bool = False ) -> Union[DeleteManyReturn[List[DeleteManyObject]], DeleteManyReturn[None]]: ... + def ingest( + self, objs: Iterable[Union[Properties, DataObject[Properties, Optional[ReferenceInputs]]]] + ) -> BatchObjectReturn: ... diff --git a/weaviate/config.py b/weaviate/config.py index bc0525531..9d2006829 100644 --- a/weaviate/config.py +++ b/weaviate/config.py @@ -56,6 +56,9 @@ class Timeout(BaseModel): query: Union[int, float] = Field(default=30, ge=0) insert: Union[int, float] = Field(default=90, ge=0) init: Union[int, float] = Field(default=2, ge=0) + stream: Union[int, float, None] = Field( + default=None, ge=0, description="Timeout for streaming operations." 
+ ) class Proxies(BaseModel): diff --git a/weaviate/connect/base.py b/weaviate/connect/base.py index 5b9d8718c..716308391 100644 --- a/weaviate/connect/base.py +++ b/weaviate/connect/base.py @@ -10,6 +10,7 @@ from weaviate.config import Proxies from weaviate.types import NUMBER +from weaviate.util import is_weaviate_domain # from grpclib.client import Channel @@ -36,6 +37,9 @@ def _check_port(cls, v: int) -> int: raise ValueError("port must be between 0 and 65535") return v + def is_gcp(self) -> bool: + return "gcp.weaviate.cloud" in self.host + T = TypeVar("T", bound="ConnectionParams") @@ -90,6 +94,9 @@ def from_params( ), ) + def is_gcp_on_wcd(self) -> bool: + return "gcp" in self.http.host.lower() and is_weaviate_domain(self.http.host) + @model_validator(mode="after") def _check_port_collision(self: T) -> T: if self.http.host == self.grpc.host and self.http.port == self.grpc.port: diff --git a/weaviate/connect/v4.py b/weaviate/connect/v4.py index 0afb62ffe..394ca7732 100644 --- a/weaviate/connect/v4.py +++ b/weaviate/connect/v4.py @@ -7,6 +7,7 @@ from threading import Event, Thread from typing import ( Any, + AsyncGenerator, Awaitable, Dict, Generator, @@ -20,13 +21,14 @@ overload, ) +import grpc from authlib.integrations.httpx_client import ( # type: ignore AsyncOAuth2Client, OAuth2Client, ) from grpc import Call, RpcError, StatusCode from grpc import Channel as SyncChannel # type: ignore -from grpc.aio import AioRpcError +from grpc.aio import AioRpcError, StreamStreamCall from grpc.aio import Channel as AsyncChannel # type: ignore # from grpclib.client import Channel @@ -398,7 +400,7 @@ def _open_connections_rest( async def get_oidc() -> None: async with self._make_client("async") as client: try: - response = await client.get(oidc_url) + response = await client.get(oidc_url, timeout=self.timeout_config.init) except Exception as e: raise WeaviateConnectionError( f"Error: {e}. \nIs Weaviate running and reachable at {self.url}?" 
@@ -413,7 +415,7 @@ async def get_oidc() -> None: with self._make_client("sync") as client: try: - response = client.get(oidc_url) + response = client.get(oidc_url, timeout=self.timeout_config.init) except Exception as e: raise WeaviateConnectionError( f"Error: {e}. \nIs Weaviate running and reachable at {self.url}?" @@ -1011,7 +1013,9 @@ def grpc_batch_stream( try: assert self.grpc_stub is not None for msg in self.grpc_stub.BatchStream( - request_iterator=requests, metadata=self.grpc_headers() + request_iterator=requests, + timeout=self.timeout_config.stream, + metadata=self.grpc_headers(), ): yield msg except RpcError as e: @@ -1020,7 +1024,7 @@ def grpc_batch_stream( raise InsufficientPermissionsError(error) if error.code() == StatusCode.ABORTED: raise _BatchStreamShutdownError() - raise WeaviateBatchStreamError(str(error.details())) + raise WeaviateBatchStreamError(f"{error.code()}({error.details()})") def grpc_batch_delete( self, request: batch_delete_pb2.BatchDeleteRequest @@ -1088,8 +1092,8 @@ def grpc_aggregate( class ConnectionAsync(_ConnectionBase): """Connection class used to communicate to a weaviate instance.""" - async def connect(self) -> None: - if self._connected: + async def connect(self, force: bool = False) -> None: + if self._connected and not force: return None await executor.aresult(self._open_connections_rest(self._auth, "async")) @@ -1221,6 +1225,62 @@ async def grpc_batch_delete( raise InsufficientPermissionsError(e) raise WeaviateDeleteManyError(str(e)) + async def grpc_batch_stream( + self, + requests: AsyncGenerator[batch_pb2.BatchStreamRequest, None], + ) -> AsyncGenerator[batch_pb2.BatchStreamReply, None]: + assert isinstance(self._grpc_channel, grpc.aio.Channel) + try: + async for msg in self._grpc_channel.stream_stream( + "/weaviate.v1.Weaviate/BatchStream", + request_serializer=batch_pb2.BatchStreamRequest.SerializeToString, + response_deserializer=batch_pb2.BatchStreamReply.FromString, + )( + request_iterator=requests, + 
timeout=self.timeout_config.stream, + metadata=self.grpc_headers(), + ): + yield msg + except RpcError as e: + error = cast(Call, e) + if error.code() == StatusCode.PERMISSION_DENIED: + raise InsufficientPermissionsError(error) + if error.code() == StatusCode.ABORTED: + raise _BatchStreamShutdownError() + raise WeaviateBatchStreamError(f"{error.code()}({error.details()})") + + async def grpc_batch_stream_write( + self, + stream: StreamStreamCall[batch_pb2.BatchStreamRequest, batch_pb2.BatchStreamReply], + request: batch_pb2.BatchStreamRequest, + ) -> None: + try: + await stream.write(request) + except AioRpcError as e: + error = cast(Call, e) + if error.code() == StatusCode.PERMISSION_DENIED: + raise InsufficientPermissionsError(error) + if error.code() == StatusCode.ABORTED: + raise _BatchStreamShutdownError() + raise WeaviateBatchStreamError(str(error.details())) + + async def grpc_batch_stream_read( + self, + stream: StreamStreamCall[batch_pb2.BatchStreamRequest, batch_pb2.BatchStreamReply], + ) -> Optional[batch_pb2.BatchStreamReply]: + try: + msg = await stream.read() + if not isinstance(msg, batch_pb2.BatchStreamReply): + return None + return msg + except AioRpcError as e: + error = cast(Call, e) + if error.code() == StatusCode.PERMISSION_DENIED: + raise InsufficientPermissionsError(error) + if error.code() == StatusCode.ABORTED: + raise _BatchStreamShutdownError() + raise WeaviateBatchStreamError(str(error.details())) + async def grpc_tenants_get( self, request: tenants_pb2.TenantsGetRequest ) -> tenants_pb2.TenantsGetReply: diff --git a/weaviate/exceptions.py b/weaviate/exceptions.py index 5c6c771c9..2a5b429d5 100644 --- a/weaviate/exceptions.py +++ b/weaviate/exceptions.py @@ -292,6 +292,15 @@ def __init__(self, message: str): self.message = message +class WeaviateBatchFailedToReestablishStreamError(WeaviateBaseError): + """Is raised when the batch stream fails to re-establish within a timeout period.""" + + def __init__(self, message: str): + msg = 
f"""Batch stream failed to re-establish: {message}""" + super().__init__(msg) + self.message = message + + class WeaviateInsertInvalidPropertyError(WeaviateBaseError): """Is raised when inserting an invalid property.""" diff --git a/weaviate/proto/v1/v4216/v1/batch_pb2.py b/weaviate/proto/v1/v4216/v1/batch_pb2.py index ec84caf09..f45cfa621 100644 --- a/weaviate/proto/v1/v4216/v1/batch_pb2.py +++ b/weaviate/proto/v1/v4216/v1/batch_pb2.py @@ -15,7 +15,7 @@ from weaviate.proto.v1.v4216.v1 import base_pb2 as v1_dot_base__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0ev1/batch.proto\x12\x0bweaviate.v1\x1a\x1cgoogle/protobuf/struct.proto\x1a\rv1/base.proto\"\x95\x01\n\x13\x42\x61tchObjectsRequest\x12)\n\x07objects\x18\x01 \x03(\x0b\x32\x18.weaviate.v1.BatchObject\x12=\n\x11\x63onsistency_level\x18\x02 \x01(\x0e\x32\x1d.weaviate.v1.ConsistencyLevelH\x00\x88\x01\x01\x42\x14\n\x12_consistency_level\"\x9e\x01\n\x16\x42\x61tchReferencesRequest\x12/\n\nreferences\x18\x01 \x03(\x0b\x32\x1b.weaviate.v1.BatchReference\x12=\n\x11\x63onsistency_level\x18\x02 \x01(\x0e\x32\x1d.weaviate.v1.ConsistencyLevelH\x00\x88\x01\x01\x42\x14\n\x12_consistency_level\"\xa6\x04\n\x12\x42\x61tchStreamRequest\x12\x36\n\x05start\x18\x01 \x01(\x0b\x32%.weaviate.v1.BatchStreamRequest.StartH\x00\x12\x34\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.weaviate.v1.BatchStreamRequest.DataH\x00\x12\x34\n\x04stop\x18\x03 \x01(\x0b\x32$.weaviate.v1.BatchStreamRequest.StopH\x00\x1a\\\n\x05Start\x12=\n\x11\x63onsistency_level\x18\x01 \x01(\x0e\x32\x1d.weaviate.v1.ConsistencyLevelH\x00\x88\x01\x01\x42\x14\n\x12_consistency_level\x1a\x06\n\x04Stop\x1a\xfa\x01\n\x04\x44\x61ta\x12=\n\x07objects\x18\x01 \x01(\x0b\x32,.weaviate.v1.BatchStreamRequest.Data.Objects\x12\x43\n\nreferences\x18\x02 \x01(\x0b\x32/.weaviate.v1.BatchStreamRequest.Data.References\x1a\x33\n\x07Objects\x12(\n\x06values\x18\x01 \x03(\x0b\x32\x18.weaviate.v1.BatchObject\x1a\x39\n\nReferences\x12+\n\x06values\x18\x01 
\x03(\x0b\x32\x1b.weaviate.v1.BatchReferenceB\t\n\x07message\"\x98\x05\n\x10\x42\x61tchStreamReply\x12\x38\n\x07results\x18\x01 \x01(\x0b\x32%.weaviate.v1.BatchStreamReply.ResultsH\x00\x12\x43\n\rshutting_down\x18\x02 \x01(\x0b\x32*.weaviate.v1.BatchStreamReply.ShuttingDownH\x00\x12:\n\x08shutdown\x18\x03 \x01(\x0b\x32&.weaviate.v1.BatchStreamReply.ShutdownH\x00\x12\x38\n\x07started\x18\x04 \x01(\x0b\x32%.weaviate.v1.BatchStreamReply.StartedH\x00\x12\x38\n\x07\x62\x61\x63koff\x18\x05 \x01(\x0b\x32%.weaviate.v1.BatchStreamReply.BackoffH\x00\x1a\t\n\x07Started\x1a\x0e\n\x0cShuttingDown\x1a\n\n\x08Shutdown\x1a\x1d\n\x07\x42\x61\x63koff\x12\x12\n\nbatch_size\x18\x01 \x01(\x05\x1a\x83\x02\n\x07Results\x12;\n\x06\x65rrors\x18\x01 \x03(\x0b\x32+.weaviate.v1.BatchStreamReply.Results.Error\x12@\n\tsuccesses\x18\x02 \x03(\x0b\x32-.weaviate.v1.BatchStreamReply.Results.Success\x1a\x42\n\x05\x45rror\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\x0e\n\x04uuid\x18\x02 \x01(\tH\x00\x12\x10\n\x06\x62\x65\x61\x63on\x18\x03 \x01(\tH\x00\x42\x08\n\x06\x64\x65tail\x1a\x35\n\x07Success\x12\x0e\n\x04uuid\x18\x02 \x01(\tH\x00\x12\x10\n\x06\x62\x65\x61\x63on\x18\x03 \x01(\tH\x00\x42\x08\n\x06\x64\x65tailB\t\n\x07message\"\xde\x07\n\x0b\x42\x61tchObject\x12\x0c\n\x04uuid\x18\x01 \x01(\t\x12\x12\n\x06vector\x18\x02 \x03(\x02\x42\x02\x18\x01\x12\x37\n\nproperties\x18\x03 \x01(\x0b\x32#.weaviate.v1.BatchObject.Properties\x12\x12\n\ncollection\x18\x04 \x01(\t\x12\x0e\n\x06tenant\x18\x05 \x01(\t\x12\x14\n\x0cvector_bytes\x18\x06 \x01(\x0c\x12%\n\x07vectors\x18\x17 \x03(\x0b\x32\x14.weaviate.v1.Vectors\x1a\x84\x05\n\nProperties\x12\x33\n\x12non_ref_properties\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\x12N\n\x17single_target_ref_props\x18\x02 \x03(\x0b\x32-.weaviate.v1.BatchObject.SingleTargetRefProps\x12L\n\x16multi_target_ref_props\x18\x03 \x03(\x0b\x32,.weaviate.v1.BatchObject.MultiTargetRefProps\x12\x43\n\x17number_array_properties\x18\x04 
\x03(\x0b\x32\".weaviate.v1.NumberArrayProperties\x12=\n\x14int_array_properties\x18\x05 \x03(\x0b\x32\x1f.weaviate.v1.IntArrayProperties\x12?\n\x15text_array_properties\x18\x06 \x03(\x0b\x32 .weaviate.v1.TextArrayProperties\x12\x45\n\x18\x62oolean_array_properties\x18\x07 \x03(\x0b\x32#.weaviate.v1.BooleanArrayProperties\x12\x38\n\x11object_properties\x18\x08 \x03(\x0b\x32\x1d.weaviate.v1.ObjectProperties\x12\x43\n\x17object_array_properties\x18\t \x03(\x0b\x32\".weaviate.v1.ObjectArrayProperties\x12\x18\n\x10\x65mpty_list_props\x18\n \x03(\t\x1a\x38\n\x14SingleTargetRefProps\x12\r\n\x05uuids\x18\x01 \x03(\t\x12\x11\n\tprop_name\x18\x02 \x01(\t\x1aR\n\x13MultiTargetRefProps\x12\r\n\x05uuids\x18\x01 \x03(\t\x12\x11\n\tprop_name\x18\x02 \x01(\t\x12\x19\n\x11target_collection\x18\x03 \x01(\t\"\x99\x01\n\x0e\x42\x61tchReference\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0f\x66rom_collection\x18\x02 \x01(\t\x12\x11\n\tfrom_uuid\x18\x03 \x01(\t\x12\x1a\n\rto_collection\x18\x04 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x07to_uuid\x18\x05 \x01(\t\x12\x0e\n\x06tenant\x18\x06 \x01(\tB\x10\n\x0e_to_collection\"\x88\x01\n\x11\x42\x61tchObjectsReply\x12\x0c\n\x04took\x18\x01 \x01(\x02\x12\x39\n\x06\x65rrors\x18\x02 \x03(\x0b\x32).weaviate.v1.BatchObjectsReply.BatchError\x1a*\n\nBatchError\x12\r\n\x05index\x18\x01 \x01(\x05\x12\r\n\x05\x65rror\x18\x02 \x01(\t\"\x8e\x01\n\x14\x42\x61tchReferencesReply\x12\x0c\n\x04took\x18\x01 \x01(\x02\x12<\n\x06\x65rrors\x18\x02 \x03(\x0b\x32,.weaviate.v1.BatchReferencesReply.BatchError\x1a*\n\nBatchError\x12\r\n\x05index\x18\x01 \x01(\x05\x12\r\n\x05\x65rror\x18\x02 \x01(\tBo\n#io.weaviate.client.grpc.protocol.v1B\x12WeaviateProtoBatchZ4github.com/weaviate/weaviate/grpc/generated;protocolb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0ev1/batch.proto\x12\x0bweaviate.v1\x1a\x1cgoogle/protobuf/struct.proto\x1a\rv1/base.proto\"\x95\x01\n\x13\x42\x61tchObjectsRequest\x12)\n\x07objects\x18\x01 
\x03(\x0b\x32\x18.weaviate.v1.BatchObject\x12=\n\x11\x63onsistency_level\x18\x02 \x01(\x0e\x32\x1d.weaviate.v1.ConsistencyLevelH\x00\x88\x01\x01\x42\x14\n\x12_consistency_level\"\x9e\x01\n\x16\x42\x61tchReferencesRequest\x12/\n\nreferences\x18\x01 \x03(\x0b\x32\x1b.weaviate.v1.BatchReference\x12=\n\x11\x63onsistency_level\x18\x02 \x01(\x0e\x32\x1d.weaviate.v1.ConsistencyLevelH\x00\x88\x01\x01\x42\x14\n\x12_consistency_level\"\xa6\x04\n\x12\x42\x61tchStreamRequest\x12\x36\n\x05start\x18\x01 \x01(\x0b\x32%.weaviate.v1.BatchStreamRequest.StartH\x00\x12\x34\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.weaviate.v1.BatchStreamRequest.DataH\x00\x12\x34\n\x04stop\x18\x03 \x01(\x0b\x32$.weaviate.v1.BatchStreamRequest.StopH\x00\x1a\\\n\x05Start\x12=\n\x11\x63onsistency_level\x18\x01 \x01(\x0e\x32\x1d.weaviate.v1.ConsistencyLevelH\x00\x88\x01\x01\x42\x14\n\x12_consistency_level\x1a\x06\n\x04Stop\x1a\xfa\x01\n\x04\x44\x61ta\x12=\n\x07objects\x18\x01 \x01(\x0b\x32,.weaviate.v1.BatchStreamRequest.Data.Objects\x12\x43\n\nreferences\x18\x02 \x01(\x0b\x32/.weaviate.v1.BatchStreamRequest.Data.References\x1a\x33\n\x07Objects\x12(\n\x06values\x18\x01 \x03(\x0b\x32\x18.weaviate.v1.BatchObject\x1a\x39\n\nReferences\x12+\n\x06values\x18\x01 \x03(\x0b\x32\x1b.weaviate.v1.BatchReferenceB\t\n\x07message\"\xe7\x06\n\x10\x42\x61tchStreamReply\x12\x38\n\x07results\x18\x01 \x01(\x0b\x32%.weaviate.v1.BatchStreamReply.ResultsH\x00\x12\x43\n\rshutting_down\x18\x02 \x01(\x0b\x32*.weaviate.v1.BatchStreamReply.ShuttingDownH\x00\x12:\n\x08shutdown\x18\x03 \x01(\x0b\x32&.weaviate.v1.BatchStreamReply.ShutdownH\x00\x12\x38\n\x07started\x18\x04 \x01(\x0b\x32%.weaviate.v1.BatchStreamReply.StartedH\x00\x12\x38\n\x07\x62\x61\x63koff\x18\x05 \x01(\x0b\x32%.weaviate.v1.BatchStreamReply.BackoffH\x00\x12\x32\n\x04\x61\x63ks\x18\x06 \x01(\x0b\x32\".weaviate.v1.BatchStreamReply.AcksH\x00\x12\x42\n\rout_of_memory\x18\x07 
\x01(\x0b\x32).weaviate.v1.BatchStreamReply.OutOfMemoryH\x00\x1a\t\n\x07Started\x1a\x0e\n\x0cShuttingDown\x1a\n\n\x08Shutdown\x1a-\n\x0bOutOfMemory\x12\r\n\x05uuids\x18\x01 \x03(\t\x12\x0f\n\x07\x62\x65\x61\x63ons\x18\x02 \x03(\t\x1a\x1d\n\x07\x42\x61\x63koff\x12\x12\n\nbatch_size\x18\x01 \x01(\x05\x1a&\n\x04\x41\x63ks\x12\r\n\x05uuids\x18\x01 \x03(\t\x12\x0f\n\x07\x62\x65\x61\x63ons\x18\x02 \x03(\t\x1a\x83\x02\n\x07Results\x12;\n\x06\x65rrors\x18\x01 \x03(\x0b\x32+.weaviate.v1.BatchStreamReply.Results.Error\x12@\n\tsuccesses\x18\x02 \x03(\x0b\x32-.weaviate.v1.BatchStreamReply.Results.Success\x1a\x42\n\x05\x45rror\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\x0e\n\x04uuid\x18\x02 \x01(\tH\x00\x12\x10\n\x06\x62\x65\x61\x63on\x18\x03 \x01(\tH\x00\x42\x08\n\x06\x64\x65tail\x1a\x35\n\x07Success\x12\x0e\n\x04uuid\x18\x02 \x01(\tH\x00\x12\x10\n\x06\x62\x65\x61\x63on\x18\x03 \x01(\tH\x00\x42\x08\n\x06\x64\x65tailB\t\n\x07message\"\xde\x07\n\x0b\x42\x61tchObject\x12\x0c\n\x04uuid\x18\x01 \x01(\t\x12\x12\n\x06vector\x18\x02 \x03(\x02\x42\x02\x18\x01\x12\x37\n\nproperties\x18\x03 \x01(\x0b\x32#.weaviate.v1.BatchObject.Properties\x12\x12\n\ncollection\x18\x04 \x01(\t\x12\x0e\n\x06tenant\x18\x05 \x01(\t\x12\x14\n\x0cvector_bytes\x18\x06 \x01(\x0c\x12%\n\x07vectors\x18\x17 \x03(\x0b\x32\x14.weaviate.v1.Vectors\x1a\x84\x05\n\nProperties\x12\x33\n\x12non_ref_properties\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\x12N\n\x17single_target_ref_props\x18\x02 \x03(\x0b\x32-.weaviate.v1.BatchObject.SingleTargetRefProps\x12L\n\x16multi_target_ref_props\x18\x03 \x03(\x0b\x32,.weaviate.v1.BatchObject.MultiTargetRefProps\x12\x43\n\x17number_array_properties\x18\x04 \x03(\x0b\x32\".weaviate.v1.NumberArrayProperties\x12=\n\x14int_array_properties\x18\x05 \x03(\x0b\x32\x1f.weaviate.v1.IntArrayProperties\x12?\n\x15text_array_properties\x18\x06 \x03(\x0b\x32 .weaviate.v1.TextArrayProperties\x12\x45\n\x18\x62oolean_array_properties\x18\x07 
\x03(\x0b\x32#.weaviate.v1.BooleanArrayProperties\x12\x38\n\x11object_properties\x18\x08 \x03(\x0b\x32\x1d.weaviate.v1.ObjectProperties\x12\x43\n\x17object_array_properties\x18\t \x03(\x0b\x32\".weaviate.v1.ObjectArrayProperties\x12\x18\n\x10\x65mpty_list_props\x18\n \x03(\t\x1a\x38\n\x14SingleTargetRefProps\x12\r\n\x05uuids\x18\x01 \x03(\t\x12\x11\n\tprop_name\x18\x02 \x01(\t\x1aR\n\x13MultiTargetRefProps\x12\r\n\x05uuids\x18\x01 \x03(\t\x12\x11\n\tprop_name\x18\x02 \x01(\t\x12\x19\n\x11target_collection\x18\x03 \x01(\t\"\x99\x01\n\x0e\x42\x61tchReference\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0f\x66rom_collection\x18\x02 \x01(\t\x12\x11\n\tfrom_uuid\x18\x03 \x01(\t\x12\x1a\n\rto_collection\x18\x04 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x07to_uuid\x18\x05 \x01(\t\x12\x0e\n\x06tenant\x18\x06 \x01(\tB\x10\n\x0e_to_collection\"\x88\x01\n\x11\x42\x61tchObjectsReply\x12\x0c\n\x04took\x18\x01 \x01(\x02\x12\x39\n\x06\x65rrors\x18\x02 \x03(\x0b\x32).weaviate.v1.BatchObjectsReply.BatchError\x1a*\n\nBatchError\x12\r\n\x05index\x18\x01 \x01(\x05\x12\r\n\x05\x65rror\x18\x02 \x01(\t\"\x8e\x01\n\x14\x42\x61tchReferencesReply\x12\x0c\n\x04took\x18\x01 \x01(\x02\x12<\n\x06\x65rrors\x18\x02 \x03(\x0b\x32,.weaviate.v1.BatchReferencesReply.BatchError\x1a*\n\nBatchError\x12\r\n\x05index\x18\x01 \x01(\x05\x12\r\n\x05\x65rror\x18\x02 \x01(\tBo\n#io.weaviate.client.grpc.protocol.v1B\x12WeaviateProtoBatchZ4github.com/weaviate/weaviate/grpc/generated;protocolb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -42,37 +42,41 @@ _globals['_BATCHSTREAMREQUEST_DATA_REFERENCES']._serialized_start=872 _globals['_BATCHSTREAMREQUEST_DATA_REFERENCES']._serialized_end=929 _globals['_BATCHSTREAMREPLY']._serialized_start=943 - _globals['_BATCHSTREAMREPLY']._serialized_end=1607 - _globals['_BATCHSTREAMREPLY_STARTED']._serialized_start=1266 - _globals['_BATCHSTREAMREPLY_STARTED']._serialized_end=1275 - 
_globals['_BATCHSTREAMREPLY_SHUTTINGDOWN']._serialized_start=1277 - _globals['_BATCHSTREAMREPLY_SHUTTINGDOWN']._serialized_end=1291 - _globals['_BATCHSTREAMREPLY_SHUTDOWN']._serialized_start=1293 - _globals['_BATCHSTREAMREPLY_SHUTDOWN']._serialized_end=1303 - _globals['_BATCHSTREAMREPLY_BACKOFF']._serialized_start=1305 - _globals['_BATCHSTREAMREPLY_BACKOFF']._serialized_end=1334 - _globals['_BATCHSTREAMREPLY_RESULTS']._serialized_start=1337 - _globals['_BATCHSTREAMREPLY_RESULTS']._serialized_end=1596 - _globals['_BATCHSTREAMREPLY_RESULTS_ERROR']._serialized_start=1475 - _globals['_BATCHSTREAMREPLY_RESULTS_ERROR']._serialized_end=1541 - _globals['_BATCHSTREAMREPLY_RESULTS_SUCCESS']._serialized_start=1543 - _globals['_BATCHSTREAMREPLY_RESULTS_SUCCESS']._serialized_end=1596 - _globals['_BATCHOBJECT']._serialized_start=1610 - _globals['_BATCHOBJECT']._serialized_end=2600 - _globals['_BATCHOBJECT_PROPERTIES']._serialized_start=1814 - _globals['_BATCHOBJECT_PROPERTIES']._serialized_end=2458 - _globals['_BATCHOBJECT_SINGLETARGETREFPROPS']._serialized_start=2460 - _globals['_BATCHOBJECT_SINGLETARGETREFPROPS']._serialized_end=2516 - _globals['_BATCHOBJECT_MULTITARGETREFPROPS']._serialized_start=2518 - _globals['_BATCHOBJECT_MULTITARGETREFPROPS']._serialized_end=2600 - _globals['_BATCHREFERENCE']._serialized_start=2603 - _globals['_BATCHREFERENCE']._serialized_end=2756 - _globals['_BATCHOBJECTSREPLY']._serialized_start=2759 - _globals['_BATCHOBJECTSREPLY']._serialized_end=2895 - _globals['_BATCHOBJECTSREPLY_BATCHERROR']._serialized_start=2853 - _globals['_BATCHOBJECTSREPLY_BATCHERROR']._serialized_end=2895 - _globals['_BATCHREFERENCESREPLY']._serialized_start=2898 - _globals['_BATCHREFERENCESREPLY']._serialized_end=3040 - _globals['_BATCHREFERENCESREPLY_BATCHERROR']._serialized_start=2853 - _globals['_BATCHREFERENCESREPLY_BATCHERROR']._serialized_end=2895 + _globals['_BATCHSTREAMREPLY']._serialized_end=1814 + _globals['_BATCHSTREAMREPLY_STARTED']._serialized_start=1386 + 
_globals['_BATCHSTREAMREPLY_STARTED']._serialized_end=1395 + _globals['_BATCHSTREAMREPLY_SHUTTINGDOWN']._serialized_start=1397 + _globals['_BATCHSTREAMREPLY_SHUTTINGDOWN']._serialized_end=1411 + _globals['_BATCHSTREAMREPLY_SHUTDOWN']._serialized_start=1413 + _globals['_BATCHSTREAMREPLY_SHUTDOWN']._serialized_end=1423 + _globals['_BATCHSTREAMREPLY_OUTOFMEMORY']._serialized_start=1425 + _globals['_BATCHSTREAMREPLY_OUTOFMEMORY']._serialized_end=1470 + _globals['_BATCHSTREAMREPLY_BACKOFF']._serialized_start=1472 + _globals['_BATCHSTREAMREPLY_BACKOFF']._serialized_end=1501 + _globals['_BATCHSTREAMREPLY_ACKS']._serialized_start=1503 + _globals['_BATCHSTREAMREPLY_ACKS']._serialized_end=1541 + _globals['_BATCHSTREAMREPLY_RESULTS']._serialized_start=1544 + _globals['_BATCHSTREAMREPLY_RESULTS']._serialized_end=1803 + _globals['_BATCHSTREAMREPLY_RESULTS_ERROR']._serialized_start=1682 + _globals['_BATCHSTREAMREPLY_RESULTS_ERROR']._serialized_end=1748 + _globals['_BATCHSTREAMREPLY_RESULTS_SUCCESS']._serialized_start=1750 + _globals['_BATCHSTREAMREPLY_RESULTS_SUCCESS']._serialized_end=1803 + _globals['_BATCHOBJECT']._serialized_start=1817 + _globals['_BATCHOBJECT']._serialized_end=2807 + _globals['_BATCHOBJECT_PROPERTIES']._serialized_start=2021 + _globals['_BATCHOBJECT_PROPERTIES']._serialized_end=2665 + _globals['_BATCHOBJECT_SINGLETARGETREFPROPS']._serialized_start=2667 + _globals['_BATCHOBJECT_SINGLETARGETREFPROPS']._serialized_end=2723 + _globals['_BATCHOBJECT_MULTITARGETREFPROPS']._serialized_start=2725 + _globals['_BATCHOBJECT_MULTITARGETREFPROPS']._serialized_end=2807 + _globals['_BATCHREFERENCE']._serialized_start=2810 + _globals['_BATCHREFERENCE']._serialized_end=2963 + _globals['_BATCHOBJECTSREPLY']._serialized_start=2966 + _globals['_BATCHOBJECTSREPLY']._serialized_end=3102 + _globals['_BATCHOBJECTSREPLY_BATCHERROR']._serialized_start=3060 + _globals['_BATCHOBJECTSREPLY_BATCHERROR']._serialized_end=3102 + _globals['_BATCHREFERENCESREPLY']._serialized_start=3105 + 
_globals['_BATCHREFERENCESREPLY']._serialized_end=3247 + _globals['_BATCHREFERENCESREPLY_BATCHERROR']._serialized_start=3060 + _globals['_BATCHREFERENCESREPLY_BATCHERROR']._serialized_end=3102 # @@protoc_insertion_point(module_scope) diff --git a/weaviate/proto/v1/v4216/v1/batch_pb2.pyi b/weaviate/proto/v1/v4216/v1/batch_pb2.pyi index 17ef5a965..a0ada063c 100644 --- a/weaviate/proto/v1/v4216/v1/batch_pb2.pyi +++ b/weaviate/proto/v1/v4216/v1/batch_pb2.pyi @@ -59,7 +59,7 @@ class BatchStreamRequest(_message.Message): def __init__(self, start: _Optional[_Union[BatchStreamRequest.Start, _Mapping]] = ..., data: _Optional[_Union[BatchStreamRequest.Data, _Mapping]] = ..., stop: _Optional[_Union[BatchStreamRequest.Stop, _Mapping]] = ...) -> None: ... class BatchStreamReply(_message.Message): - __slots__ = ["results", "shutting_down", "shutdown", "started", "backoff"] + __slots__ = ["results", "shutting_down", "shutdown", "started", "backoff", "acks", "out_of_memory"] class Started(_message.Message): __slots__ = [] def __init__(self) -> None: ... @@ -69,11 +69,25 @@ class BatchStreamReply(_message.Message): class Shutdown(_message.Message): __slots__ = [] def __init__(self) -> None: ... + class OutOfMemory(_message.Message): + __slots__ = ["uuids", "beacons"] + UUIDS_FIELD_NUMBER: _ClassVar[int] + BEACONS_FIELD_NUMBER: _ClassVar[int] + uuids: _containers.RepeatedScalarFieldContainer[str] + beacons: _containers.RepeatedScalarFieldContainer[str] + def __init__(self, uuids: _Optional[_Iterable[str]] = ..., beacons: _Optional[_Iterable[str]] = ...) -> None: ... class Backoff(_message.Message): __slots__ = ["batch_size"] BATCH_SIZE_FIELD_NUMBER: _ClassVar[int] batch_size: int def __init__(self, batch_size: _Optional[int] = ...) -> None: ... 
+ class Acks(_message.Message): + __slots__ = ["uuids", "beacons"] + UUIDS_FIELD_NUMBER: _ClassVar[int] + BEACONS_FIELD_NUMBER: _ClassVar[int] + uuids: _containers.RepeatedScalarFieldContainer[str] + beacons: _containers.RepeatedScalarFieldContainer[str] + def __init__(self, uuids: _Optional[_Iterable[str]] = ..., beacons: _Optional[_Iterable[str]] = ...) -> None: ... class Results(_message.Message): __slots__ = ["errors", "successes"] class Error(_message.Message): @@ -102,12 +116,16 @@ class BatchStreamReply(_message.Message): SHUTDOWN_FIELD_NUMBER: _ClassVar[int] STARTED_FIELD_NUMBER: _ClassVar[int] BACKOFF_FIELD_NUMBER: _ClassVar[int] + ACKS_FIELD_NUMBER: _ClassVar[int] + OUT_OF_MEMORY_FIELD_NUMBER: _ClassVar[int] results: BatchStreamReply.Results shutting_down: BatchStreamReply.ShuttingDown shutdown: BatchStreamReply.Shutdown started: BatchStreamReply.Started backoff: BatchStreamReply.Backoff - def __init__(self, results: _Optional[_Union[BatchStreamReply.Results, _Mapping]] = ..., shutting_down: _Optional[_Union[BatchStreamReply.ShuttingDown, _Mapping]] = ..., shutdown: _Optional[_Union[BatchStreamReply.Shutdown, _Mapping]] = ..., started: _Optional[_Union[BatchStreamReply.Started, _Mapping]] = ..., backoff: _Optional[_Union[BatchStreamReply.Backoff, _Mapping]] = ...) -> None: ... + acks: BatchStreamReply.Acks + out_of_memory: BatchStreamReply.OutOfMemory + def __init__(self, results: _Optional[_Union[BatchStreamReply.Results, _Mapping]] = ..., shutting_down: _Optional[_Union[BatchStreamReply.ShuttingDown, _Mapping]] = ..., shutdown: _Optional[_Union[BatchStreamReply.Shutdown, _Mapping]] = ..., started: _Optional[_Union[BatchStreamReply.Started, _Mapping]] = ..., backoff: _Optional[_Union[BatchStreamReply.Backoff, _Mapping]] = ..., acks: _Optional[_Union[BatchStreamReply.Acks, _Mapping]] = ..., out_of_memory: _Optional[_Union[BatchStreamReply.OutOfMemory, _Mapping]] = ...) -> None: ... 
class BatchObject(_message.Message): __slots__ = ["uuid", "vector", "properties", "collection", "tenant", "vector_bytes", "vectors"] diff --git a/weaviate/proto/v1/v4216/v1/generative_pb2.py b/weaviate/proto/v1/v4216/v1/generative_pb2.py index 139e0b8a7..46bef0181 100644 --- a/weaviate/proto/v1/v4216/v1/generative_pb2.py +++ b/weaviate/proto/v1/v4216/v1/generative_pb2.py @@ -14,7 +14,7 @@ from weaviate.proto.v1.v4216.v1 import base_pb2 as v1_dot_base__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13v1/generative.proto\x12\x0bweaviate.v1\x1a\rv1/base.proto\"\xdd\x03\n\x10GenerativeSearch\x12\"\n\x16single_response_prompt\x18\x01 \x01(\tB\x02\x18\x01\x12!\n\x15grouped_response_task\x18\x02 \x01(\tB\x02\x18\x01\x12\x1e\n\x12grouped_properties\x18\x03 \x03(\tB\x02\x18\x01\x12\x34\n\x06single\x18\x04 \x01(\x0b\x32$.weaviate.v1.GenerativeSearch.Single\x12\x36\n\x07grouped\x18\x05 \x01(\x0b\x32%.weaviate.v1.GenerativeSearch.Grouped\x1aY\n\x06Single\x12\x0e\n\x06prompt\x18\x01 \x01(\t\x12\r\n\x05\x64\x65\x62ug\x18\x02 \x01(\x08\x12\x30\n\x07queries\x18\x03 \x03(\x0b\x32\x1f.weaviate.v1.GenerativeProvider\x1a\x98\x01\n\x07Grouped\x12\x0c\n\x04task\x18\x01 \x01(\t\x12/\n\nproperties\x18\x02 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x00\x88\x01\x01\x12\x30\n\x07queries\x18\x03 \x03(\x0b\x32\x1f.weaviate.v1.GenerativeProvider\x12\r\n\x05\x64\x65\x62ug\x18\x04 \x01(\x08\x42\r\n\x0b_properties\"\xfd\x05\n\x12GenerativeProvider\x12\x17\n\x0freturn_metadata\x18\x01 \x01(\x08\x12\x35\n\tanthropic\x18\x02 \x01(\x0b\x32 .weaviate.v1.GenerativeAnthropicH\x00\x12\x33\n\x08\x61nyscale\x18\x03 \x01(\x0b\x32\x1f.weaviate.v1.GenerativeAnyscaleH\x00\x12)\n\x03\x61ws\x18\x04 \x01(\x0b\x32\x1a.weaviate.v1.GenerativeAWSH\x00\x12/\n\x06\x63ohere\x18\x05 \x01(\x0b\x32\x1d.weaviate.v1.GenerativeCohereH\x00\x12-\n\x05\x64ummy\x18\x06 \x01(\x0b\x32\x1c.weaviate.v1.GenerativeDummyH\x00\x12\x31\n\x07mistral\x18\x07 
\x01(\x0b\x32\x1e.weaviate.v1.GenerativeMistralH\x00\x12/\n\x06ollama\x18\x08 \x01(\x0b\x32\x1d.weaviate.v1.GenerativeOllamaH\x00\x12/\n\x06openai\x18\t \x01(\x0b\x32\x1d.weaviate.v1.GenerativeOpenAIH\x00\x12/\n\x06google\x18\n \x01(\x0b\x32\x1d.weaviate.v1.GenerativeGoogleH\x00\x12\x37\n\ndatabricks\x18\x0b \x01(\x0b\x32!.weaviate.v1.GenerativeDatabricksH\x00\x12\x37\n\nfriendliai\x18\x0c \x01(\x0b\x32!.weaviate.v1.GenerativeFriendliAIH\x00\x12/\n\x06nvidia\x18\r \x01(\x0b\x32\x1d.weaviate.v1.GenerativeNvidiaH\x00\x12)\n\x03xai\x18\x0e \x01(\x0b\x32\x1a.weaviate.v1.GenerativeXAIH\x00\x12;\n\x0c\x63ontextualai\x18\x0f \x01(\x0b\x32#.weaviate.v1.GenerativeContextualAIH\x00\x42\x06\n\x04kind\"\xb1\x03\n\x13GenerativeAnthropic\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x18\n\x0btemperature\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x12\n\x05top_k\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12\x12\n\x05top_p\x18\x06 \x01(\x01H\x05\x88\x01\x01\x12\x33\n\x0estop_sequences\x18\x07 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x06\x88\x01\x01\x12+\n\x06images\x18\x08 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x35\n\x10image_properties\x18\t \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x08\x88\x01\x01\x42\x0b\n\t_base_urlB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_kB\x08\n\x06_top_pB\x11\n\x0f_stop_sequencesB\t\n\x07_imagesB\x13\n\x11_image_properties\"\x80\x01\n\x12GenerativeAnyscale\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 \x01(\x01H\x02\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\x0e\n\x0c_temperature\"\xc5\x03\n\rGenerativeAWS\x12\x12\n\x05model\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0btemperature\x18\x08 \x01(\x01H\x01\x88\x01\x01\x12\x14\n\x07service\x18\t 
\x01(\tH\x02\x88\x01\x01\x12\x13\n\x06region\x18\n \x01(\tH\x03\x88\x01\x01\x12\x15\n\x08\x65ndpoint\x18\x0b \x01(\tH\x04\x88\x01\x01\x12\x19\n\x0ctarget_model\x18\x0c \x01(\tH\x05\x88\x01\x01\x12\x1b\n\x0etarget_variant\x18\r \x01(\tH\x06\x88\x01\x01\x12+\n\x06images\x18\x0e \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0f \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x08\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x10 \x01(\x03H\t\x88\x01\x01\x42\x08\n\x06_modelB\x0e\n\x0c_temperatureB\n\n\x08_serviceB\t\n\x07_regionB\x0b\n\t_endpointB\x0f\n\r_target_modelB\x11\n\x0f_target_variantB\t\n\x07_imagesB\x13\n\x11_image_propertiesB\r\n\x0b_max_tokens\"\x88\x04\n\x10GenerativeCohere\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1e\n\x11\x66requency_penalty\x18\x02 \x01(\x01H\x01\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x12\x12\n\x05model\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x0e\n\x01k\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12\x0e\n\x01p\x18\x06 \x01(\x01H\x05\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x07 \x01(\x01H\x06\x88\x01\x01\x12\x33\n\x0estop_sequences\x18\x08 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x18\n\x0btemperature\x18\t \x01(\x01H\x08\x88\x01\x01\x12+\n\x06images\x18\n \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\t\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0b \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\n\x88\x01\x01\x42\x0b\n\t_base_urlB\x14\n\x12_frequency_penaltyB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x04\n\x02_kB\x04\n\x02_pB\x13\n\x11_presence_penaltyB\x11\n\x0f_stop_sequencesB\x0e\n\x0c_temperatureB\t\n\x07_imagesB\x13\n\x11_image_properties\"\x11\n\x0fGenerativeDummy\"\xc5\x01\n\x11GenerativeMistral\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x18\n\x0btemperature\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x12\n\x05top_p\x18\x05 
\x01(\x01H\x04\x88\x01\x01\x42\x0b\n\t_base_urlB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_p\"\x8a\x02\n\x10GenerativeOllama\x12\x19\n\x0c\x61pi_endpoint\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12+\n\x06images\x18\x04 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x03\x88\x01\x01\x12\x35\n\x10image_properties\x18\x05 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x04\x88\x01\x01\x42\x0f\n\r_api_endpointB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\t\n\x07_imagesB\x13\n\x11_image_properties\"\xe3\x08\n\x10GenerativeOpenAI\x12\x1e\n\x11\x66requency_penalty\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x0e\n\x01n\x18\x04 \x01(\x03H\x03\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x05 \x01(\x01H\x04\x88\x01\x01\x12)\n\x04stop\x18\x06 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x05\x88\x01\x01\x12\x18\n\x0btemperature\x18\x07 \x01(\x01H\x06\x88\x01\x01\x12\x12\n\x05top_p\x18\x08 \x01(\x01H\x07\x88\x01\x01\x12\x15\n\x08\x62\x61se_url\x18\t \x01(\tH\x08\x88\x01\x01\x12\x18\n\x0b\x61pi_version\x18\n \x01(\tH\t\x88\x01\x01\x12\x1a\n\rresource_name\x18\x0b \x01(\tH\n\x88\x01\x01\x12\x1a\n\rdeployment_id\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12\x15\n\x08is_azure\x18\r \x01(\x08H\x0c\x88\x01\x01\x12+\n\x06images\x18\x0e \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\r\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0f \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x0e\x88\x01\x01\x12L\n\x10reasoning_effort\x18\x10 \x01(\x0e\x32-.weaviate.v1.GenerativeOpenAI.ReasoningEffortH\x0f\x88\x01\x01\x12?\n\tverbosity\x18\x11 \x01(\x0e\x32\'.weaviate.v1.GenerativeOpenAI.VerbosityH\x10\x88\x01\x01\"\xa3\x01\n\x0fReasoningEffort\x12 
\n\x1cREASONING_EFFORT_UNSPECIFIED\x10\x00\x12\x1c\n\x18REASONING_EFFORT_MINIMAL\x10\x01\x12\x18\n\x14REASONING_EFFORT_LOW\x10\x02\x12\x1b\n\x17REASONING_EFFORT_MEDIUM\x10\x03\x12\x19\n\x15REASONING_EFFORT_HIGH\x10\x04\"c\n\tVerbosity\x12\x19\n\x15VERBOSITY_UNSPECIFIED\x10\x00\x12\x11\n\rVERBOSITY_LOW\x10\x01\x12\x14\n\x10VERBOSITY_MEDIUM\x10\x02\x12\x12\n\x0eVERBOSITY_HIGH\x10\x03\x42\x14\n\x12_frequency_penaltyB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x04\n\x02_nB\x13\n\x11_presence_penaltyB\x07\n\x05_stopB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\x0b\n\t_base_urlB\x0e\n\x0c_api_versionB\x10\n\x0e_resource_nameB\x10\n\x0e_deployment_idB\x0b\n\t_is_azureB\t\n\x07_imagesB\x13\n\x11_image_propertiesB\x13\n\x11_reasoning_effortB\x0c\n\n_verbosity\"\x92\x05\n\x10GenerativeGoogle\x12\x1e\n\x11\x66requency_penalty\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x18\n\x0btemperature\x18\x05 \x01(\x01H\x04\x88\x01\x01\x12\x12\n\x05top_k\x18\x06 \x01(\x03H\x05\x88\x01\x01\x12\x12\n\x05top_p\x18\x07 \x01(\x01H\x06\x88\x01\x01\x12\x33\n\x0estop_sequences\x18\x08 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x19\n\x0c\x61pi_endpoint\x18\t \x01(\tH\x08\x88\x01\x01\x12\x17\n\nproject_id\x18\n \x01(\tH\t\x88\x01\x01\x12\x18\n\x0b\x65ndpoint_id\x18\x0b \x01(\tH\n\x88\x01\x01\x12\x13\n\x06region\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12+\n\x06images\x18\r \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x0c\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0e 
\x01(\x0b\x32\x16.weaviate.v1.TextArrayH\r\x88\x01\x01\x42\x14\n\x12_frequency_penaltyB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x13\n\x11_presence_penaltyB\x0e\n\x0c_temperatureB\x08\n\x06_top_kB\x08\n\x06_top_pB\x11\n\x0f_stop_sequencesB\x0f\n\r_api_endpointB\r\n\x0b_project_idB\x0e\n\x0c_endpoint_idB\t\n\x07_regionB\t\n\x07_imagesB\x13\n\x11_image_properties\"\xd0\x03\n\x14GenerativeDatabricks\x12\x15\n\x08\x65ndpoint\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x1e\n\x11\x66requency_penalty\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x16\n\tlog_probs\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x1a\n\rtop_log_probs\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x06 \x01(\x03H\x05\x88\x01\x01\x12\x0e\n\x01n\x18\x07 \x01(\x03H\x06\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x08 \x01(\x01H\x07\x88\x01\x01\x12)\n\x04stop\x18\t \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x08\x88\x01\x01\x12\x18\n\x0btemperature\x18\n \x01(\x01H\t\x88\x01\x01\x12\x12\n\x05top_p\x18\x0b \x01(\x01H\n\x88\x01\x01\x42\x0b\n\t_endpointB\x08\n\x06_modelB\x14\n\x12_frequency_penaltyB\x0c\n\n_log_probsB\x10\n\x0e_top_log_probsB\r\n\x0b_max_tokensB\x04\n\x02_nB\x13\n\x11_presence_penaltyB\x07\n\x05_stopB\x0e\n\x0c_temperatureB\x08\n\x06_top_p\"\xde\x01\n\x14GenerativeFriendliAI\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x12\x18\n\x0btemperature\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x0e\n\x01n\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12\x12\n\x05top_p\x18\x06 \x01(\x01H\x05\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\r\n\x0b_max_tokensB\x0e\n\x0c_temperatureB\x04\n\x02_nB\x08\n\x06_top_p\"\xc4\x01\n\x10GenerativeNvidia\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x12\n\x05top_p\x18\x04 
\x01(\x01H\x03\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x05 \x01(\x03H\x04\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\r\n\x0b_max_tokens\"\xc5\x02\n\rGenerativeXAI\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x12\n\x05top_p\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12+\n\x06images\x18\x06 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x05\x88\x01\x01\x12\x35\n\x10image_properties\x18\x07 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x06\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\r\n\x0b_max_tokensB\t\n\x07_imagesB\x13\n\x11_image_properties\"\xce\x02\n\x16GenerativeContextualAI\x12\x12\n\x05model\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0btemperature\x18\x02 \x01(\x01H\x01\x88\x01\x01\x12\x12\n\x05top_p\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x1b\n\x0emax_new_tokens\x18\x04 \x01(\x03H\x03\x88\x01\x01\x12\x1a\n\rsystem_prompt\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x1d\n\x10\x61void_commentary\x18\x06 \x01(\x08H\x05\x88\x01\x01\x12.\n\tknowledge\x18\x07 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x06\x88\x01\x01\x42\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\x11\n\x0f_max_new_tokensB\x10\n\x0e_system_promptB\x13\n\x11_avoid_commentaryB\x0c\n\n_knowledge\"\x92\x01\n\x1bGenerativeAnthropicMetadata\x12=\n\x05usage\x18\x01 \x01(\x0b\x32..weaviate.v1.GenerativeAnthropicMetadata.Usage\x1a\x34\n\x05Usage\x12\x14\n\x0cinput_tokens\x18\x01 \x01(\x03\x12\x15\n\routput_tokens\x18\x02 \x01(\x03\"\x1c\n\x1aGenerativeAnyscaleMetadata\"\x17\n\x15GenerativeAWSMetadata\"\x9c\x06\n\x18GenerativeCohereMetadata\x12J\n\x0b\x61pi_version\x18\x01 \x01(\x0b\x32\x30.weaviate.v1.GenerativeCohereMetadata.ApiVersionH\x00\x88\x01\x01\x12L\n\x0c\x62illed_units\x18\x02 
\x01(\x0b\x32\x31.weaviate.v1.GenerativeCohereMetadata.BilledUnitsH\x01\x88\x01\x01\x12\x41\n\x06tokens\x18\x03 \x01(\x0b\x32,.weaviate.v1.GenerativeCohereMetadata.TokensH\x02\x88\x01\x01\x12-\n\x08warnings\x18\x04 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x03\x88\x01\x01\x1a\x8e\x01\n\nApiVersion\x12\x14\n\x07version\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1a\n\ris_deprecated\x18\x02 \x01(\x08H\x01\x88\x01\x01\x12\x1c\n\x0fis_experimental\x18\x03 \x01(\x08H\x02\x88\x01\x01\x42\n\n\x08_versionB\x10\n\x0e_is_deprecatedB\x12\n\x10_is_experimental\x1a\xc5\x01\n\x0b\x42illedUnits\x12\x19\n\x0cinput_tokens\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1a\n\routput_tokens\x18\x02 \x01(\x01H\x01\x88\x01\x01\x12\x19\n\x0csearch_units\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x1c\n\x0f\x63lassifications\x18\x04 \x01(\x01H\x03\x88\x01\x01\x42\x0f\n\r_input_tokensB\x10\n\x0e_output_tokensB\x0f\n\r_search_unitsB\x12\n\x10_classifications\x1a\x62\n\x06Tokens\x12\x19\n\x0cinput_tokens\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1a\n\routput_tokens\x18\x02 \x01(\x01H\x01\x88\x01\x01\x42\x0f\n\r_input_tokensB\x10\n\x0e_output_tokensB\x0e\n\x0c_api_versionB\x0f\n\r_billed_unitsB\t\n\x07_tokensB\x0b\n\t_warnings\"\x19\n\x17GenerativeDummyMetadata\"\x81\x02\n\x19GenerativeMistralMetadata\x12@\n\x05usage\x18\x01 \x01(\x0b\x32,.weaviate.v1.GenerativeMistralMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\x1a\n\x18GenerativeOllamaMetadata\"\xff\x01\n\x18GenerativeOpenAIMetadata\x12?\n\x05usage\x18\x01 \x01(\x0b\x32+.weaviate.v1.GenerativeOpenAIMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 
\x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\xe8\x06\n\x18GenerativeGoogleMetadata\x12\x45\n\x08metadata\x18\x01 \x01(\x0b\x32..weaviate.v1.GenerativeGoogleMetadata.MetadataH\x00\x88\x01\x01\x12P\n\x0eusage_metadata\x18\x02 \x01(\x0b\x32\x33.weaviate.v1.GenerativeGoogleMetadata.UsageMetadataH\x01\x88\x01\x01\x1a~\n\nTokenCount\x12&\n\x19total_billable_characters\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x42\x1c\n\x1a_total_billable_charactersB\x0f\n\r_total_tokens\x1a\xe1\x01\n\rTokenMetadata\x12P\n\x11input_token_count\x18\x01 \x01(\x0b\x32\x30.weaviate.v1.GenerativeGoogleMetadata.TokenCountH\x00\x88\x01\x01\x12Q\n\x12output_token_count\x18\x02 \x01(\x0b\x32\x30.weaviate.v1.GenerativeGoogleMetadata.TokenCountH\x01\x88\x01\x01\x42\x14\n\x12_input_token_countB\x15\n\x13_output_token_count\x1ao\n\x08Metadata\x12P\n\x0etoken_metadata\x18\x01 \x01(\x0b\x32\x33.weaviate.v1.GenerativeGoogleMetadata.TokenMetadataH\x00\x88\x01\x01\x42\x11\n\x0f_token_metadata\x1a\xbd\x01\n\rUsageMetadata\x12\x1f\n\x12prompt_token_count\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12#\n\x16\x63\x61ndidates_token_count\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1e\n\x11total_token_count\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x15\n\x13_prompt_token_countB\x19\n\x17_candidates_token_countB\x14\n\x12_total_token_countB\x0b\n\t_metadataB\x11\n\x0f_usage_metadata\"\x87\x02\n\x1cGenerativeDatabricksMetadata\x12\x43\n\x05usage\x18\x01 \x01(\x0b\x32/.weaviate.v1.GenerativeDatabricksMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 
\x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\x87\x02\n\x1cGenerativeFriendliAIMetadata\x12\x43\n\x05usage\x18\x01 \x01(\x0b\x32/.weaviate.v1.GenerativeFriendliAIMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\xff\x01\n\x18GenerativeNvidiaMetadata\x12?\n\x05usage\x18\x01 \x01(\x0b\x32+.weaviate.v1.GenerativeNvidiaMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\xf9\x01\n\x15GenerativeXAIMetadata\x12<\n\x05usage\x18\x01 \x01(\x0b\x32(.weaviate.v1.GenerativeXAIMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\x8f\x06\n\x12GenerativeMetadata\x12=\n\tanthropic\x18\x01 \x01(\x0b\x32(.weaviate.v1.GenerativeAnthropicMetadataH\x00\x12;\n\x08\x61nyscale\x18\x02 \x01(\x0b\x32\'.weaviate.v1.GenerativeAnyscaleMetadataH\x00\x12\x31\n\x03\x61ws\x18\x03 \x01(\x0b\x32\".weaviate.v1.GenerativeAWSMetadataH\x00\x12\x37\n\x06\x63ohere\x18\x04 \x01(\x0b\x32%.weaviate.v1.GenerativeCohereMetadataH\x00\x12\x35\n\x05\x64ummy\x18\x05 \x01(\x0b\x32$.weaviate.v1.GenerativeDummyMetadataH\x00\x12\x39\n\x07mistral\x18\x06 
\x01(\x0b\x32&.weaviate.v1.GenerativeMistralMetadataH\x00\x12\x37\n\x06ollama\x18\x07 \x01(\x0b\x32%.weaviate.v1.GenerativeOllamaMetadataH\x00\x12\x37\n\x06openai\x18\x08 \x01(\x0b\x32%.weaviate.v1.GenerativeOpenAIMetadataH\x00\x12\x37\n\x06google\x18\t \x01(\x0b\x32%.weaviate.v1.GenerativeGoogleMetadataH\x00\x12?\n\ndatabricks\x18\n \x01(\x0b\x32).weaviate.v1.GenerativeDatabricksMetadataH\x00\x12?\n\nfriendliai\x18\x0b \x01(\x0b\x32).weaviate.v1.GenerativeFriendliAIMetadataH\x00\x12\x37\n\x06nvidia\x18\x0c \x01(\x0b\x32%.weaviate.v1.GenerativeNvidiaMetadataH\x00\x12\x31\n\x03xai\x18\r \x01(\x0b\x32\".weaviate.v1.GenerativeXAIMetadataH\x00\x42\x06\n\x04kind\"\xa2\x01\n\x0fGenerativeReply\x12\x0e\n\x06result\x18\x01 \x01(\t\x12\x30\n\x05\x64\x65\x62ug\x18\x02 \x01(\x0b\x32\x1c.weaviate.v1.GenerativeDebugH\x00\x88\x01\x01\x12\x36\n\x08metadata\x18\x03 \x01(\x0b\x32\x1f.weaviate.v1.GenerativeMetadataH\x01\x88\x01\x01\x42\x08\n\x06_debugB\x0b\n\t_metadata\"@\n\x10GenerativeResult\x12,\n\x06values\x18\x01 \x03(\x0b\x32\x1c.weaviate.v1.GenerativeReply\";\n\x0fGenerativeDebug\x12\x18\n\x0b\x66ull_prompt\x18\x01 \x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_full_promptBt\n#io.weaviate.client.grpc.protocol.v1B\x17WeaviateProtoGenerativeZ4github.com/weaviate/weaviate/grpc/generated;protocolb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13v1/generative.proto\x12\x0bweaviate.v1\x1a\rv1/base.proto\"\xdd\x03\n\x10GenerativeSearch\x12\"\n\x16single_response_prompt\x18\x01 \x01(\tB\x02\x18\x01\x12!\n\x15grouped_response_task\x18\x02 \x01(\tB\x02\x18\x01\x12\x1e\n\x12grouped_properties\x18\x03 \x03(\tB\x02\x18\x01\x12\x34\n\x06single\x18\x04 \x01(\x0b\x32$.weaviate.v1.GenerativeSearch.Single\x12\x36\n\x07grouped\x18\x05 \x01(\x0b\x32%.weaviate.v1.GenerativeSearch.Grouped\x1aY\n\x06Single\x12\x0e\n\x06prompt\x18\x01 \x01(\t\x12\r\n\x05\x64\x65\x62ug\x18\x02 \x01(\x08\x12\x30\n\x07queries\x18\x03 
\x03(\x0b\x32\x1f.weaviate.v1.GenerativeProvider\x1a\x98\x01\n\x07Grouped\x12\x0c\n\x04task\x18\x01 \x01(\t\x12/\n\nproperties\x18\x02 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x00\x88\x01\x01\x12\x30\n\x07queries\x18\x03 \x03(\x0b\x32\x1f.weaviate.v1.GenerativeProvider\x12\r\n\x05\x64\x65\x62ug\x18\x04 \x01(\x08\x42\r\n\x0b_properties\"\xfd\x05\n\x12GenerativeProvider\x12\x17\n\x0freturn_metadata\x18\x01 \x01(\x08\x12\x35\n\tanthropic\x18\x02 \x01(\x0b\x32 .weaviate.v1.GenerativeAnthropicH\x00\x12\x33\n\x08\x61nyscale\x18\x03 \x01(\x0b\x32\x1f.weaviate.v1.GenerativeAnyscaleH\x00\x12)\n\x03\x61ws\x18\x04 \x01(\x0b\x32\x1a.weaviate.v1.GenerativeAWSH\x00\x12/\n\x06\x63ohere\x18\x05 \x01(\x0b\x32\x1d.weaviate.v1.GenerativeCohereH\x00\x12-\n\x05\x64ummy\x18\x06 \x01(\x0b\x32\x1c.weaviate.v1.GenerativeDummyH\x00\x12\x31\n\x07mistral\x18\x07 \x01(\x0b\x32\x1e.weaviate.v1.GenerativeMistralH\x00\x12/\n\x06ollama\x18\x08 \x01(\x0b\x32\x1d.weaviate.v1.GenerativeOllamaH\x00\x12/\n\x06openai\x18\t \x01(\x0b\x32\x1d.weaviate.v1.GenerativeOpenAIH\x00\x12/\n\x06google\x18\n \x01(\x0b\x32\x1d.weaviate.v1.GenerativeGoogleH\x00\x12\x37\n\ndatabricks\x18\x0b \x01(\x0b\x32!.weaviate.v1.GenerativeDatabricksH\x00\x12\x37\n\nfriendliai\x18\x0c \x01(\x0b\x32!.weaviate.v1.GenerativeFriendliAIH\x00\x12/\n\x06nvidia\x18\r \x01(\x0b\x32\x1d.weaviate.v1.GenerativeNvidiaH\x00\x12)\n\x03xai\x18\x0e \x01(\x0b\x32\x1a.weaviate.v1.GenerativeXAIH\x00\x12;\n\x0c\x63ontextualai\x18\x0f \x01(\x0b\x32#.weaviate.v1.GenerativeContextualAIH\x00\x42\x06\n\x04kind\"\xb1\x03\n\x13GenerativeAnthropic\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x18\n\x0btemperature\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x12\n\x05top_k\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12\x12\n\x05top_p\x18\x06 \x01(\x01H\x05\x88\x01\x01\x12\x33\n\x0estop_sequences\x18\x07 
\x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x06\x88\x01\x01\x12+\n\x06images\x18\x08 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x35\n\x10image_properties\x18\t \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x08\x88\x01\x01\x42\x0b\n\t_base_urlB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_kB\x08\n\x06_top_pB\x11\n\x0f_stop_sequencesB\t\n\x07_imagesB\x13\n\x11_image_properties\"\x80\x01\n\x12GenerativeAnyscale\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 \x01(\x01H\x02\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\x0e\n\x0c_temperature\"\x8d\x04\n\rGenerativeAWS\x12\x12\n\x05model\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0btemperature\x18\x08 \x01(\x01H\x01\x88\x01\x01\x12\x14\n\x07service\x18\t \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06region\x18\n \x01(\tH\x03\x88\x01\x01\x12\x15\n\x08\x65ndpoint\x18\x0b \x01(\tH\x04\x88\x01\x01\x12\x19\n\x0ctarget_model\x18\x0c \x01(\tH\x05\x88\x01\x01\x12\x1b\n\x0etarget_variant\x18\r \x01(\tH\x06\x88\x01\x01\x12+\n\x06images\x18\x0e \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0f \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x08\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x10 \x01(\x03H\t\x88\x01\x01\x12\x33\n\x0estop_sequences\x18\x11 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\n\x88\x01\x01\x42\x08\n\x06_modelB\x0e\n\x0c_temperatureB\n\n\x08_serviceB\t\n\x07_regionB\x0b\n\t_endpointB\x0f\n\r_target_modelB\x11\n\x0f_target_variantB\t\n\x07_imagesB\x13\n\x11_image_propertiesB\r\n\x0b_max_tokensB\x11\n\x0f_stop_sequences\"\x88\x04\n\x10GenerativeCohere\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1e\n\x11\x66requency_penalty\x18\x02 \x01(\x01H\x01\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x12\x12\n\x05model\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x0e\n\x01k\x18\x05 
\x01(\x03H\x04\x88\x01\x01\x12\x0e\n\x01p\x18\x06 \x01(\x01H\x05\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x07 \x01(\x01H\x06\x88\x01\x01\x12\x33\n\x0estop_sequences\x18\x08 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x18\n\x0btemperature\x18\t \x01(\x01H\x08\x88\x01\x01\x12+\n\x06images\x18\n \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\t\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0b \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\n\x88\x01\x01\x42\x0b\n\t_base_urlB\x14\n\x12_frequency_penaltyB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x04\n\x02_kB\x04\n\x02_pB\x13\n\x11_presence_penaltyB\x11\n\x0f_stop_sequencesB\x0e\n\x0c_temperatureB\t\n\x07_imagesB\x13\n\x11_image_properties\"\x11\n\x0fGenerativeDummy\"\xc5\x01\n\x11GenerativeMistral\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x18\n\x0btemperature\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x12\n\x05top_p\x18\x05 \x01(\x01H\x04\x88\x01\x01\x42\x0b\n\t_base_urlB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_p\"\x8a\x02\n\x10GenerativeOllama\x12\x19\n\x0c\x61pi_endpoint\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12+\n\x06images\x18\x04 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x03\x88\x01\x01\x12\x35\n\x10image_properties\x18\x05 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x04\x88\x01\x01\x42\x0f\n\r_api_endpointB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\t\n\x07_imagesB\x13\n\x11_image_properties\"\xe3\x08\n\x10GenerativeOpenAI\x12\x1e\n\x11\x66requency_penalty\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x0e\n\x01n\x18\x04 \x01(\x03H\x03\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x05 \x01(\x01H\x04\x88\x01\x01\x12)\n\x04stop\x18\x06 
\x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x05\x88\x01\x01\x12\x18\n\x0btemperature\x18\x07 \x01(\x01H\x06\x88\x01\x01\x12\x12\n\x05top_p\x18\x08 \x01(\x01H\x07\x88\x01\x01\x12\x15\n\x08\x62\x61se_url\x18\t \x01(\tH\x08\x88\x01\x01\x12\x18\n\x0b\x61pi_version\x18\n \x01(\tH\t\x88\x01\x01\x12\x1a\n\rresource_name\x18\x0b \x01(\tH\n\x88\x01\x01\x12\x1a\n\rdeployment_id\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12\x15\n\x08is_azure\x18\r \x01(\x08H\x0c\x88\x01\x01\x12+\n\x06images\x18\x0e \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\r\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0f \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x0e\x88\x01\x01\x12L\n\x10reasoning_effort\x18\x10 \x01(\x0e\x32-.weaviate.v1.GenerativeOpenAI.ReasoningEffortH\x0f\x88\x01\x01\x12?\n\tverbosity\x18\x11 \x01(\x0e\x32\'.weaviate.v1.GenerativeOpenAI.VerbosityH\x10\x88\x01\x01\"\xa3\x01\n\x0fReasoningEffort\x12 \n\x1cREASONING_EFFORT_UNSPECIFIED\x10\x00\x12\x1c\n\x18REASONING_EFFORT_MINIMAL\x10\x01\x12\x18\n\x14REASONING_EFFORT_LOW\x10\x02\x12\x1b\n\x17REASONING_EFFORT_MEDIUM\x10\x03\x12\x19\n\x15REASONING_EFFORT_HIGH\x10\x04\"c\n\tVerbosity\x12\x19\n\x15VERBOSITY_UNSPECIFIED\x10\x00\x12\x11\n\rVERBOSITY_LOW\x10\x01\x12\x14\n\x10VERBOSITY_MEDIUM\x10\x02\x12\x12\n\x0eVERBOSITY_HIGH\x10\x03\x42\x14\n\x12_frequency_penaltyB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x04\n\x02_nB\x13\n\x11_presence_penaltyB\x07\n\x05_stopB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\x0b\n\t_base_urlB\x0e\n\x0c_api_versionB\x10\n\x0e_resource_nameB\x10\n\x0e_deployment_idB\x0b\n\t_is_azureB\t\n\x07_imagesB\x13\n\x11_image_propertiesB\x13\n\x11_reasoning_effortB\x0c\n\n_verbosity\"\x92\x05\n\x10GenerativeGoogle\x12\x1e\n\x11\x66requency_penalty\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x18\n\x0btemperature\x18\x05 \x01(\x01H\x04\x88\x01\x01\x12\x12\n\x05top_k\x18\x06 
\x01(\x03H\x05\x88\x01\x01\x12\x12\n\x05top_p\x18\x07 \x01(\x01H\x06\x88\x01\x01\x12\x33\n\x0estop_sequences\x18\x08 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x19\n\x0c\x61pi_endpoint\x18\t \x01(\tH\x08\x88\x01\x01\x12\x17\n\nproject_id\x18\n \x01(\tH\t\x88\x01\x01\x12\x18\n\x0b\x65ndpoint_id\x18\x0b \x01(\tH\n\x88\x01\x01\x12\x13\n\x06region\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12+\n\x06images\x18\r \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x0c\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0e \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\r\x88\x01\x01\x42\x14\n\x12_frequency_penaltyB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x13\n\x11_presence_penaltyB\x0e\n\x0c_temperatureB\x08\n\x06_top_kB\x08\n\x06_top_pB\x11\n\x0f_stop_sequencesB\x0f\n\r_api_endpointB\r\n\x0b_project_idB\x0e\n\x0c_endpoint_idB\t\n\x07_regionB\t\n\x07_imagesB\x13\n\x11_image_properties\"\xd0\x03\n\x14GenerativeDatabricks\x12\x15\n\x08\x65ndpoint\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x1e\n\x11\x66requency_penalty\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x16\n\tlog_probs\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x1a\n\rtop_log_probs\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x06 \x01(\x03H\x05\x88\x01\x01\x12\x0e\n\x01n\x18\x07 \x01(\x03H\x06\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x08 \x01(\x01H\x07\x88\x01\x01\x12)\n\x04stop\x18\t \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x08\x88\x01\x01\x12\x18\n\x0btemperature\x18\n \x01(\x01H\t\x88\x01\x01\x12\x12\n\x05top_p\x18\x0b \x01(\x01H\n\x88\x01\x01\x42\x0b\n\t_endpointB\x08\n\x06_modelB\x14\n\x12_frequency_penaltyB\x0c\n\n_log_probsB\x10\n\x0e_top_log_probsB\r\n\x0b_max_tokensB\x04\n\x02_nB\x13\n\x11_presence_penaltyB\x07\n\x05_stopB\x0e\n\x0c_temperatureB\x08\n\x06_top_p\"\xde\x01\n\x14GenerativeFriendliAI\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x03 
\x01(\x03H\x02\x88\x01\x01\x12\x18\n\x0btemperature\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x0e\n\x01n\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12\x12\n\x05top_p\x18\x06 \x01(\x01H\x05\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\r\n\x0b_max_tokensB\x0e\n\x0c_temperatureB\x04\n\x02_nB\x08\n\x06_top_p\"\xc4\x01\n\x10GenerativeNvidia\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x12\n\x05top_p\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x05 \x01(\x03H\x04\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\r\n\x0b_max_tokens\"\xc5\x02\n\rGenerativeXAI\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x12\n\x05top_p\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12+\n\x06images\x18\x06 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x05\x88\x01\x01\x12\x35\n\x10image_properties\x18\x07 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x06\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\r\n\x0b_max_tokensB\t\n\x07_imagesB\x13\n\x11_image_properties\"\xce\x02\n\x16GenerativeContextualAI\x12\x12\n\x05model\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0btemperature\x18\x02 \x01(\x01H\x01\x88\x01\x01\x12\x12\n\x05top_p\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x1b\n\x0emax_new_tokens\x18\x04 \x01(\x03H\x03\x88\x01\x01\x12\x1a\n\rsystem_prompt\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x1d\n\x10\x61void_commentary\x18\x06 \x01(\x08H\x05\x88\x01\x01\x12.\n\tknowledge\x18\x07 
\x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x06\x88\x01\x01\x42\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\x11\n\x0f_max_new_tokensB\x10\n\x0e_system_promptB\x13\n\x11_avoid_commentaryB\x0c\n\n_knowledge\"\x92\x01\n\x1bGenerativeAnthropicMetadata\x12=\n\x05usage\x18\x01 \x01(\x0b\x32..weaviate.v1.GenerativeAnthropicMetadata.Usage\x1a\x34\n\x05Usage\x12\x14\n\x0cinput_tokens\x18\x01 \x01(\x03\x12\x15\n\routput_tokens\x18\x02 \x01(\x03\"\x1c\n\x1aGenerativeAnyscaleMetadata\"\x17\n\x15GenerativeAWSMetadata\"\x9c\x06\n\x18GenerativeCohereMetadata\x12J\n\x0b\x61pi_version\x18\x01 \x01(\x0b\x32\x30.weaviate.v1.GenerativeCohereMetadata.ApiVersionH\x00\x88\x01\x01\x12L\n\x0c\x62illed_units\x18\x02 \x01(\x0b\x32\x31.weaviate.v1.GenerativeCohereMetadata.BilledUnitsH\x01\x88\x01\x01\x12\x41\n\x06tokens\x18\x03 \x01(\x0b\x32,.weaviate.v1.GenerativeCohereMetadata.TokensH\x02\x88\x01\x01\x12-\n\x08warnings\x18\x04 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x03\x88\x01\x01\x1a\x8e\x01\n\nApiVersion\x12\x14\n\x07version\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1a\n\ris_deprecated\x18\x02 \x01(\x08H\x01\x88\x01\x01\x12\x1c\n\x0fis_experimental\x18\x03 \x01(\x08H\x02\x88\x01\x01\x42\n\n\x08_versionB\x10\n\x0e_is_deprecatedB\x12\n\x10_is_experimental\x1a\xc5\x01\n\x0b\x42illedUnits\x12\x19\n\x0cinput_tokens\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1a\n\routput_tokens\x18\x02 \x01(\x01H\x01\x88\x01\x01\x12\x19\n\x0csearch_units\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x1c\n\x0f\x63lassifications\x18\x04 \x01(\x01H\x03\x88\x01\x01\x42\x0f\n\r_input_tokensB\x10\n\x0e_output_tokensB\x0f\n\r_search_unitsB\x12\n\x10_classifications\x1a\x62\n\x06Tokens\x12\x19\n\x0cinput_tokens\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1a\n\routput_tokens\x18\x02 
\x01(\x01H\x01\x88\x01\x01\x42\x0f\n\r_input_tokensB\x10\n\x0e_output_tokensB\x0e\n\x0c_api_versionB\x0f\n\r_billed_unitsB\t\n\x07_tokensB\x0b\n\t_warnings\"\x19\n\x17GenerativeDummyMetadata\"\x81\x02\n\x19GenerativeMistralMetadata\x12@\n\x05usage\x18\x01 \x01(\x0b\x32,.weaviate.v1.GenerativeMistralMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\x1a\n\x18GenerativeOllamaMetadata\"\xff\x01\n\x18GenerativeOpenAIMetadata\x12?\n\x05usage\x18\x01 \x01(\x0b\x32+.weaviate.v1.GenerativeOpenAIMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\xe8\x06\n\x18GenerativeGoogleMetadata\x12\x45\n\x08metadata\x18\x01 \x01(\x0b\x32..weaviate.v1.GenerativeGoogleMetadata.MetadataH\x00\x88\x01\x01\x12P\n\x0eusage_metadata\x18\x02 \x01(\x0b\x32\x33.weaviate.v1.GenerativeGoogleMetadata.UsageMetadataH\x01\x88\x01\x01\x1a~\n\nTokenCount\x12&\n\x19total_billable_characters\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x42\x1c\n\x1a_total_billable_charactersB\x0f\n\r_total_tokens\x1a\xe1\x01\n\rTokenMetadata\x12P\n\x11input_token_count\x18\x01 \x01(\x0b\x32\x30.weaviate.v1.GenerativeGoogleMetadata.TokenCountH\x00\x88\x01\x01\x12Q\n\x12output_token_count\x18\x02 \x01(\x0b\x32\x30.weaviate.v1.GenerativeGoogleMetadata.TokenCountH\x01\x88\x01\x01\x42\x14\n\x12_input_token_countB\x15\n\x13_output_token_count\x1ao\n\x08Metadata\x12P\n\x0etoken_metadata\x18\x01 
\x01(\x0b\x32\x33.weaviate.v1.GenerativeGoogleMetadata.TokenMetadataH\x00\x88\x01\x01\x42\x11\n\x0f_token_metadata\x1a\xbd\x01\n\rUsageMetadata\x12\x1f\n\x12prompt_token_count\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12#\n\x16\x63\x61ndidates_token_count\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1e\n\x11total_token_count\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x15\n\x13_prompt_token_countB\x19\n\x17_candidates_token_countB\x14\n\x12_total_token_countB\x0b\n\t_metadataB\x11\n\x0f_usage_metadata\"\x87\x02\n\x1cGenerativeDatabricksMetadata\x12\x43\n\x05usage\x18\x01 \x01(\x0b\x32/.weaviate.v1.GenerativeDatabricksMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\x87\x02\n\x1cGenerativeFriendliAIMetadata\x12\x43\n\x05usage\x18\x01 \x01(\x0b\x32/.weaviate.v1.GenerativeFriendliAIMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\xff\x01\n\x18GenerativeNvidiaMetadata\x12?\n\x05usage\x18\x01 \x01(\x0b\x32+.weaviate.v1.GenerativeNvidiaMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\xf9\x01\n\x15GenerativeXAIMetadata\x12<\n\x05usage\x18\x01 
\x01(\x0b\x32(.weaviate.v1.GenerativeXAIMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\x8f\x06\n\x12GenerativeMetadata\x12=\n\tanthropic\x18\x01 \x01(\x0b\x32(.weaviate.v1.GenerativeAnthropicMetadataH\x00\x12;\n\x08\x61nyscale\x18\x02 \x01(\x0b\x32\'.weaviate.v1.GenerativeAnyscaleMetadataH\x00\x12\x31\n\x03\x61ws\x18\x03 \x01(\x0b\x32\".weaviate.v1.GenerativeAWSMetadataH\x00\x12\x37\n\x06\x63ohere\x18\x04 \x01(\x0b\x32%.weaviate.v1.GenerativeCohereMetadataH\x00\x12\x35\n\x05\x64ummy\x18\x05 \x01(\x0b\x32$.weaviate.v1.GenerativeDummyMetadataH\x00\x12\x39\n\x07mistral\x18\x06 \x01(\x0b\x32&.weaviate.v1.GenerativeMistralMetadataH\x00\x12\x37\n\x06ollama\x18\x07 \x01(\x0b\x32%.weaviate.v1.GenerativeOllamaMetadataH\x00\x12\x37\n\x06openai\x18\x08 \x01(\x0b\x32%.weaviate.v1.GenerativeOpenAIMetadataH\x00\x12\x37\n\x06google\x18\t \x01(\x0b\x32%.weaviate.v1.GenerativeGoogleMetadataH\x00\x12?\n\ndatabricks\x18\n \x01(\x0b\x32).weaviate.v1.GenerativeDatabricksMetadataH\x00\x12?\n\nfriendliai\x18\x0b \x01(\x0b\x32).weaviate.v1.GenerativeFriendliAIMetadataH\x00\x12\x37\n\x06nvidia\x18\x0c \x01(\x0b\x32%.weaviate.v1.GenerativeNvidiaMetadataH\x00\x12\x31\n\x03xai\x18\r \x01(\x0b\x32\".weaviate.v1.GenerativeXAIMetadataH\x00\x42\x06\n\x04kind\"\xa2\x01\n\x0fGenerativeReply\x12\x0e\n\x06result\x18\x01 \x01(\t\x12\x30\n\x05\x64\x65\x62ug\x18\x02 \x01(\x0b\x32\x1c.weaviate.v1.GenerativeDebugH\x00\x88\x01\x01\x12\x36\n\x08metadata\x18\x03 \x01(\x0b\x32\x1f.weaviate.v1.GenerativeMetadataH\x01\x88\x01\x01\x42\x08\n\x06_debugB\x0b\n\t_metadata\"@\n\x10GenerativeResult\x12,\n\x06values\x18\x01 \x03(\x0b\x32\x1c.weaviate.v1.GenerativeReply\";\n\x0fGenerativeDebug\x12\x18\n\x0b\x66ull_prompt\x18\x01 
\x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_full_promptBt\n#io.weaviate.client.grpc.protocol.v1B\x17WeaviateProtoGenerativeZ4github.com/weaviate/weaviate/grpc/generated;protocolb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -41,93 +41,93 @@ _globals['_GENERATIVEANYSCALE']._serialized_start=1736 _globals['_GENERATIVEANYSCALE']._serialized_end=1864 _globals['_GENERATIVEAWS']._serialized_start=1867 - _globals['_GENERATIVEAWS']._serialized_end=2320 - _globals['_GENERATIVECOHERE']._serialized_start=2323 - _globals['_GENERATIVECOHERE']._serialized_end=2843 - _globals['_GENERATIVEDUMMY']._serialized_start=2845 - _globals['_GENERATIVEDUMMY']._serialized_end=2862 - _globals['_GENERATIVEMISTRAL']._serialized_start=2865 - _globals['_GENERATIVEMISTRAL']._serialized_end=3062 - _globals['_GENERATIVEOLLAMA']._serialized_start=3065 - _globals['_GENERATIVEOLLAMA']._serialized_end=3331 - _globals['_GENERATIVEOPENAI']._serialized_start=3334 - _globals['_GENERATIVEOPENAI']._serialized_end=4457 - _globals['_GENERATIVEOPENAI_REASONINGEFFORT']._serialized_start=3939 - _globals['_GENERATIVEOPENAI_REASONINGEFFORT']._serialized_end=4102 - _globals['_GENERATIVEOPENAI_VERBOSITY']._serialized_start=4104 - _globals['_GENERATIVEOPENAI_VERBOSITY']._serialized_end=4203 - _globals['_GENERATIVEGOOGLE']._serialized_start=4460 - _globals['_GENERATIVEGOOGLE']._serialized_end=5118 - _globals['_GENERATIVEDATABRICKS']._serialized_start=5121 - _globals['_GENERATIVEDATABRICKS']._serialized_end=5585 - _globals['_GENERATIVEFRIENDLIAI']._serialized_start=5588 - _globals['_GENERATIVEFRIENDLIAI']._serialized_end=5810 - _globals['_GENERATIVENVIDIA']._serialized_start=5813 - _globals['_GENERATIVENVIDIA']._serialized_end=6009 - _globals['_GENERATIVEXAI']._serialized_start=6012 - _globals['_GENERATIVEXAI']._serialized_end=6337 - _globals['_GENERATIVECONTEXTUALAI']._serialized_start=6340 - _globals['_GENERATIVECONTEXTUALAI']._serialized_end=6674 - 
_globals['_GENERATIVEANTHROPICMETADATA']._serialized_start=6677 - _globals['_GENERATIVEANTHROPICMETADATA']._serialized_end=6823 - _globals['_GENERATIVEANTHROPICMETADATA_USAGE']._serialized_start=6771 - _globals['_GENERATIVEANTHROPICMETADATA_USAGE']._serialized_end=6823 - _globals['_GENERATIVEANYSCALEMETADATA']._serialized_start=6825 - _globals['_GENERATIVEANYSCALEMETADATA']._serialized_end=6853 - _globals['_GENERATIVEAWSMETADATA']._serialized_start=6855 - _globals['_GENERATIVEAWSMETADATA']._serialized_end=6878 - _globals['_GENERATIVECOHEREMETADATA']._serialized_start=6881 - _globals['_GENERATIVECOHEREMETADATA']._serialized_end=7677 - _globals['_GENERATIVECOHEREMETADATA_APIVERSION']._serialized_start=7178 - _globals['_GENERATIVECOHEREMETADATA_APIVERSION']._serialized_end=7320 - _globals['_GENERATIVECOHEREMETADATA_BILLEDUNITS']._serialized_start=7323 - _globals['_GENERATIVECOHEREMETADATA_BILLEDUNITS']._serialized_end=7520 - _globals['_GENERATIVECOHEREMETADATA_TOKENS']._serialized_start=7522 - _globals['_GENERATIVECOHEREMETADATA_TOKENS']._serialized_end=7620 - _globals['_GENERATIVEDUMMYMETADATA']._serialized_start=7679 - _globals['_GENERATIVEDUMMYMETADATA']._serialized_end=7704 - _globals['_GENERATIVEMISTRALMETADATA']._serialized_start=7707 - _globals['_GENERATIVEMISTRALMETADATA']._serialized_end=7964 - _globals['_GENERATIVEMISTRALMETADATA_USAGE']._serialized_start=7803 - _globals['_GENERATIVEMISTRALMETADATA_USAGE']._serialized_end=7954 - _globals['_GENERATIVEOLLAMAMETADATA']._serialized_start=7966 - _globals['_GENERATIVEOLLAMAMETADATA']._serialized_end=7992 - _globals['_GENERATIVEOPENAIMETADATA']._serialized_start=7995 - _globals['_GENERATIVEOPENAIMETADATA']._serialized_end=8250 - _globals['_GENERATIVEOPENAIMETADATA_USAGE']._serialized_start=7803 - _globals['_GENERATIVEOPENAIMETADATA_USAGE']._serialized_end=7954 - _globals['_GENERATIVEGOOGLEMETADATA']._serialized_start=8253 - _globals['_GENERATIVEGOOGLEMETADATA']._serialized_end=9125 - 
_globals['_GENERATIVEGOOGLEMETADATA_TOKENCOUNT']._serialized_start=8434 - _globals['_GENERATIVEGOOGLEMETADATA_TOKENCOUNT']._serialized_end=8560 - _globals['_GENERATIVEGOOGLEMETADATA_TOKENMETADATA']._serialized_start=8563 - _globals['_GENERATIVEGOOGLEMETADATA_TOKENMETADATA']._serialized_end=8788 - _globals['_GENERATIVEGOOGLEMETADATA_METADATA']._serialized_start=8790 - _globals['_GENERATIVEGOOGLEMETADATA_METADATA']._serialized_end=8901 - _globals['_GENERATIVEGOOGLEMETADATA_USAGEMETADATA']._serialized_start=8904 - _globals['_GENERATIVEGOOGLEMETADATA_USAGEMETADATA']._serialized_end=9093 - _globals['_GENERATIVEDATABRICKSMETADATA']._serialized_start=9128 - _globals['_GENERATIVEDATABRICKSMETADATA']._serialized_end=9391 - _globals['_GENERATIVEDATABRICKSMETADATA_USAGE']._serialized_start=7803 - _globals['_GENERATIVEDATABRICKSMETADATA_USAGE']._serialized_end=7954 - _globals['_GENERATIVEFRIENDLIAIMETADATA']._serialized_start=9394 - _globals['_GENERATIVEFRIENDLIAIMETADATA']._serialized_end=9657 - _globals['_GENERATIVEFRIENDLIAIMETADATA_USAGE']._serialized_start=7803 - _globals['_GENERATIVEFRIENDLIAIMETADATA_USAGE']._serialized_end=7954 - _globals['_GENERATIVENVIDIAMETADATA']._serialized_start=9660 - _globals['_GENERATIVENVIDIAMETADATA']._serialized_end=9915 - _globals['_GENERATIVENVIDIAMETADATA_USAGE']._serialized_start=7803 - _globals['_GENERATIVENVIDIAMETADATA_USAGE']._serialized_end=7954 - _globals['_GENERATIVEXAIMETADATA']._serialized_start=9918 - _globals['_GENERATIVEXAIMETADATA']._serialized_end=10167 - _globals['_GENERATIVEXAIMETADATA_USAGE']._serialized_start=7803 - _globals['_GENERATIVEXAIMETADATA_USAGE']._serialized_end=7954 - _globals['_GENERATIVEMETADATA']._serialized_start=10170 - _globals['_GENERATIVEMETADATA']._serialized_end=10953 - _globals['_GENERATIVEREPLY']._serialized_start=10956 - _globals['_GENERATIVEREPLY']._serialized_end=11118 - _globals['_GENERATIVERESULT']._serialized_start=11120 - _globals['_GENERATIVERESULT']._serialized_end=11184 - 
_globals['_GENERATIVEDEBUG']._serialized_start=11186 - _globals['_GENERATIVEDEBUG']._serialized_end=11245 + _globals['_GENERATIVEAWS']._serialized_end=2392 + _globals['_GENERATIVECOHERE']._serialized_start=2395 + _globals['_GENERATIVECOHERE']._serialized_end=2915 + _globals['_GENERATIVEDUMMY']._serialized_start=2917 + _globals['_GENERATIVEDUMMY']._serialized_end=2934 + _globals['_GENERATIVEMISTRAL']._serialized_start=2937 + _globals['_GENERATIVEMISTRAL']._serialized_end=3134 + _globals['_GENERATIVEOLLAMA']._serialized_start=3137 + _globals['_GENERATIVEOLLAMA']._serialized_end=3403 + _globals['_GENERATIVEOPENAI']._serialized_start=3406 + _globals['_GENERATIVEOPENAI']._serialized_end=4529 + _globals['_GENERATIVEOPENAI_REASONINGEFFORT']._serialized_start=4011 + _globals['_GENERATIVEOPENAI_REASONINGEFFORT']._serialized_end=4174 + _globals['_GENERATIVEOPENAI_VERBOSITY']._serialized_start=4176 + _globals['_GENERATIVEOPENAI_VERBOSITY']._serialized_end=4275 + _globals['_GENERATIVEGOOGLE']._serialized_start=4532 + _globals['_GENERATIVEGOOGLE']._serialized_end=5190 + _globals['_GENERATIVEDATABRICKS']._serialized_start=5193 + _globals['_GENERATIVEDATABRICKS']._serialized_end=5657 + _globals['_GENERATIVEFRIENDLIAI']._serialized_start=5660 + _globals['_GENERATIVEFRIENDLIAI']._serialized_end=5882 + _globals['_GENERATIVENVIDIA']._serialized_start=5885 + _globals['_GENERATIVENVIDIA']._serialized_end=6081 + _globals['_GENERATIVEXAI']._serialized_start=6084 + _globals['_GENERATIVEXAI']._serialized_end=6409 + _globals['_GENERATIVECONTEXTUALAI']._serialized_start=6412 + _globals['_GENERATIVECONTEXTUALAI']._serialized_end=6746 + _globals['_GENERATIVEANTHROPICMETADATA']._serialized_start=6749 + _globals['_GENERATIVEANTHROPICMETADATA']._serialized_end=6895 + _globals['_GENERATIVEANTHROPICMETADATA_USAGE']._serialized_start=6843 + _globals['_GENERATIVEANTHROPICMETADATA_USAGE']._serialized_end=6895 + _globals['_GENERATIVEANYSCALEMETADATA']._serialized_start=6897 + 
_globals['_GENERATIVEANYSCALEMETADATA']._serialized_end=6925 + _globals['_GENERATIVEAWSMETADATA']._serialized_start=6927 + _globals['_GENERATIVEAWSMETADATA']._serialized_end=6950 + _globals['_GENERATIVECOHEREMETADATA']._serialized_start=6953 + _globals['_GENERATIVECOHEREMETADATA']._serialized_end=7749 + _globals['_GENERATIVECOHEREMETADATA_APIVERSION']._serialized_start=7250 + _globals['_GENERATIVECOHEREMETADATA_APIVERSION']._serialized_end=7392 + _globals['_GENERATIVECOHEREMETADATA_BILLEDUNITS']._serialized_start=7395 + _globals['_GENERATIVECOHEREMETADATA_BILLEDUNITS']._serialized_end=7592 + _globals['_GENERATIVECOHEREMETADATA_TOKENS']._serialized_start=7594 + _globals['_GENERATIVECOHEREMETADATA_TOKENS']._serialized_end=7692 + _globals['_GENERATIVEDUMMYMETADATA']._serialized_start=7751 + _globals['_GENERATIVEDUMMYMETADATA']._serialized_end=7776 + _globals['_GENERATIVEMISTRALMETADATA']._serialized_start=7779 + _globals['_GENERATIVEMISTRALMETADATA']._serialized_end=8036 + _globals['_GENERATIVEMISTRALMETADATA_USAGE']._serialized_start=7875 + _globals['_GENERATIVEMISTRALMETADATA_USAGE']._serialized_end=8026 + _globals['_GENERATIVEOLLAMAMETADATA']._serialized_start=8038 + _globals['_GENERATIVEOLLAMAMETADATA']._serialized_end=8064 + _globals['_GENERATIVEOPENAIMETADATA']._serialized_start=8067 + _globals['_GENERATIVEOPENAIMETADATA']._serialized_end=8322 + _globals['_GENERATIVEOPENAIMETADATA_USAGE']._serialized_start=7875 + _globals['_GENERATIVEOPENAIMETADATA_USAGE']._serialized_end=8026 + _globals['_GENERATIVEGOOGLEMETADATA']._serialized_start=8325 + _globals['_GENERATIVEGOOGLEMETADATA']._serialized_end=9197 + _globals['_GENERATIVEGOOGLEMETADATA_TOKENCOUNT']._serialized_start=8506 + _globals['_GENERATIVEGOOGLEMETADATA_TOKENCOUNT']._serialized_end=8632 + _globals['_GENERATIVEGOOGLEMETADATA_TOKENMETADATA']._serialized_start=8635 + _globals['_GENERATIVEGOOGLEMETADATA_TOKENMETADATA']._serialized_end=8860 + _globals['_GENERATIVEGOOGLEMETADATA_METADATA']._serialized_start=8862 
+ _globals['_GENERATIVEGOOGLEMETADATA_METADATA']._serialized_end=8973 + _globals['_GENERATIVEGOOGLEMETADATA_USAGEMETADATA']._serialized_start=8976 + _globals['_GENERATIVEGOOGLEMETADATA_USAGEMETADATA']._serialized_end=9165 + _globals['_GENERATIVEDATABRICKSMETADATA']._serialized_start=9200 + _globals['_GENERATIVEDATABRICKSMETADATA']._serialized_end=9463 + _globals['_GENERATIVEDATABRICKSMETADATA_USAGE']._serialized_start=7875 + _globals['_GENERATIVEDATABRICKSMETADATA_USAGE']._serialized_end=8026 + _globals['_GENERATIVEFRIENDLIAIMETADATA']._serialized_start=9466 + _globals['_GENERATIVEFRIENDLIAIMETADATA']._serialized_end=9729 + _globals['_GENERATIVEFRIENDLIAIMETADATA_USAGE']._serialized_start=7875 + _globals['_GENERATIVEFRIENDLIAIMETADATA_USAGE']._serialized_end=8026 + _globals['_GENERATIVENVIDIAMETADATA']._serialized_start=9732 + _globals['_GENERATIVENVIDIAMETADATA']._serialized_end=9987 + _globals['_GENERATIVENVIDIAMETADATA_USAGE']._serialized_start=7875 + _globals['_GENERATIVENVIDIAMETADATA_USAGE']._serialized_end=8026 + _globals['_GENERATIVEXAIMETADATA']._serialized_start=9990 + _globals['_GENERATIVEXAIMETADATA']._serialized_end=10239 + _globals['_GENERATIVEXAIMETADATA_USAGE']._serialized_start=7875 + _globals['_GENERATIVEXAIMETADATA_USAGE']._serialized_end=8026 + _globals['_GENERATIVEMETADATA']._serialized_start=10242 + _globals['_GENERATIVEMETADATA']._serialized_end=11025 + _globals['_GENERATIVEREPLY']._serialized_start=11028 + _globals['_GENERATIVEREPLY']._serialized_end=11190 + _globals['_GENERATIVERESULT']._serialized_start=11192 + _globals['_GENERATIVERESULT']._serialized_end=11256 + _globals['_GENERATIVEDEBUG']._serialized_start=11258 + _globals['_GENERATIVEDEBUG']._serialized_end=11317 # @@protoc_insertion_point(module_scope) diff --git a/weaviate/proto/v1/v4216/v1/generative_pb2.pyi b/weaviate/proto/v1/v4216/v1/generative_pb2.pyi index 0aec8d653..0cbad0694 100644 --- a/weaviate/proto/v1/v4216/v1/generative_pb2.pyi +++ 
b/weaviate/proto/v1/v4216/v1/generative_pb2.pyi @@ -108,7 +108,7 @@ class GenerativeAnyscale(_message.Message): def __init__(self, base_url: _Optional[str] = ..., model: _Optional[str] = ..., temperature: _Optional[float] = ...) -> None: ... class GenerativeAWS(_message.Message): - __slots__ = ["model", "temperature", "service", "region", "endpoint", "target_model", "target_variant", "images", "image_properties", "max_tokens"] + __slots__ = ["model", "temperature", "service", "region", "endpoint", "target_model", "target_variant", "images", "image_properties", "max_tokens", "stop_sequences"] MODEL_FIELD_NUMBER: _ClassVar[int] TEMPERATURE_FIELD_NUMBER: _ClassVar[int] SERVICE_FIELD_NUMBER: _ClassVar[int] @@ -119,6 +119,7 @@ class GenerativeAWS(_message.Message): IMAGES_FIELD_NUMBER: _ClassVar[int] IMAGE_PROPERTIES_FIELD_NUMBER: _ClassVar[int] MAX_TOKENS_FIELD_NUMBER: _ClassVar[int] + STOP_SEQUENCES_FIELD_NUMBER: _ClassVar[int] model: str temperature: float service: str @@ -129,7 +130,8 @@ class GenerativeAWS(_message.Message): images: _base_pb2.TextArray image_properties: _base_pb2.TextArray max_tokens: int - def __init__(self, model: _Optional[str] = ..., temperature: _Optional[float] = ..., service: _Optional[str] = ..., region: _Optional[str] = ..., endpoint: _Optional[str] = ..., target_model: _Optional[str] = ..., target_variant: _Optional[str] = ..., images: _Optional[_Union[_base_pb2.TextArray, _Mapping]] = ..., image_properties: _Optional[_Union[_base_pb2.TextArray, _Mapping]] = ..., max_tokens: _Optional[int] = ...) -> None: ... 
+ stop_sequences: _base_pb2.TextArray + def __init__(self, model: _Optional[str] = ..., temperature: _Optional[float] = ..., service: _Optional[str] = ..., region: _Optional[str] = ..., endpoint: _Optional[str] = ..., target_model: _Optional[str] = ..., target_variant: _Optional[str] = ..., images: _Optional[_Union[_base_pb2.TextArray, _Mapping]] = ..., image_properties: _Optional[_Union[_base_pb2.TextArray, _Mapping]] = ..., max_tokens: _Optional[int] = ..., stop_sequences: _Optional[_Union[_base_pb2.TextArray, _Mapping]] = ...) -> None: ... class GenerativeCohere(_message.Message): __slots__ = ["base_url", "frequency_penalty", "max_tokens", "model", "k", "p", "presence_penalty", "stop_sequences", "temperature", "images", "image_properties"] diff --git a/weaviate/proto/v1/v5261/v1/batch_pb2.py b/weaviate/proto/v1/v5261/v1/batch_pb2.py index 86b722431..de1c4807a 100644 --- a/weaviate/proto/v1/v5261/v1/batch_pb2.py +++ b/weaviate/proto/v1/v5261/v1/batch_pb2.py @@ -16,7 +16,7 @@ from weaviate.proto.v1.v5261.v1 import base_pb2 as v1_dot_base__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0ev1/batch.proto\x12\x0bweaviate.v1\x1a\x1cgoogle/protobuf/struct.proto\x1a\rv1/base.proto\"\x95\x01\n\x13\x42\x61tchObjectsRequest\x12)\n\x07objects\x18\x01 \x03(\x0b\x32\x18.weaviate.v1.BatchObject\x12=\n\x11\x63onsistency_level\x18\x02 \x01(\x0e\x32\x1d.weaviate.v1.ConsistencyLevelH\x00\x88\x01\x01\x42\x14\n\x12_consistency_level\"\x9e\x01\n\x16\x42\x61tchReferencesRequest\x12/\n\nreferences\x18\x01 \x03(\x0b\x32\x1b.weaviate.v1.BatchReference\x12=\n\x11\x63onsistency_level\x18\x02 \x01(\x0e\x32\x1d.weaviate.v1.ConsistencyLevelH\x00\x88\x01\x01\x42\x14\n\x12_consistency_level\"\xa6\x04\n\x12\x42\x61tchStreamRequest\x12\x36\n\x05start\x18\x01 \x01(\x0b\x32%.weaviate.v1.BatchStreamRequest.StartH\x00\x12\x34\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.weaviate.v1.BatchStreamRequest.DataH\x00\x12\x34\n\x04stop\x18\x03 
\x01(\x0b\x32$.weaviate.v1.BatchStreamRequest.StopH\x00\x1a\\\n\x05Start\x12=\n\x11\x63onsistency_level\x18\x01 \x01(\x0e\x32\x1d.weaviate.v1.ConsistencyLevelH\x00\x88\x01\x01\x42\x14\n\x12_consistency_level\x1a\x06\n\x04Stop\x1a\xfa\x01\n\x04\x44\x61ta\x12=\n\x07objects\x18\x01 \x01(\x0b\x32,.weaviate.v1.BatchStreamRequest.Data.Objects\x12\x43\n\nreferences\x18\x02 \x01(\x0b\x32/.weaviate.v1.BatchStreamRequest.Data.References\x1a\x33\n\x07Objects\x12(\n\x06values\x18\x01 \x03(\x0b\x32\x18.weaviate.v1.BatchObject\x1a\x39\n\nReferences\x12+\n\x06values\x18\x01 \x03(\x0b\x32\x1b.weaviate.v1.BatchReferenceB\t\n\x07message\"\x98\x05\n\x10\x42\x61tchStreamReply\x12\x38\n\x07results\x18\x01 \x01(\x0b\x32%.weaviate.v1.BatchStreamReply.ResultsH\x00\x12\x43\n\rshutting_down\x18\x02 \x01(\x0b\x32*.weaviate.v1.BatchStreamReply.ShuttingDownH\x00\x12:\n\x08shutdown\x18\x03 \x01(\x0b\x32&.weaviate.v1.BatchStreamReply.ShutdownH\x00\x12\x38\n\x07started\x18\x04 \x01(\x0b\x32%.weaviate.v1.BatchStreamReply.StartedH\x00\x12\x38\n\x07\x62\x61\x63koff\x18\x05 \x01(\x0b\x32%.weaviate.v1.BatchStreamReply.BackoffH\x00\x1a\t\n\x07Started\x1a\x0e\n\x0cShuttingDown\x1a\n\n\x08Shutdown\x1a\x1d\n\x07\x42\x61\x63koff\x12\x12\n\nbatch_size\x18\x01 \x01(\x05\x1a\x83\x02\n\x07Results\x12;\n\x06\x65rrors\x18\x01 \x03(\x0b\x32+.weaviate.v1.BatchStreamReply.Results.Error\x12@\n\tsuccesses\x18\x02 \x03(\x0b\x32-.weaviate.v1.BatchStreamReply.Results.Success\x1a\x42\n\x05\x45rror\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\x0e\n\x04uuid\x18\x02 \x01(\tH\x00\x12\x10\n\x06\x62\x65\x61\x63on\x18\x03 \x01(\tH\x00\x42\x08\n\x06\x64\x65tail\x1a\x35\n\x07Success\x12\x0e\n\x04uuid\x18\x02 \x01(\tH\x00\x12\x10\n\x06\x62\x65\x61\x63on\x18\x03 \x01(\tH\x00\x42\x08\n\x06\x64\x65tailB\t\n\x07message\"\xde\x07\n\x0b\x42\x61tchObject\x12\x0c\n\x04uuid\x18\x01 \x01(\t\x12\x12\n\x06vector\x18\x02 \x03(\x02\x42\x02\x18\x01\x12\x37\n\nproperties\x18\x03 
\x01(\x0b\x32#.weaviate.v1.BatchObject.Properties\x12\x12\n\ncollection\x18\x04 \x01(\t\x12\x0e\n\x06tenant\x18\x05 \x01(\t\x12\x14\n\x0cvector_bytes\x18\x06 \x01(\x0c\x12%\n\x07vectors\x18\x17 \x03(\x0b\x32\x14.weaviate.v1.Vectors\x1a\x84\x05\n\nProperties\x12\x33\n\x12non_ref_properties\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\x12N\n\x17single_target_ref_props\x18\x02 \x03(\x0b\x32-.weaviate.v1.BatchObject.SingleTargetRefProps\x12L\n\x16multi_target_ref_props\x18\x03 \x03(\x0b\x32,.weaviate.v1.BatchObject.MultiTargetRefProps\x12\x43\n\x17number_array_properties\x18\x04 \x03(\x0b\x32\".weaviate.v1.NumberArrayProperties\x12=\n\x14int_array_properties\x18\x05 \x03(\x0b\x32\x1f.weaviate.v1.IntArrayProperties\x12?\n\x15text_array_properties\x18\x06 \x03(\x0b\x32 .weaviate.v1.TextArrayProperties\x12\x45\n\x18\x62oolean_array_properties\x18\x07 \x03(\x0b\x32#.weaviate.v1.BooleanArrayProperties\x12\x38\n\x11object_properties\x18\x08 \x03(\x0b\x32\x1d.weaviate.v1.ObjectProperties\x12\x43\n\x17object_array_properties\x18\t \x03(\x0b\x32\".weaviate.v1.ObjectArrayProperties\x12\x18\n\x10\x65mpty_list_props\x18\n \x03(\t\x1a\x38\n\x14SingleTargetRefProps\x12\r\n\x05uuids\x18\x01 \x03(\t\x12\x11\n\tprop_name\x18\x02 \x01(\t\x1aR\n\x13MultiTargetRefProps\x12\r\n\x05uuids\x18\x01 \x03(\t\x12\x11\n\tprop_name\x18\x02 \x01(\t\x12\x19\n\x11target_collection\x18\x03 \x01(\t\"\x99\x01\n\x0e\x42\x61tchReference\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0f\x66rom_collection\x18\x02 \x01(\t\x12\x11\n\tfrom_uuid\x18\x03 \x01(\t\x12\x1a\n\rto_collection\x18\x04 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x07to_uuid\x18\x05 \x01(\t\x12\x0e\n\x06tenant\x18\x06 \x01(\tB\x10\n\x0e_to_collection\"\x88\x01\n\x11\x42\x61tchObjectsReply\x12\x0c\n\x04took\x18\x01 \x01(\x02\x12\x39\n\x06\x65rrors\x18\x02 \x03(\x0b\x32).weaviate.v1.BatchObjectsReply.BatchError\x1a*\n\nBatchError\x12\r\n\x05index\x18\x01 \x01(\x05\x12\r\n\x05\x65rror\x18\x02 
\x01(\t\"\x8e\x01\n\x14\x42\x61tchReferencesReply\x12\x0c\n\x04took\x18\x01 \x01(\x02\x12<\n\x06\x65rrors\x18\x02 \x03(\x0b\x32,.weaviate.v1.BatchReferencesReply.BatchError\x1a*\n\nBatchError\x12\r\n\x05index\x18\x01 \x01(\x05\x12\r\n\x05\x65rror\x18\x02 \x01(\tBo\n#io.weaviate.client.grpc.protocol.v1B\x12WeaviateProtoBatchZ4github.com/weaviate/weaviate/grpc/generated;protocolb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0ev1/batch.proto\x12\x0bweaviate.v1\x1a\x1cgoogle/protobuf/struct.proto\x1a\rv1/base.proto\"\x95\x01\n\x13\x42\x61tchObjectsRequest\x12)\n\x07objects\x18\x01 \x03(\x0b\x32\x18.weaviate.v1.BatchObject\x12=\n\x11\x63onsistency_level\x18\x02 \x01(\x0e\x32\x1d.weaviate.v1.ConsistencyLevelH\x00\x88\x01\x01\x42\x14\n\x12_consistency_level\"\x9e\x01\n\x16\x42\x61tchReferencesRequest\x12/\n\nreferences\x18\x01 \x03(\x0b\x32\x1b.weaviate.v1.BatchReference\x12=\n\x11\x63onsistency_level\x18\x02 \x01(\x0e\x32\x1d.weaviate.v1.ConsistencyLevelH\x00\x88\x01\x01\x42\x14\n\x12_consistency_level\"\xa6\x04\n\x12\x42\x61tchStreamRequest\x12\x36\n\x05start\x18\x01 \x01(\x0b\x32%.weaviate.v1.BatchStreamRequest.StartH\x00\x12\x34\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.weaviate.v1.BatchStreamRequest.DataH\x00\x12\x34\n\x04stop\x18\x03 \x01(\x0b\x32$.weaviate.v1.BatchStreamRequest.StopH\x00\x1a\\\n\x05Start\x12=\n\x11\x63onsistency_level\x18\x01 \x01(\x0e\x32\x1d.weaviate.v1.ConsistencyLevelH\x00\x88\x01\x01\x42\x14\n\x12_consistency_level\x1a\x06\n\x04Stop\x1a\xfa\x01\n\x04\x44\x61ta\x12=\n\x07objects\x18\x01 \x01(\x0b\x32,.weaviate.v1.BatchStreamRequest.Data.Objects\x12\x43\n\nreferences\x18\x02 \x01(\x0b\x32/.weaviate.v1.BatchStreamRequest.Data.References\x1a\x33\n\x07Objects\x12(\n\x06values\x18\x01 \x03(\x0b\x32\x18.weaviate.v1.BatchObject\x1a\x39\n\nReferences\x12+\n\x06values\x18\x01 \x03(\x0b\x32\x1b.weaviate.v1.BatchReferenceB\t\n\x07message\"\xe7\x06\n\x10\x42\x61tchStreamReply\x12\x38\n\x07results\x18\x01 
\x01(\x0b\x32%.weaviate.v1.BatchStreamReply.ResultsH\x00\x12\x43\n\rshutting_down\x18\x02 \x01(\x0b\x32*.weaviate.v1.BatchStreamReply.ShuttingDownH\x00\x12:\n\x08shutdown\x18\x03 \x01(\x0b\x32&.weaviate.v1.BatchStreamReply.ShutdownH\x00\x12\x38\n\x07started\x18\x04 \x01(\x0b\x32%.weaviate.v1.BatchStreamReply.StartedH\x00\x12\x38\n\x07\x62\x61\x63koff\x18\x05 \x01(\x0b\x32%.weaviate.v1.BatchStreamReply.BackoffH\x00\x12\x32\n\x04\x61\x63ks\x18\x06 \x01(\x0b\x32\".weaviate.v1.BatchStreamReply.AcksH\x00\x12\x42\n\rout_of_memory\x18\x07 \x01(\x0b\x32).weaviate.v1.BatchStreamReply.OutOfMemoryH\x00\x1a\t\n\x07Started\x1a\x0e\n\x0cShuttingDown\x1a\n\n\x08Shutdown\x1a-\n\x0bOutOfMemory\x12\r\n\x05uuids\x18\x01 \x03(\t\x12\x0f\n\x07\x62\x65\x61\x63ons\x18\x02 \x03(\t\x1a\x1d\n\x07\x42\x61\x63koff\x12\x12\n\nbatch_size\x18\x01 \x01(\x05\x1a&\n\x04\x41\x63ks\x12\r\n\x05uuids\x18\x01 \x03(\t\x12\x0f\n\x07\x62\x65\x61\x63ons\x18\x02 \x03(\t\x1a\x83\x02\n\x07Results\x12;\n\x06\x65rrors\x18\x01 \x03(\x0b\x32+.weaviate.v1.BatchStreamReply.Results.Error\x12@\n\tsuccesses\x18\x02 \x03(\x0b\x32-.weaviate.v1.BatchStreamReply.Results.Success\x1a\x42\n\x05\x45rror\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\x0e\n\x04uuid\x18\x02 \x01(\tH\x00\x12\x10\n\x06\x62\x65\x61\x63on\x18\x03 \x01(\tH\x00\x42\x08\n\x06\x64\x65tail\x1a\x35\n\x07Success\x12\x0e\n\x04uuid\x18\x02 \x01(\tH\x00\x12\x10\n\x06\x62\x65\x61\x63on\x18\x03 \x01(\tH\x00\x42\x08\n\x06\x64\x65tailB\t\n\x07message\"\xde\x07\n\x0b\x42\x61tchObject\x12\x0c\n\x04uuid\x18\x01 \x01(\t\x12\x12\n\x06vector\x18\x02 \x03(\x02\x42\x02\x18\x01\x12\x37\n\nproperties\x18\x03 \x01(\x0b\x32#.weaviate.v1.BatchObject.Properties\x12\x12\n\ncollection\x18\x04 \x01(\t\x12\x0e\n\x06tenant\x18\x05 \x01(\t\x12\x14\n\x0cvector_bytes\x18\x06 \x01(\x0c\x12%\n\x07vectors\x18\x17 \x03(\x0b\x32\x14.weaviate.v1.Vectors\x1a\x84\x05\n\nProperties\x12\x33\n\x12non_ref_properties\x18\x01 
\x01(\x0b\x32\x17.google.protobuf.Struct\x12N\n\x17single_target_ref_props\x18\x02 \x03(\x0b\x32-.weaviate.v1.BatchObject.SingleTargetRefProps\x12L\n\x16multi_target_ref_props\x18\x03 \x03(\x0b\x32,.weaviate.v1.BatchObject.MultiTargetRefProps\x12\x43\n\x17number_array_properties\x18\x04 \x03(\x0b\x32\".weaviate.v1.NumberArrayProperties\x12=\n\x14int_array_properties\x18\x05 \x03(\x0b\x32\x1f.weaviate.v1.IntArrayProperties\x12?\n\x15text_array_properties\x18\x06 \x03(\x0b\x32 .weaviate.v1.TextArrayProperties\x12\x45\n\x18\x62oolean_array_properties\x18\x07 \x03(\x0b\x32#.weaviate.v1.BooleanArrayProperties\x12\x38\n\x11object_properties\x18\x08 \x03(\x0b\x32\x1d.weaviate.v1.ObjectProperties\x12\x43\n\x17object_array_properties\x18\t \x03(\x0b\x32\".weaviate.v1.ObjectArrayProperties\x12\x18\n\x10\x65mpty_list_props\x18\n \x03(\t\x1a\x38\n\x14SingleTargetRefProps\x12\r\n\x05uuids\x18\x01 \x03(\t\x12\x11\n\tprop_name\x18\x02 \x01(\t\x1aR\n\x13MultiTargetRefProps\x12\r\n\x05uuids\x18\x01 \x03(\t\x12\x11\n\tprop_name\x18\x02 \x01(\t\x12\x19\n\x11target_collection\x18\x03 \x01(\t\"\x99\x01\n\x0e\x42\x61tchReference\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0f\x66rom_collection\x18\x02 \x01(\t\x12\x11\n\tfrom_uuid\x18\x03 \x01(\t\x12\x1a\n\rto_collection\x18\x04 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x07to_uuid\x18\x05 \x01(\t\x12\x0e\n\x06tenant\x18\x06 \x01(\tB\x10\n\x0e_to_collection\"\x88\x01\n\x11\x42\x61tchObjectsReply\x12\x0c\n\x04took\x18\x01 \x01(\x02\x12\x39\n\x06\x65rrors\x18\x02 \x03(\x0b\x32).weaviate.v1.BatchObjectsReply.BatchError\x1a*\n\nBatchError\x12\r\n\x05index\x18\x01 \x01(\x05\x12\r\n\x05\x65rror\x18\x02 \x01(\t\"\x8e\x01\n\x14\x42\x61tchReferencesReply\x12\x0c\n\x04took\x18\x01 \x01(\x02\x12<\n\x06\x65rrors\x18\x02 \x03(\x0b\x32,.weaviate.v1.BatchReferencesReply.BatchError\x1a*\n\nBatchError\x12\r\n\x05index\x18\x01 \x01(\x05\x12\r\n\x05\x65rror\x18\x02 
\x01(\tBo\n#io.weaviate.client.grpc.protocol.v1B\x12WeaviateProtoBatchZ4github.com/weaviate/weaviate/grpc/generated;protocolb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -43,37 +43,41 @@ _globals['_BATCHSTREAMREQUEST_DATA_REFERENCES']._serialized_start=872 _globals['_BATCHSTREAMREQUEST_DATA_REFERENCES']._serialized_end=929 _globals['_BATCHSTREAMREPLY']._serialized_start=943 - _globals['_BATCHSTREAMREPLY']._serialized_end=1607 - _globals['_BATCHSTREAMREPLY_STARTED']._serialized_start=1266 - _globals['_BATCHSTREAMREPLY_STARTED']._serialized_end=1275 - _globals['_BATCHSTREAMREPLY_SHUTTINGDOWN']._serialized_start=1277 - _globals['_BATCHSTREAMREPLY_SHUTTINGDOWN']._serialized_end=1291 - _globals['_BATCHSTREAMREPLY_SHUTDOWN']._serialized_start=1293 - _globals['_BATCHSTREAMREPLY_SHUTDOWN']._serialized_end=1303 - _globals['_BATCHSTREAMREPLY_BACKOFF']._serialized_start=1305 - _globals['_BATCHSTREAMREPLY_BACKOFF']._serialized_end=1334 - _globals['_BATCHSTREAMREPLY_RESULTS']._serialized_start=1337 - _globals['_BATCHSTREAMREPLY_RESULTS']._serialized_end=1596 - _globals['_BATCHSTREAMREPLY_RESULTS_ERROR']._serialized_start=1475 - _globals['_BATCHSTREAMREPLY_RESULTS_ERROR']._serialized_end=1541 - _globals['_BATCHSTREAMREPLY_RESULTS_SUCCESS']._serialized_start=1543 - _globals['_BATCHSTREAMREPLY_RESULTS_SUCCESS']._serialized_end=1596 - _globals['_BATCHOBJECT']._serialized_start=1610 - _globals['_BATCHOBJECT']._serialized_end=2600 - _globals['_BATCHOBJECT_PROPERTIES']._serialized_start=1814 - _globals['_BATCHOBJECT_PROPERTIES']._serialized_end=2458 - _globals['_BATCHOBJECT_SINGLETARGETREFPROPS']._serialized_start=2460 - _globals['_BATCHOBJECT_SINGLETARGETREFPROPS']._serialized_end=2516 - _globals['_BATCHOBJECT_MULTITARGETREFPROPS']._serialized_start=2518 - _globals['_BATCHOBJECT_MULTITARGETREFPROPS']._serialized_end=2600 - _globals['_BATCHREFERENCE']._serialized_start=2603 - _globals['_BATCHREFERENCE']._serialized_end=2756 - 
_globals['_BATCHOBJECTSREPLY']._serialized_start=2759 - _globals['_BATCHOBJECTSREPLY']._serialized_end=2895 - _globals['_BATCHOBJECTSREPLY_BATCHERROR']._serialized_start=2853 - _globals['_BATCHOBJECTSREPLY_BATCHERROR']._serialized_end=2895 - _globals['_BATCHREFERENCESREPLY']._serialized_start=2898 - _globals['_BATCHREFERENCESREPLY']._serialized_end=3040 - _globals['_BATCHREFERENCESREPLY_BATCHERROR']._serialized_start=2853 - _globals['_BATCHREFERENCESREPLY_BATCHERROR']._serialized_end=2895 + _globals['_BATCHSTREAMREPLY']._serialized_end=1814 + _globals['_BATCHSTREAMREPLY_STARTED']._serialized_start=1386 + _globals['_BATCHSTREAMREPLY_STARTED']._serialized_end=1395 + _globals['_BATCHSTREAMREPLY_SHUTTINGDOWN']._serialized_start=1397 + _globals['_BATCHSTREAMREPLY_SHUTTINGDOWN']._serialized_end=1411 + _globals['_BATCHSTREAMREPLY_SHUTDOWN']._serialized_start=1413 + _globals['_BATCHSTREAMREPLY_SHUTDOWN']._serialized_end=1423 + _globals['_BATCHSTREAMREPLY_OUTOFMEMORY']._serialized_start=1425 + _globals['_BATCHSTREAMREPLY_OUTOFMEMORY']._serialized_end=1470 + _globals['_BATCHSTREAMREPLY_BACKOFF']._serialized_start=1472 + _globals['_BATCHSTREAMREPLY_BACKOFF']._serialized_end=1501 + _globals['_BATCHSTREAMREPLY_ACKS']._serialized_start=1503 + _globals['_BATCHSTREAMREPLY_ACKS']._serialized_end=1541 + _globals['_BATCHSTREAMREPLY_RESULTS']._serialized_start=1544 + _globals['_BATCHSTREAMREPLY_RESULTS']._serialized_end=1803 + _globals['_BATCHSTREAMREPLY_RESULTS_ERROR']._serialized_start=1682 + _globals['_BATCHSTREAMREPLY_RESULTS_ERROR']._serialized_end=1748 + _globals['_BATCHSTREAMREPLY_RESULTS_SUCCESS']._serialized_start=1750 + _globals['_BATCHSTREAMREPLY_RESULTS_SUCCESS']._serialized_end=1803 + _globals['_BATCHOBJECT']._serialized_start=1817 + _globals['_BATCHOBJECT']._serialized_end=2807 + _globals['_BATCHOBJECT_PROPERTIES']._serialized_start=2021 + _globals['_BATCHOBJECT_PROPERTIES']._serialized_end=2665 + _globals['_BATCHOBJECT_SINGLETARGETREFPROPS']._serialized_start=2667 + 
_globals['_BATCHOBJECT_SINGLETARGETREFPROPS']._serialized_end=2723 + _globals['_BATCHOBJECT_MULTITARGETREFPROPS']._serialized_start=2725 + _globals['_BATCHOBJECT_MULTITARGETREFPROPS']._serialized_end=2807 + _globals['_BATCHREFERENCE']._serialized_start=2810 + _globals['_BATCHREFERENCE']._serialized_end=2963 + _globals['_BATCHOBJECTSREPLY']._serialized_start=2966 + _globals['_BATCHOBJECTSREPLY']._serialized_end=3102 + _globals['_BATCHOBJECTSREPLY_BATCHERROR']._serialized_start=3060 + _globals['_BATCHOBJECTSREPLY_BATCHERROR']._serialized_end=3102 + _globals['_BATCHREFERENCESREPLY']._serialized_start=3105 + _globals['_BATCHREFERENCESREPLY']._serialized_end=3247 + _globals['_BATCHREFERENCESREPLY_BATCHERROR']._serialized_start=3060 + _globals['_BATCHREFERENCESREPLY_BATCHERROR']._serialized_end=3102 # @@protoc_insertion_point(module_scope) diff --git a/weaviate/proto/v1/v5261/v1/batch_pb2.pyi b/weaviate/proto/v1/v5261/v1/batch_pb2.pyi index d975b0157..bf476fecf 100644 --- a/weaviate/proto/v1/v5261/v1/batch_pb2.pyi +++ b/weaviate/proto/v1/v5261/v1/batch_pb2.pyi @@ -59,7 +59,7 @@ class BatchStreamRequest(_message.Message): def __init__(self, start: _Optional[_Union[BatchStreamRequest.Start, _Mapping]] = ..., data: _Optional[_Union[BatchStreamRequest.Data, _Mapping]] = ..., stop: _Optional[_Union[BatchStreamRequest.Stop, _Mapping]] = ...) -> None: ... class BatchStreamReply(_message.Message): - __slots__ = ("results", "shutting_down", "shutdown", "started", "backoff") + __slots__ = ("results", "shutting_down", "shutdown", "started", "backoff", "acks", "out_of_memory") class Started(_message.Message): __slots__ = () def __init__(self) -> None: ... @@ -69,11 +69,25 @@ class BatchStreamReply(_message.Message): class Shutdown(_message.Message): __slots__ = () def __init__(self) -> None: ... 
+ class OutOfMemory(_message.Message): + __slots__ = ("uuids", "beacons") + UUIDS_FIELD_NUMBER: _ClassVar[int] + BEACONS_FIELD_NUMBER: _ClassVar[int] + uuids: _containers.RepeatedScalarFieldContainer[str] + beacons: _containers.RepeatedScalarFieldContainer[str] + def __init__(self, uuids: _Optional[_Iterable[str]] = ..., beacons: _Optional[_Iterable[str]] = ...) -> None: ... class Backoff(_message.Message): __slots__ = ("batch_size",) BATCH_SIZE_FIELD_NUMBER: _ClassVar[int] batch_size: int def __init__(self, batch_size: _Optional[int] = ...) -> None: ... + class Acks(_message.Message): + __slots__ = ("uuids", "beacons") + UUIDS_FIELD_NUMBER: _ClassVar[int] + BEACONS_FIELD_NUMBER: _ClassVar[int] + uuids: _containers.RepeatedScalarFieldContainer[str] + beacons: _containers.RepeatedScalarFieldContainer[str] + def __init__(self, uuids: _Optional[_Iterable[str]] = ..., beacons: _Optional[_Iterable[str]] = ...) -> None: ... class Results(_message.Message): __slots__ = ("errors", "successes") class Error(_message.Message): @@ -102,12 +116,16 @@ class BatchStreamReply(_message.Message): SHUTDOWN_FIELD_NUMBER: _ClassVar[int] STARTED_FIELD_NUMBER: _ClassVar[int] BACKOFF_FIELD_NUMBER: _ClassVar[int] + ACKS_FIELD_NUMBER: _ClassVar[int] + OUT_OF_MEMORY_FIELD_NUMBER: _ClassVar[int] results: BatchStreamReply.Results shutting_down: BatchStreamReply.ShuttingDown shutdown: BatchStreamReply.Shutdown started: BatchStreamReply.Started backoff: BatchStreamReply.Backoff - def __init__(self, results: _Optional[_Union[BatchStreamReply.Results, _Mapping]] = ..., shutting_down: _Optional[_Union[BatchStreamReply.ShuttingDown, _Mapping]] = ..., shutdown: _Optional[_Union[BatchStreamReply.Shutdown, _Mapping]] = ..., started: _Optional[_Union[BatchStreamReply.Started, _Mapping]] = ..., backoff: _Optional[_Union[BatchStreamReply.Backoff, _Mapping]] = ...) -> None: ... 
+ acks: BatchStreamReply.Acks + out_of_memory: BatchStreamReply.OutOfMemory + def __init__(self, results: _Optional[_Union[BatchStreamReply.Results, _Mapping]] = ..., shutting_down: _Optional[_Union[BatchStreamReply.ShuttingDown, _Mapping]] = ..., shutdown: _Optional[_Union[BatchStreamReply.Shutdown, _Mapping]] = ..., started: _Optional[_Union[BatchStreamReply.Started, _Mapping]] = ..., backoff: _Optional[_Union[BatchStreamReply.Backoff, _Mapping]] = ..., acks: _Optional[_Union[BatchStreamReply.Acks, _Mapping]] = ..., out_of_memory: _Optional[_Union[BatchStreamReply.OutOfMemory, _Mapping]] = ...) -> None: ... class BatchObject(_message.Message): __slots__ = ("uuid", "vector", "properties", "collection", "tenant", "vector_bytes", "vectors") diff --git a/weaviate/proto/v1/v5261/v1/generative_pb2.py b/weaviate/proto/v1/v5261/v1/generative_pb2.py index 1b84afcd7..cf4f84a1d 100644 --- a/weaviate/proto/v1/v5261/v1/generative_pb2.py +++ b/weaviate/proto/v1/v5261/v1/generative_pb2.py @@ -15,7 +15,7 @@ from weaviate.proto.v1.v5261.v1 import base_pb2 as v1_dot_base__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13v1/generative.proto\x12\x0bweaviate.v1\x1a\rv1/base.proto\"\xdd\x03\n\x10GenerativeSearch\x12\"\n\x16single_response_prompt\x18\x01 \x01(\tB\x02\x18\x01\x12!\n\x15grouped_response_task\x18\x02 \x01(\tB\x02\x18\x01\x12\x1e\n\x12grouped_properties\x18\x03 \x03(\tB\x02\x18\x01\x12\x34\n\x06single\x18\x04 \x01(\x0b\x32$.weaviate.v1.GenerativeSearch.Single\x12\x36\n\x07grouped\x18\x05 \x01(\x0b\x32%.weaviate.v1.GenerativeSearch.Grouped\x1aY\n\x06Single\x12\x0e\n\x06prompt\x18\x01 \x01(\t\x12\r\n\x05\x64\x65\x62ug\x18\x02 \x01(\x08\x12\x30\n\x07queries\x18\x03 \x03(\x0b\x32\x1f.weaviate.v1.GenerativeProvider\x1a\x98\x01\n\x07Grouped\x12\x0c\n\x04task\x18\x01 \x01(\t\x12/\n\nproperties\x18\x02 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x00\x88\x01\x01\x12\x30\n\x07queries\x18\x03 
\x03(\x0b\x32\x1f.weaviate.v1.GenerativeProvider\x12\r\n\x05\x64\x65\x62ug\x18\x04 \x01(\x08\x42\r\n\x0b_properties\"\xfd\x05\n\x12GenerativeProvider\x12\x17\n\x0freturn_metadata\x18\x01 \x01(\x08\x12\x35\n\tanthropic\x18\x02 \x01(\x0b\x32 .weaviate.v1.GenerativeAnthropicH\x00\x12\x33\n\x08\x61nyscale\x18\x03 \x01(\x0b\x32\x1f.weaviate.v1.GenerativeAnyscaleH\x00\x12)\n\x03\x61ws\x18\x04 \x01(\x0b\x32\x1a.weaviate.v1.GenerativeAWSH\x00\x12/\n\x06\x63ohere\x18\x05 \x01(\x0b\x32\x1d.weaviate.v1.GenerativeCohereH\x00\x12-\n\x05\x64ummy\x18\x06 \x01(\x0b\x32\x1c.weaviate.v1.GenerativeDummyH\x00\x12\x31\n\x07mistral\x18\x07 \x01(\x0b\x32\x1e.weaviate.v1.GenerativeMistralH\x00\x12/\n\x06ollama\x18\x08 \x01(\x0b\x32\x1d.weaviate.v1.GenerativeOllamaH\x00\x12/\n\x06openai\x18\t \x01(\x0b\x32\x1d.weaviate.v1.GenerativeOpenAIH\x00\x12/\n\x06google\x18\n \x01(\x0b\x32\x1d.weaviate.v1.GenerativeGoogleH\x00\x12\x37\n\ndatabricks\x18\x0b \x01(\x0b\x32!.weaviate.v1.GenerativeDatabricksH\x00\x12\x37\n\nfriendliai\x18\x0c \x01(\x0b\x32!.weaviate.v1.GenerativeFriendliAIH\x00\x12/\n\x06nvidia\x18\r \x01(\x0b\x32\x1d.weaviate.v1.GenerativeNvidiaH\x00\x12)\n\x03xai\x18\x0e \x01(\x0b\x32\x1a.weaviate.v1.GenerativeXAIH\x00\x12;\n\x0c\x63ontextualai\x18\x0f \x01(\x0b\x32#.weaviate.v1.GenerativeContextualAIH\x00\x42\x06\n\x04kind\"\xb1\x03\n\x13GenerativeAnthropic\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x18\n\x0btemperature\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x12\n\x05top_k\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12\x12\n\x05top_p\x18\x06 \x01(\x01H\x05\x88\x01\x01\x12\x33\n\x0estop_sequences\x18\x07 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x06\x88\x01\x01\x12+\n\x06images\x18\x08 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x35\n\x10image_properties\x18\t 
\x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x08\x88\x01\x01\x42\x0b\n\t_base_urlB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_kB\x08\n\x06_top_pB\x11\n\x0f_stop_sequencesB\t\n\x07_imagesB\x13\n\x11_image_properties\"\x80\x01\n\x12GenerativeAnyscale\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 \x01(\x01H\x02\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\x0e\n\x0c_temperature\"\xc5\x03\n\rGenerativeAWS\x12\x12\n\x05model\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0btemperature\x18\x08 \x01(\x01H\x01\x88\x01\x01\x12\x14\n\x07service\x18\t \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06region\x18\n \x01(\tH\x03\x88\x01\x01\x12\x15\n\x08\x65ndpoint\x18\x0b \x01(\tH\x04\x88\x01\x01\x12\x19\n\x0ctarget_model\x18\x0c \x01(\tH\x05\x88\x01\x01\x12\x1b\n\x0etarget_variant\x18\r \x01(\tH\x06\x88\x01\x01\x12+\n\x06images\x18\x0e \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0f \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x08\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x10 \x01(\x03H\t\x88\x01\x01\x42\x08\n\x06_modelB\x0e\n\x0c_temperatureB\n\n\x08_serviceB\t\n\x07_regionB\x0b\n\t_endpointB\x0f\n\r_target_modelB\x11\n\x0f_target_variantB\t\n\x07_imagesB\x13\n\x11_image_propertiesB\r\n\x0b_max_tokens\"\x88\x04\n\x10GenerativeCohere\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1e\n\x11\x66requency_penalty\x18\x02 \x01(\x01H\x01\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x12\x12\n\x05model\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x0e\n\x01k\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12\x0e\n\x01p\x18\x06 \x01(\x01H\x05\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x07 \x01(\x01H\x06\x88\x01\x01\x12\x33\n\x0estop_sequences\x18\x08 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x18\n\x0btemperature\x18\t \x01(\x01H\x08\x88\x01\x01\x12+\n\x06images\x18\n 
\x01(\x0b\x32\x16.weaviate.v1.TextArrayH\t\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0b \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\n\x88\x01\x01\x42\x0b\n\t_base_urlB\x14\n\x12_frequency_penaltyB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x04\n\x02_kB\x04\n\x02_pB\x13\n\x11_presence_penaltyB\x11\n\x0f_stop_sequencesB\x0e\n\x0c_temperatureB\t\n\x07_imagesB\x13\n\x11_image_properties\"\x11\n\x0fGenerativeDummy\"\xc5\x01\n\x11GenerativeMistral\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x18\n\x0btemperature\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x12\n\x05top_p\x18\x05 \x01(\x01H\x04\x88\x01\x01\x42\x0b\n\t_base_urlB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_p\"\x8a\x02\n\x10GenerativeOllama\x12\x19\n\x0c\x61pi_endpoint\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12+\n\x06images\x18\x04 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x03\x88\x01\x01\x12\x35\n\x10image_properties\x18\x05 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x04\x88\x01\x01\x42\x0f\n\r_api_endpointB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\t\n\x07_imagesB\x13\n\x11_image_properties\"\xe3\x08\n\x10GenerativeOpenAI\x12\x1e\n\x11\x66requency_penalty\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x0e\n\x01n\x18\x04 \x01(\x03H\x03\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x05 \x01(\x01H\x04\x88\x01\x01\x12)\n\x04stop\x18\x06 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x05\x88\x01\x01\x12\x18\n\x0btemperature\x18\x07 \x01(\x01H\x06\x88\x01\x01\x12\x12\n\x05top_p\x18\x08 \x01(\x01H\x07\x88\x01\x01\x12\x15\n\x08\x62\x61se_url\x18\t \x01(\tH\x08\x88\x01\x01\x12\x18\n\x0b\x61pi_version\x18\n \x01(\tH\t\x88\x01\x01\x12\x1a\n\rresource_name\x18\x0b 
\x01(\tH\n\x88\x01\x01\x12\x1a\n\rdeployment_id\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12\x15\n\x08is_azure\x18\r \x01(\x08H\x0c\x88\x01\x01\x12+\n\x06images\x18\x0e \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\r\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0f \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x0e\x88\x01\x01\x12L\n\x10reasoning_effort\x18\x10 \x01(\x0e\x32-.weaviate.v1.GenerativeOpenAI.ReasoningEffortH\x0f\x88\x01\x01\x12?\n\tverbosity\x18\x11 \x01(\x0e\x32\'.weaviate.v1.GenerativeOpenAI.VerbosityH\x10\x88\x01\x01\"\xa3\x01\n\x0fReasoningEffort\x12 \n\x1cREASONING_EFFORT_UNSPECIFIED\x10\x00\x12\x1c\n\x18REASONING_EFFORT_MINIMAL\x10\x01\x12\x18\n\x14REASONING_EFFORT_LOW\x10\x02\x12\x1b\n\x17REASONING_EFFORT_MEDIUM\x10\x03\x12\x19\n\x15REASONING_EFFORT_HIGH\x10\x04\"c\n\tVerbosity\x12\x19\n\x15VERBOSITY_UNSPECIFIED\x10\x00\x12\x11\n\rVERBOSITY_LOW\x10\x01\x12\x14\n\x10VERBOSITY_MEDIUM\x10\x02\x12\x12\n\x0eVERBOSITY_HIGH\x10\x03\x42\x14\n\x12_frequency_penaltyB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x04\n\x02_nB\x13\n\x11_presence_penaltyB\x07\n\x05_stopB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\x0b\n\t_base_urlB\x0e\n\x0c_api_versionB\x10\n\x0e_resource_nameB\x10\n\x0e_deployment_idB\x0b\n\t_is_azureB\t\n\x07_imagesB\x13\n\x11_image_propertiesB\x13\n\x11_reasoning_effortB\x0c\n\n_verbosity\"\x92\x05\n\x10GenerativeGoogle\x12\x1e\n\x11\x66requency_penalty\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x18\n\x0btemperature\x18\x05 \x01(\x01H\x04\x88\x01\x01\x12\x12\n\x05top_k\x18\x06 \x01(\x03H\x05\x88\x01\x01\x12\x12\n\x05top_p\x18\x07 \x01(\x01H\x06\x88\x01\x01\x12\x33\n\x0estop_sequences\x18\x08 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x19\n\x0c\x61pi_endpoint\x18\t \x01(\tH\x08\x88\x01\x01\x12\x17\n\nproject_id\x18\n \x01(\tH\t\x88\x01\x01\x12\x18\n\x0b\x65ndpoint_id\x18\x0b 
\x01(\tH\n\x88\x01\x01\x12\x13\n\x06region\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12+\n\x06images\x18\r \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x0c\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0e \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\r\x88\x01\x01\x42\x14\n\x12_frequency_penaltyB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x13\n\x11_presence_penaltyB\x0e\n\x0c_temperatureB\x08\n\x06_top_kB\x08\n\x06_top_pB\x11\n\x0f_stop_sequencesB\x0f\n\r_api_endpointB\r\n\x0b_project_idB\x0e\n\x0c_endpoint_idB\t\n\x07_regionB\t\n\x07_imagesB\x13\n\x11_image_properties\"\xd0\x03\n\x14GenerativeDatabricks\x12\x15\n\x08\x65ndpoint\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x1e\n\x11\x66requency_penalty\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x16\n\tlog_probs\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x1a\n\rtop_log_probs\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x06 \x01(\x03H\x05\x88\x01\x01\x12\x0e\n\x01n\x18\x07 \x01(\x03H\x06\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x08 \x01(\x01H\x07\x88\x01\x01\x12)\n\x04stop\x18\t \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x08\x88\x01\x01\x12\x18\n\x0btemperature\x18\n \x01(\x01H\t\x88\x01\x01\x12\x12\n\x05top_p\x18\x0b \x01(\x01H\n\x88\x01\x01\x42\x0b\n\t_endpointB\x08\n\x06_modelB\x14\n\x12_frequency_penaltyB\x0c\n\n_log_probsB\x10\n\x0e_top_log_probsB\r\n\x0b_max_tokensB\x04\n\x02_nB\x13\n\x11_presence_penaltyB\x07\n\x05_stopB\x0e\n\x0c_temperatureB\x08\n\x06_top_p\"\xde\x01\n\x14GenerativeFriendliAI\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x12\x18\n\x0btemperature\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x0e\n\x01n\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12\x12\n\x05top_p\x18\x06 
\x01(\x01H\x05\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\r\n\x0b_max_tokensB\x0e\n\x0c_temperatureB\x04\n\x02_nB\x08\n\x06_top_p\"\xc4\x01\n\x10GenerativeNvidia\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x12\n\x05top_p\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x05 \x01(\x03H\x04\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\r\n\x0b_max_tokens\"\xc5\x02\n\rGenerativeXAI\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x12\n\x05top_p\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12+\n\x06images\x18\x06 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x05\x88\x01\x01\x12\x35\n\x10image_properties\x18\x07 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x06\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\r\n\x0b_max_tokensB\t\n\x07_imagesB\x13\n\x11_image_properties\"\xce\x02\n\x16GenerativeContextualAI\x12\x12\n\x05model\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0btemperature\x18\x02 \x01(\x01H\x01\x88\x01\x01\x12\x12\n\x05top_p\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x1b\n\x0emax_new_tokens\x18\x04 \x01(\x03H\x03\x88\x01\x01\x12\x1a\n\rsystem_prompt\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x1d\n\x10\x61void_commentary\x18\x06 \x01(\x08H\x05\x88\x01\x01\x12.\n\tknowledge\x18\x07 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x06\x88\x01\x01\x42\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\x11\n\x0f_max_new_tokensB\x10\n\x0e_system_promptB\x13\n\x11_avoid_commentaryB\x0c\n\n_knowledge\"\x92\x01\n\x1bGenerativeAnthropicMetadata\x12=\n\x05usage\x18\x01 \x01(\x0b\x32..weaviate.v1.GenerativeAnthropicMetadata.Usage\x1a\x34\n\x05Usage\x12\x14\n\x0cinput_tokens\x18\x01 
\x01(\x03\x12\x15\n\routput_tokens\x18\x02 \x01(\x03\"\x1c\n\x1aGenerativeAnyscaleMetadata\"\x17\n\x15GenerativeAWSMetadata\"\x9c\x06\n\x18GenerativeCohereMetadata\x12J\n\x0b\x61pi_version\x18\x01 \x01(\x0b\x32\x30.weaviate.v1.GenerativeCohereMetadata.ApiVersionH\x00\x88\x01\x01\x12L\n\x0c\x62illed_units\x18\x02 \x01(\x0b\x32\x31.weaviate.v1.GenerativeCohereMetadata.BilledUnitsH\x01\x88\x01\x01\x12\x41\n\x06tokens\x18\x03 \x01(\x0b\x32,.weaviate.v1.GenerativeCohereMetadata.TokensH\x02\x88\x01\x01\x12-\n\x08warnings\x18\x04 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x03\x88\x01\x01\x1a\x8e\x01\n\nApiVersion\x12\x14\n\x07version\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1a\n\ris_deprecated\x18\x02 \x01(\x08H\x01\x88\x01\x01\x12\x1c\n\x0fis_experimental\x18\x03 \x01(\x08H\x02\x88\x01\x01\x42\n\n\x08_versionB\x10\n\x0e_is_deprecatedB\x12\n\x10_is_experimental\x1a\xc5\x01\n\x0b\x42illedUnits\x12\x19\n\x0cinput_tokens\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1a\n\routput_tokens\x18\x02 \x01(\x01H\x01\x88\x01\x01\x12\x19\n\x0csearch_units\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x1c\n\x0f\x63lassifications\x18\x04 \x01(\x01H\x03\x88\x01\x01\x42\x0f\n\r_input_tokensB\x10\n\x0e_output_tokensB\x0f\n\r_search_unitsB\x12\n\x10_classifications\x1a\x62\n\x06Tokens\x12\x19\n\x0cinput_tokens\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1a\n\routput_tokens\x18\x02 \x01(\x01H\x01\x88\x01\x01\x42\x0f\n\r_input_tokensB\x10\n\x0e_output_tokensB\x0e\n\x0c_api_versionB\x0f\n\r_billed_unitsB\t\n\x07_tokensB\x0b\n\t_warnings\"\x19\n\x17GenerativeDummyMetadata\"\x81\x02\n\x19GenerativeMistralMetadata\x12@\n\x05usage\x18\x01 \x01(\x0b\x32,.weaviate.v1.GenerativeMistralMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 
\x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\x1a\n\x18GenerativeOllamaMetadata\"\xff\x01\n\x18GenerativeOpenAIMetadata\x12?\n\x05usage\x18\x01 \x01(\x0b\x32+.weaviate.v1.GenerativeOpenAIMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\xe8\x06\n\x18GenerativeGoogleMetadata\x12\x45\n\x08metadata\x18\x01 \x01(\x0b\x32..weaviate.v1.GenerativeGoogleMetadata.MetadataH\x00\x88\x01\x01\x12P\n\x0eusage_metadata\x18\x02 \x01(\x0b\x32\x33.weaviate.v1.GenerativeGoogleMetadata.UsageMetadataH\x01\x88\x01\x01\x1a~\n\nTokenCount\x12&\n\x19total_billable_characters\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x42\x1c\n\x1a_total_billable_charactersB\x0f\n\r_total_tokens\x1a\xe1\x01\n\rTokenMetadata\x12P\n\x11input_token_count\x18\x01 \x01(\x0b\x32\x30.weaviate.v1.GenerativeGoogleMetadata.TokenCountH\x00\x88\x01\x01\x12Q\n\x12output_token_count\x18\x02 \x01(\x0b\x32\x30.weaviate.v1.GenerativeGoogleMetadata.TokenCountH\x01\x88\x01\x01\x42\x14\n\x12_input_token_countB\x15\n\x13_output_token_count\x1ao\n\x08Metadata\x12P\n\x0etoken_metadata\x18\x01 \x01(\x0b\x32\x33.weaviate.v1.GenerativeGoogleMetadata.TokenMetadataH\x00\x88\x01\x01\x42\x11\n\x0f_token_metadata\x1a\xbd\x01\n\rUsageMetadata\x12\x1f\n\x12prompt_token_count\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12#\n\x16\x63\x61ndidates_token_count\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1e\n\x11total_token_count\x18\x03 
\x01(\x03H\x02\x88\x01\x01\x42\x15\n\x13_prompt_token_countB\x19\n\x17_candidates_token_countB\x14\n\x12_total_token_countB\x0b\n\t_metadataB\x11\n\x0f_usage_metadata\"\x87\x02\n\x1cGenerativeDatabricksMetadata\x12\x43\n\x05usage\x18\x01 \x01(\x0b\x32/.weaviate.v1.GenerativeDatabricksMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\x87\x02\n\x1cGenerativeFriendliAIMetadata\x12\x43\n\x05usage\x18\x01 \x01(\x0b\x32/.weaviate.v1.GenerativeFriendliAIMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\xff\x01\n\x18GenerativeNvidiaMetadata\x12?\n\x05usage\x18\x01 \x01(\x0b\x32+.weaviate.v1.GenerativeNvidiaMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\xf9\x01\n\x15GenerativeXAIMetadata\x12<\n\x05usage\x18\x01 \x01(\x0b\x32(.weaviate.v1.GenerativeXAIMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 
\x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\x8f\x06\n\x12GenerativeMetadata\x12=\n\tanthropic\x18\x01 \x01(\x0b\x32(.weaviate.v1.GenerativeAnthropicMetadataH\x00\x12;\n\x08\x61nyscale\x18\x02 \x01(\x0b\x32\'.weaviate.v1.GenerativeAnyscaleMetadataH\x00\x12\x31\n\x03\x61ws\x18\x03 \x01(\x0b\x32\".weaviate.v1.GenerativeAWSMetadataH\x00\x12\x37\n\x06\x63ohere\x18\x04 \x01(\x0b\x32%.weaviate.v1.GenerativeCohereMetadataH\x00\x12\x35\n\x05\x64ummy\x18\x05 \x01(\x0b\x32$.weaviate.v1.GenerativeDummyMetadataH\x00\x12\x39\n\x07mistral\x18\x06 \x01(\x0b\x32&.weaviate.v1.GenerativeMistralMetadataH\x00\x12\x37\n\x06ollama\x18\x07 \x01(\x0b\x32%.weaviate.v1.GenerativeOllamaMetadataH\x00\x12\x37\n\x06openai\x18\x08 \x01(\x0b\x32%.weaviate.v1.GenerativeOpenAIMetadataH\x00\x12\x37\n\x06google\x18\t \x01(\x0b\x32%.weaviate.v1.GenerativeGoogleMetadataH\x00\x12?\n\ndatabricks\x18\n \x01(\x0b\x32).weaviate.v1.GenerativeDatabricksMetadataH\x00\x12?\n\nfriendliai\x18\x0b \x01(\x0b\x32).weaviate.v1.GenerativeFriendliAIMetadataH\x00\x12\x37\n\x06nvidia\x18\x0c \x01(\x0b\x32%.weaviate.v1.GenerativeNvidiaMetadataH\x00\x12\x31\n\x03xai\x18\r \x01(\x0b\x32\".weaviate.v1.GenerativeXAIMetadataH\x00\x42\x06\n\x04kind\"\xa2\x01\n\x0fGenerativeReply\x12\x0e\n\x06result\x18\x01 \x01(\t\x12\x30\n\x05\x64\x65\x62ug\x18\x02 \x01(\x0b\x32\x1c.weaviate.v1.GenerativeDebugH\x00\x88\x01\x01\x12\x36\n\x08metadata\x18\x03 \x01(\x0b\x32\x1f.weaviate.v1.GenerativeMetadataH\x01\x88\x01\x01\x42\x08\n\x06_debugB\x0b\n\t_metadata\"@\n\x10GenerativeResult\x12,\n\x06values\x18\x01 \x03(\x0b\x32\x1c.weaviate.v1.GenerativeReply\";\n\x0fGenerativeDebug\x12\x18\n\x0b\x66ull_prompt\x18\x01 \x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_full_promptBt\n#io.weaviate.client.grpc.protocol.v1B\x17WeaviateProtoGenerativeZ4github.com/weaviate/weaviate/grpc/generated;protocolb\x06proto3') +DESCRIPTOR = 
_descriptor_pool.Default().AddSerializedFile(b'\n\x13v1/generative.proto\x12\x0bweaviate.v1\x1a\rv1/base.proto\"\xdd\x03\n\x10GenerativeSearch\x12\"\n\x16single_response_prompt\x18\x01 \x01(\tB\x02\x18\x01\x12!\n\x15grouped_response_task\x18\x02 \x01(\tB\x02\x18\x01\x12\x1e\n\x12grouped_properties\x18\x03 \x03(\tB\x02\x18\x01\x12\x34\n\x06single\x18\x04 \x01(\x0b\x32$.weaviate.v1.GenerativeSearch.Single\x12\x36\n\x07grouped\x18\x05 \x01(\x0b\x32%.weaviate.v1.GenerativeSearch.Grouped\x1aY\n\x06Single\x12\x0e\n\x06prompt\x18\x01 \x01(\t\x12\r\n\x05\x64\x65\x62ug\x18\x02 \x01(\x08\x12\x30\n\x07queries\x18\x03 \x03(\x0b\x32\x1f.weaviate.v1.GenerativeProvider\x1a\x98\x01\n\x07Grouped\x12\x0c\n\x04task\x18\x01 \x01(\t\x12/\n\nproperties\x18\x02 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x00\x88\x01\x01\x12\x30\n\x07queries\x18\x03 \x03(\x0b\x32\x1f.weaviate.v1.GenerativeProvider\x12\r\n\x05\x64\x65\x62ug\x18\x04 \x01(\x08\x42\r\n\x0b_properties\"\xfd\x05\n\x12GenerativeProvider\x12\x17\n\x0freturn_metadata\x18\x01 \x01(\x08\x12\x35\n\tanthropic\x18\x02 \x01(\x0b\x32 .weaviate.v1.GenerativeAnthropicH\x00\x12\x33\n\x08\x61nyscale\x18\x03 \x01(\x0b\x32\x1f.weaviate.v1.GenerativeAnyscaleH\x00\x12)\n\x03\x61ws\x18\x04 \x01(\x0b\x32\x1a.weaviate.v1.GenerativeAWSH\x00\x12/\n\x06\x63ohere\x18\x05 \x01(\x0b\x32\x1d.weaviate.v1.GenerativeCohereH\x00\x12-\n\x05\x64ummy\x18\x06 \x01(\x0b\x32\x1c.weaviate.v1.GenerativeDummyH\x00\x12\x31\n\x07mistral\x18\x07 \x01(\x0b\x32\x1e.weaviate.v1.GenerativeMistralH\x00\x12/\n\x06ollama\x18\x08 \x01(\x0b\x32\x1d.weaviate.v1.GenerativeOllamaH\x00\x12/\n\x06openai\x18\t \x01(\x0b\x32\x1d.weaviate.v1.GenerativeOpenAIH\x00\x12/\n\x06google\x18\n \x01(\x0b\x32\x1d.weaviate.v1.GenerativeGoogleH\x00\x12\x37\n\ndatabricks\x18\x0b \x01(\x0b\x32!.weaviate.v1.GenerativeDatabricksH\x00\x12\x37\n\nfriendliai\x18\x0c \x01(\x0b\x32!.weaviate.v1.GenerativeFriendliAIH\x00\x12/\n\x06nvidia\x18\r 
\x01(\x0b\x32\x1d.weaviate.v1.GenerativeNvidiaH\x00\x12)\n\x03xai\x18\x0e \x01(\x0b\x32\x1a.weaviate.v1.GenerativeXAIH\x00\x12;\n\x0c\x63ontextualai\x18\x0f \x01(\x0b\x32#.weaviate.v1.GenerativeContextualAIH\x00\x42\x06\n\x04kind\"\xb1\x03\n\x13GenerativeAnthropic\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x18\n\x0btemperature\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x12\n\x05top_k\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12\x12\n\x05top_p\x18\x06 \x01(\x01H\x05\x88\x01\x01\x12\x33\n\x0estop_sequences\x18\x07 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x06\x88\x01\x01\x12+\n\x06images\x18\x08 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x35\n\x10image_properties\x18\t \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x08\x88\x01\x01\x42\x0b\n\t_base_urlB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_kB\x08\n\x06_top_pB\x11\n\x0f_stop_sequencesB\t\n\x07_imagesB\x13\n\x11_image_properties\"\x80\x01\n\x12GenerativeAnyscale\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 \x01(\x01H\x02\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\x0e\n\x0c_temperature\"\x8d\x04\n\rGenerativeAWS\x12\x12\n\x05model\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0btemperature\x18\x08 \x01(\x01H\x01\x88\x01\x01\x12\x14\n\x07service\x18\t \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06region\x18\n \x01(\tH\x03\x88\x01\x01\x12\x15\n\x08\x65ndpoint\x18\x0b \x01(\tH\x04\x88\x01\x01\x12\x19\n\x0ctarget_model\x18\x0c \x01(\tH\x05\x88\x01\x01\x12\x1b\n\x0etarget_variant\x18\r \x01(\tH\x06\x88\x01\x01\x12+\n\x06images\x18\x0e \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0f \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x08\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x10 
\x01(\x03H\t\x88\x01\x01\x12\x33\n\x0estop_sequences\x18\x11 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\n\x88\x01\x01\x42\x08\n\x06_modelB\x0e\n\x0c_temperatureB\n\n\x08_serviceB\t\n\x07_regionB\x0b\n\t_endpointB\x0f\n\r_target_modelB\x11\n\x0f_target_variantB\t\n\x07_imagesB\x13\n\x11_image_propertiesB\r\n\x0b_max_tokensB\x11\n\x0f_stop_sequences\"\x88\x04\n\x10GenerativeCohere\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1e\n\x11\x66requency_penalty\x18\x02 \x01(\x01H\x01\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x12\x12\n\x05model\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x0e\n\x01k\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12\x0e\n\x01p\x18\x06 \x01(\x01H\x05\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x07 \x01(\x01H\x06\x88\x01\x01\x12\x33\n\x0estop_sequences\x18\x08 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x18\n\x0btemperature\x18\t \x01(\x01H\x08\x88\x01\x01\x12+\n\x06images\x18\n \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\t\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0b \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\n\x88\x01\x01\x42\x0b\n\t_base_urlB\x14\n\x12_frequency_penaltyB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x04\n\x02_kB\x04\n\x02_pB\x13\n\x11_presence_penaltyB\x11\n\x0f_stop_sequencesB\x0e\n\x0c_temperatureB\t\n\x07_imagesB\x13\n\x11_image_properties\"\x11\n\x0fGenerativeDummy\"\xc5\x01\n\x11GenerativeMistral\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x18\n\x0btemperature\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x12\n\x05top_p\x18\x05 \x01(\x01H\x04\x88\x01\x01\x42\x0b\n\t_base_urlB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_p\"\x8a\x02\n\x10GenerativeOllama\x12\x19\n\x0c\x61pi_endpoint\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 
\x01(\x01H\x02\x88\x01\x01\x12+\n\x06images\x18\x04 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x03\x88\x01\x01\x12\x35\n\x10image_properties\x18\x05 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x04\x88\x01\x01\x42\x0f\n\r_api_endpointB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\t\n\x07_imagesB\x13\n\x11_image_properties\"\xe3\x08\n\x10GenerativeOpenAI\x12\x1e\n\x11\x66requency_penalty\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x0e\n\x01n\x18\x04 \x01(\x03H\x03\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x05 \x01(\x01H\x04\x88\x01\x01\x12)\n\x04stop\x18\x06 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x05\x88\x01\x01\x12\x18\n\x0btemperature\x18\x07 \x01(\x01H\x06\x88\x01\x01\x12\x12\n\x05top_p\x18\x08 \x01(\x01H\x07\x88\x01\x01\x12\x15\n\x08\x62\x61se_url\x18\t \x01(\tH\x08\x88\x01\x01\x12\x18\n\x0b\x61pi_version\x18\n \x01(\tH\t\x88\x01\x01\x12\x1a\n\rresource_name\x18\x0b \x01(\tH\n\x88\x01\x01\x12\x1a\n\rdeployment_id\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12\x15\n\x08is_azure\x18\r \x01(\x08H\x0c\x88\x01\x01\x12+\n\x06images\x18\x0e \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\r\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0f \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x0e\x88\x01\x01\x12L\n\x10reasoning_effort\x18\x10 \x01(\x0e\x32-.weaviate.v1.GenerativeOpenAI.ReasoningEffortH\x0f\x88\x01\x01\x12?\n\tverbosity\x18\x11 \x01(\x0e\x32\'.weaviate.v1.GenerativeOpenAI.VerbosityH\x10\x88\x01\x01\"\xa3\x01\n\x0fReasoningEffort\x12 
\n\x1cREASONING_EFFORT_UNSPECIFIED\x10\x00\x12\x1c\n\x18REASONING_EFFORT_MINIMAL\x10\x01\x12\x18\n\x14REASONING_EFFORT_LOW\x10\x02\x12\x1b\n\x17REASONING_EFFORT_MEDIUM\x10\x03\x12\x19\n\x15REASONING_EFFORT_HIGH\x10\x04\"c\n\tVerbosity\x12\x19\n\x15VERBOSITY_UNSPECIFIED\x10\x00\x12\x11\n\rVERBOSITY_LOW\x10\x01\x12\x14\n\x10VERBOSITY_MEDIUM\x10\x02\x12\x12\n\x0eVERBOSITY_HIGH\x10\x03\x42\x14\n\x12_frequency_penaltyB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x04\n\x02_nB\x13\n\x11_presence_penaltyB\x07\n\x05_stopB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\x0b\n\t_base_urlB\x0e\n\x0c_api_versionB\x10\n\x0e_resource_nameB\x10\n\x0e_deployment_idB\x0b\n\t_is_azureB\t\n\x07_imagesB\x13\n\x11_image_propertiesB\x13\n\x11_reasoning_effortB\x0c\n\n_verbosity\"\x92\x05\n\x10GenerativeGoogle\x12\x1e\n\x11\x66requency_penalty\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x18\n\x0btemperature\x18\x05 \x01(\x01H\x04\x88\x01\x01\x12\x12\n\x05top_k\x18\x06 \x01(\x03H\x05\x88\x01\x01\x12\x12\n\x05top_p\x18\x07 \x01(\x01H\x06\x88\x01\x01\x12\x33\n\x0estop_sequences\x18\x08 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x19\n\x0c\x61pi_endpoint\x18\t \x01(\tH\x08\x88\x01\x01\x12\x17\n\nproject_id\x18\n \x01(\tH\t\x88\x01\x01\x12\x18\n\x0b\x65ndpoint_id\x18\x0b \x01(\tH\n\x88\x01\x01\x12\x13\n\x06region\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12+\n\x06images\x18\r \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x0c\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0e 
\x01(\x0b\x32\x16.weaviate.v1.TextArrayH\r\x88\x01\x01\x42\x14\n\x12_frequency_penaltyB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x13\n\x11_presence_penaltyB\x0e\n\x0c_temperatureB\x08\n\x06_top_kB\x08\n\x06_top_pB\x11\n\x0f_stop_sequencesB\x0f\n\r_api_endpointB\r\n\x0b_project_idB\x0e\n\x0c_endpoint_idB\t\n\x07_regionB\t\n\x07_imagesB\x13\n\x11_image_properties\"\xd0\x03\n\x14GenerativeDatabricks\x12\x15\n\x08\x65ndpoint\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x1e\n\x11\x66requency_penalty\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x16\n\tlog_probs\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x1a\n\rtop_log_probs\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x06 \x01(\x03H\x05\x88\x01\x01\x12\x0e\n\x01n\x18\x07 \x01(\x03H\x06\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x08 \x01(\x01H\x07\x88\x01\x01\x12)\n\x04stop\x18\t \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x08\x88\x01\x01\x12\x18\n\x0btemperature\x18\n \x01(\x01H\t\x88\x01\x01\x12\x12\n\x05top_p\x18\x0b \x01(\x01H\n\x88\x01\x01\x42\x0b\n\t_endpointB\x08\n\x06_modelB\x14\n\x12_frequency_penaltyB\x0c\n\n_log_probsB\x10\n\x0e_top_log_probsB\r\n\x0b_max_tokensB\x04\n\x02_nB\x13\n\x11_presence_penaltyB\x07\n\x05_stopB\x0e\n\x0c_temperatureB\x08\n\x06_top_p\"\xde\x01\n\x14GenerativeFriendliAI\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x12\x18\n\x0btemperature\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x0e\n\x01n\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12\x12\n\x05top_p\x18\x06 \x01(\x01H\x05\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\r\n\x0b_max_tokensB\x0e\n\x0c_temperatureB\x04\n\x02_nB\x08\n\x06_top_p\"\xc4\x01\n\x10GenerativeNvidia\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x12\n\x05top_p\x18\x04 
\x01(\x01H\x03\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x05 \x01(\x03H\x04\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\r\n\x0b_max_tokens\"\xc5\x02\n\rGenerativeXAI\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x12\n\x05top_p\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12+\n\x06images\x18\x06 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x05\x88\x01\x01\x12\x35\n\x10image_properties\x18\x07 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x06\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\r\n\x0b_max_tokensB\t\n\x07_imagesB\x13\n\x11_image_properties\"\xce\x02\n\x16GenerativeContextualAI\x12\x12\n\x05model\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0btemperature\x18\x02 \x01(\x01H\x01\x88\x01\x01\x12\x12\n\x05top_p\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x1b\n\x0emax_new_tokens\x18\x04 \x01(\x03H\x03\x88\x01\x01\x12\x1a\n\rsystem_prompt\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x1d\n\x10\x61void_commentary\x18\x06 \x01(\x08H\x05\x88\x01\x01\x12.\n\tknowledge\x18\x07 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x06\x88\x01\x01\x42\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\x11\n\x0f_max_new_tokensB\x10\n\x0e_system_promptB\x13\n\x11_avoid_commentaryB\x0c\n\n_knowledge\"\x92\x01\n\x1bGenerativeAnthropicMetadata\x12=\n\x05usage\x18\x01 \x01(\x0b\x32..weaviate.v1.GenerativeAnthropicMetadata.Usage\x1a\x34\n\x05Usage\x12\x14\n\x0cinput_tokens\x18\x01 \x01(\x03\x12\x15\n\routput_tokens\x18\x02 \x01(\x03\"\x1c\n\x1aGenerativeAnyscaleMetadata\"\x17\n\x15GenerativeAWSMetadata\"\x9c\x06\n\x18GenerativeCohereMetadata\x12J\n\x0b\x61pi_version\x18\x01 \x01(\x0b\x32\x30.weaviate.v1.GenerativeCohereMetadata.ApiVersionH\x00\x88\x01\x01\x12L\n\x0c\x62illed_units\x18\x02 
\x01(\x0b\x32\x31.weaviate.v1.GenerativeCohereMetadata.BilledUnitsH\x01\x88\x01\x01\x12\x41\n\x06tokens\x18\x03 \x01(\x0b\x32,.weaviate.v1.GenerativeCohereMetadata.TokensH\x02\x88\x01\x01\x12-\n\x08warnings\x18\x04 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x03\x88\x01\x01\x1a\x8e\x01\n\nApiVersion\x12\x14\n\x07version\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1a\n\ris_deprecated\x18\x02 \x01(\x08H\x01\x88\x01\x01\x12\x1c\n\x0fis_experimental\x18\x03 \x01(\x08H\x02\x88\x01\x01\x42\n\n\x08_versionB\x10\n\x0e_is_deprecatedB\x12\n\x10_is_experimental\x1a\xc5\x01\n\x0b\x42illedUnits\x12\x19\n\x0cinput_tokens\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1a\n\routput_tokens\x18\x02 \x01(\x01H\x01\x88\x01\x01\x12\x19\n\x0csearch_units\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x1c\n\x0f\x63lassifications\x18\x04 \x01(\x01H\x03\x88\x01\x01\x42\x0f\n\r_input_tokensB\x10\n\x0e_output_tokensB\x0f\n\r_search_unitsB\x12\n\x10_classifications\x1a\x62\n\x06Tokens\x12\x19\n\x0cinput_tokens\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1a\n\routput_tokens\x18\x02 \x01(\x01H\x01\x88\x01\x01\x42\x0f\n\r_input_tokensB\x10\n\x0e_output_tokensB\x0e\n\x0c_api_versionB\x0f\n\r_billed_unitsB\t\n\x07_tokensB\x0b\n\t_warnings\"\x19\n\x17GenerativeDummyMetadata\"\x81\x02\n\x19GenerativeMistralMetadata\x12@\n\x05usage\x18\x01 \x01(\x0b\x32,.weaviate.v1.GenerativeMistralMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\x1a\n\x18GenerativeOllamaMetadata\"\xff\x01\n\x18GenerativeOpenAIMetadata\x12?\n\x05usage\x18\x01 \x01(\x0b\x32+.weaviate.v1.GenerativeOpenAIMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 
\x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\xe8\x06\n\x18GenerativeGoogleMetadata\x12\x45\n\x08metadata\x18\x01 \x01(\x0b\x32..weaviate.v1.GenerativeGoogleMetadata.MetadataH\x00\x88\x01\x01\x12P\n\x0eusage_metadata\x18\x02 \x01(\x0b\x32\x33.weaviate.v1.GenerativeGoogleMetadata.UsageMetadataH\x01\x88\x01\x01\x1a~\n\nTokenCount\x12&\n\x19total_billable_characters\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x42\x1c\n\x1a_total_billable_charactersB\x0f\n\r_total_tokens\x1a\xe1\x01\n\rTokenMetadata\x12P\n\x11input_token_count\x18\x01 \x01(\x0b\x32\x30.weaviate.v1.GenerativeGoogleMetadata.TokenCountH\x00\x88\x01\x01\x12Q\n\x12output_token_count\x18\x02 \x01(\x0b\x32\x30.weaviate.v1.GenerativeGoogleMetadata.TokenCountH\x01\x88\x01\x01\x42\x14\n\x12_input_token_countB\x15\n\x13_output_token_count\x1ao\n\x08Metadata\x12P\n\x0etoken_metadata\x18\x01 \x01(\x0b\x32\x33.weaviate.v1.GenerativeGoogleMetadata.TokenMetadataH\x00\x88\x01\x01\x42\x11\n\x0f_token_metadata\x1a\xbd\x01\n\rUsageMetadata\x12\x1f\n\x12prompt_token_count\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12#\n\x16\x63\x61ndidates_token_count\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1e\n\x11total_token_count\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x15\n\x13_prompt_token_countB\x19\n\x17_candidates_token_countB\x14\n\x12_total_token_countB\x0b\n\t_metadataB\x11\n\x0f_usage_metadata\"\x87\x02\n\x1cGenerativeDatabricksMetadata\x12\x43\n\x05usage\x18\x01 \x01(\x0b\x32/.weaviate.v1.GenerativeDatabricksMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 
\x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\x87\x02\n\x1cGenerativeFriendliAIMetadata\x12\x43\n\x05usage\x18\x01 \x01(\x0b\x32/.weaviate.v1.GenerativeFriendliAIMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\xff\x01\n\x18GenerativeNvidiaMetadata\x12?\n\x05usage\x18\x01 \x01(\x0b\x32+.weaviate.v1.GenerativeNvidiaMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\xf9\x01\n\x15GenerativeXAIMetadata\x12<\n\x05usage\x18\x01 \x01(\x0b\x32(.weaviate.v1.GenerativeXAIMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\x8f\x06\n\x12GenerativeMetadata\x12=\n\tanthropic\x18\x01 \x01(\x0b\x32(.weaviate.v1.GenerativeAnthropicMetadataH\x00\x12;\n\x08\x61nyscale\x18\x02 \x01(\x0b\x32\'.weaviate.v1.GenerativeAnyscaleMetadataH\x00\x12\x31\n\x03\x61ws\x18\x03 \x01(\x0b\x32\".weaviate.v1.GenerativeAWSMetadataH\x00\x12\x37\n\x06\x63ohere\x18\x04 \x01(\x0b\x32%.weaviate.v1.GenerativeCohereMetadataH\x00\x12\x35\n\x05\x64ummy\x18\x05 \x01(\x0b\x32$.weaviate.v1.GenerativeDummyMetadataH\x00\x12\x39\n\x07mistral\x18\x06 
\x01(\x0b\x32&.weaviate.v1.GenerativeMistralMetadataH\x00\x12\x37\n\x06ollama\x18\x07 \x01(\x0b\x32%.weaviate.v1.GenerativeOllamaMetadataH\x00\x12\x37\n\x06openai\x18\x08 \x01(\x0b\x32%.weaviate.v1.GenerativeOpenAIMetadataH\x00\x12\x37\n\x06google\x18\t \x01(\x0b\x32%.weaviate.v1.GenerativeGoogleMetadataH\x00\x12?\n\ndatabricks\x18\n \x01(\x0b\x32).weaviate.v1.GenerativeDatabricksMetadataH\x00\x12?\n\nfriendliai\x18\x0b \x01(\x0b\x32).weaviate.v1.GenerativeFriendliAIMetadataH\x00\x12\x37\n\x06nvidia\x18\x0c \x01(\x0b\x32%.weaviate.v1.GenerativeNvidiaMetadataH\x00\x12\x31\n\x03xai\x18\r \x01(\x0b\x32\".weaviate.v1.GenerativeXAIMetadataH\x00\x42\x06\n\x04kind\"\xa2\x01\n\x0fGenerativeReply\x12\x0e\n\x06result\x18\x01 \x01(\t\x12\x30\n\x05\x64\x65\x62ug\x18\x02 \x01(\x0b\x32\x1c.weaviate.v1.GenerativeDebugH\x00\x88\x01\x01\x12\x36\n\x08metadata\x18\x03 \x01(\x0b\x32\x1f.weaviate.v1.GenerativeMetadataH\x01\x88\x01\x01\x42\x08\n\x06_debugB\x0b\n\t_metadata\"@\n\x10GenerativeResult\x12,\n\x06values\x18\x01 \x03(\x0b\x32\x1c.weaviate.v1.GenerativeReply\";\n\x0fGenerativeDebug\x12\x18\n\x0b\x66ull_prompt\x18\x01 \x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_full_promptBt\n#io.weaviate.client.grpc.protocol.v1B\x17WeaviateProtoGenerativeZ4github.com/weaviate/weaviate/grpc/generated;protocolb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -42,93 +42,93 @@ _globals['_GENERATIVEANYSCALE']._serialized_start=1736 _globals['_GENERATIVEANYSCALE']._serialized_end=1864 _globals['_GENERATIVEAWS']._serialized_start=1867 - _globals['_GENERATIVEAWS']._serialized_end=2320 - _globals['_GENERATIVECOHERE']._serialized_start=2323 - _globals['_GENERATIVECOHERE']._serialized_end=2843 - _globals['_GENERATIVEDUMMY']._serialized_start=2845 - _globals['_GENERATIVEDUMMY']._serialized_end=2862 - _globals['_GENERATIVEMISTRAL']._serialized_start=2865 - _globals['_GENERATIVEMISTRAL']._serialized_end=3062 - _globals['_GENERATIVEOLLAMA']._serialized_start=3065 - 
_globals['_GENERATIVEOLLAMA']._serialized_end=3331 - _globals['_GENERATIVEOPENAI']._serialized_start=3334 - _globals['_GENERATIVEOPENAI']._serialized_end=4457 - _globals['_GENERATIVEOPENAI_REASONINGEFFORT']._serialized_start=3939 - _globals['_GENERATIVEOPENAI_REASONINGEFFORT']._serialized_end=4102 - _globals['_GENERATIVEOPENAI_VERBOSITY']._serialized_start=4104 - _globals['_GENERATIVEOPENAI_VERBOSITY']._serialized_end=4203 - _globals['_GENERATIVEGOOGLE']._serialized_start=4460 - _globals['_GENERATIVEGOOGLE']._serialized_end=5118 - _globals['_GENERATIVEDATABRICKS']._serialized_start=5121 - _globals['_GENERATIVEDATABRICKS']._serialized_end=5585 - _globals['_GENERATIVEFRIENDLIAI']._serialized_start=5588 - _globals['_GENERATIVEFRIENDLIAI']._serialized_end=5810 - _globals['_GENERATIVENVIDIA']._serialized_start=5813 - _globals['_GENERATIVENVIDIA']._serialized_end=6009 - _globals['_GENERATIVEXAI']._serialized_start=6012 - _globals['_GENERATIVEXAI']._serialized_end=6337 - _globals['_GENERATIVECONTEXTUALAI']._serialized_start=6340 - _globals['_GENERATIVECONTEXTUALAI']._serialized_end=6674 - _globals['_GENERATIVEANTHROPICMETADATA']._serialized_start=6677 - _globals['_GENERATIVEANTHROPICMETADATA']._serialized_end=6823 - _globals['_GENERATIVEANTHROPICMETADATA_USAGE']._serialized_start=6771 - _globals['_GENERATIVEANTHROPICMETADATA_USAGE']._serialized_end=6823 - _globals['_GENERATIVEANYSCALEMETADATA']._serialized_start=6825 - _globals['_GENERATIVEANYSCALEMETADATA']._serialized_end=6853 - _globals['_GENERATIVEAWSMETADATA']._serialized_start=6855 - _globals['_GENERATIVEAWSMETADATA']._serialized_end=6878 - _globals['_GENERATIVECOHEREMETADATA']._serialized_start=6881 - _globals['_GENERATIVECOHEREMETADATA']._serialized_end=7677 - _globals['_GENERATIVECOHEREMETADATA_APIVERSION']._serialized_start=7178 - _globals['_GENERATIVECOHEREMETADATA_APIVERSION']._serialized_end=7320 - _globals['_GENERATIVECOHEREMETADATA_BILLEDUNITS']._serialized_start=7323 - 
_globals['_GENERATIVECOHEREMETADATA_BILLEDUNITS']._serialized_end=7520 - _globals['_GENERATIVECOHEREMETADATA_TOKENS']._serialized_start=7522 - _globals['_GENERATIVECOHEREMETADATA_TOKENS']._serialized_end=7620 - _globals['_GENERATIVEDUMMYMETADATA']._serialized_start=7679 - _globals['_GENERATIVEDUMMYMETADATA']._serialized_end=7704 - _globals['_GENERATIVEMISTRALMETADATA']._serialized_start=7707 - _globals['_GENERATIVEMISTRALMETADATA']._serialized_end=7964 - _globals['_GENERATIVEMISTRALMETADATA_USAGE']._serialized_start=7803 - _globals['_GENERATIVEMISTRALMETADATA_USAGE']._serialized_end=7954 - _globals['_GENERATIVEOLLAMAMETADATA']._serialized_start=7966 - _globals['_GENERATIVEOLLAMAMETADATA']._serialized_end=7992 - _globals['_GENERATIVEOPENAIMETADATA']._serialized_start=7995 - _globals['_GENERATIVEOPENAIMETADATA']._serialized_end=8250 - _globals['_GENERATIVEOPENAIMETADATA_USAGE']._serialized_start=7803 - _globals['_GENERATIVEOPENAIMETADATA_USAGE']._serialized_end=7954 - _globals['_GENERATIVEGOOGLEMETADATA']._serialized_start=8253 - _globals['_GENERATIVEGOOGLEMETADATA']._serialized_end=9125 - _globals['_GENERATIVEGOOGLEMETADATA_TOKENCOUNT']._serialized_start=8434 - _globals['_GENERATIVEGOOGLEMETADATA_TOKENCOUNT']._serialized_end=8560 - _globals['_GENERATIVEGOOGLEMETADATA_TOKENMETADATA']._serialized_start=8563 - _globals['_GENERATIVEGOOGLEMETADATA_TOKENMETADATA']._serialized_end=8788 - _globals['_GENERATIVEGOOGLEMETADATA_METADATA']._serialized_start=8790 - _globals['_GENERATIVEGOOGLEMETADATA_METADATA']._serialized_end=8901 - _globals['_GENERATIVEGOOGLEMETADATA_USAGEMETADATA']._serialized_start=8904 - _globals['_GENERATIVEGOOGLEMETADATA_USAGEMETADATA']._serialized_end=9093 - _globals['_GENERATIVEDATABRICKSMETADATA']._serialized_start=9128 - _globals['_GENERATIVEDATABRICKSMETADATA']._serialized_end=9391 - _globals['_GENERATIVEDATABRICKSMETADATA_USAGE']._serialized_start=7803 - _globals['_GENERATIVEDATABRICKSMETADATA_USAGE']._serialized_end=7954 - 
_globals['_GENERATIVEFRIENDLIAIMETADATA']._serialized_start=9394 - _globals['_GENERATIVEFRIENDLIAIMETADATA']._serialized_end=9657 - _globals['_GENERATIVEFRIENDLIAIMETADATA_USAGE']._serialized_start=7803 - _globals['_GENERATIVEFRIENDLIAIMETADATA_USAGE']._serialized_end=7954 - _globals['_GENERATIVENVIDIAMETADATA']._serialized_start=9660 - _globals['_GENERATIVENVIDIAMETADATA']._serialized_end=9915 - _globals['_GENERATIVENVIDIAMETADATA_USAGE']._serialized_start=7803 - _globals['_GENERATIVENVIDIAMETADATA_USAGE']._serialized_end=7954 - _globals['_GENERATIVEXAIMETADATA']._serialized_start=9918 - _globals['_GENERATIVEXAIMETADATA']._serialized_end=10167 - _globals['_GENERATIVEXAIMETADATA_USAGE']._serialized_start=7803 - _globals['_GENERATIVEXAIMETADATA_USAGE']._serialized_end=7954 - _globals['_GENERATIVEMETADATA']._serialized_start=10170 - _globals['_GENERATIVEMETADATA']._serialized_end=10953 - _globals['_GENERATIVEREPLY']._serialized_start=10956 - _globals['_GENERATIVEREPLY']._serialized_end=11118 - _globals['_GENERATIVERESULT']._serialized_start=11120 - _globals['_GENERATIVERESULT']._serialized_end=11184 - _globals['_GENERATIVEDEBUG']._serialized_start=11186 - _globals['_GENERATIVEDEBUG']._serialized_end=11245 + _globals['_GENERATIVEAWS']._serialized_end=2392 + _globals['_GENERATIVECOHERE']._serialized_start=2395 + _globals['_GENERATIVECOHERE']._serialized_end=2915 + _globals['_GENERATIVEDUMMY']._serialized_start=2917 + _globals['_GENERATIVEDUMMY']._serialized_end=2934 + _globals['_GENERATIVEMISTRAL']._serialized_start=2937 + _globals['_GENERATIVEMISTRAL']._serialized_end=3134 + _globals['_GENERATIVEOLLAMA']._serialized_start=3137 + _globals['_GENERATIVEOLLAMA']._serialized_end=3403 + _globals['_GENERATIVEOPENAI']._serialized_start=3406 + _globals['_GENERATIVEOPENAI']._serialized_end=4529 + _globals['_GENERATIVEOPENAI_REASONINGEFFORT']._serialized_start=4011 + _globals['_GENERATIVEOPENAI_REASONINGEFFORT']._serialized_end=4174 + 
_globals['_GENERATIVEOPENAI_VERBOSITY']._serialized_start=4176 + _globals['_GENERATIVEOPENAI_VERBOSITY']._serialized_end=4275 + _globals['_GENERATIVEGOOGLE']._serialized_start=4532 + _globals['_GENERATIVEGOOGLE']._serialized_end=5190 + _globals['_GENERATIVEDATABRICKS']._serialized_start=5193 + _globals['_GENERATIVEDATABRICKS']._serialized_end=5657 + _globals['_GENERATIVEFRIENDLIAI']._serialized_start=5660 + _globals['_GENERATIVEFRIENDLIAI']._serialized_end=5882 + _globals['_GENERATIVENVIDIA']._serialized_start=5885 + _globals['_GENERATIVENVIDIA']._serialized_end=6081 + _globals['_GENERATIVEXAI']._serialized_start=6084 + _globals['_GENERATIVEXAI']._serialized_end=6409 + _globals['_GENERATIVECONTEXTUALAI']._serialized_start=6412 + _globals['_GENERATIVECONTEXTUALAI']._serialized_end=6746 + _globals['_GENERATIVEANTHROPICMETADATA']._serialized_start=6749 + _globals['_GENERATIVEANTHROPICMETADATA']._serialized_end=6895 + _globals['_GENERATIVEANTHROPICMETADATA_USAGE']._serialized_start=6843 + _globals['_GENERATIVEANTHROPICMETADATA_USAGE']._serialized_end=6895 + _globals['_GENERATIVEANYSCALEMETADATA']._serialized_start=6897 + _globals['_GENERATIVEANYSCALEMETADATA']._serialized_end=6925 + _globals['_GENERATIVEAWSMETADATA']._serialized_start=6927 + _globals['_GENERATIVEAWSMETADATA']._serialized_end=6950 + _globals['_GENERATIVECOHEREMETADATA']._serialized_start=6953 + _globals['_GENERATIVECOHEREMETADATA']._serialized_end=7749 + _globals['_GENERATIVECOHEREMETADATA_APIVERSION']._serialized_start=7250 + _globals['_GENERATIVECOHEREMETADATA_APIVERSION']._serialized_end=7392 + _globals['_GENERATIVECOHEREMETADATA_BILLEDUNITS']._serialized_start=7395 + _globals['_GENERATIVECOHEREMETADATA_BILLEDUNITS']._serialized_end=7592 + _globals['_GENERATIVECOHEREMETADATA_TOKENS']._serialized_start=7594 + _globals['_GENERATIVECOHEREMETADATA_TOKENS']._serialized_end=7692 + _globals['_GENERATIVEDUMMYMETADATA']._serialized_start=7751 + _globals['_GENERATIVEDUMMYMETADATA']._serialized_end=7776 + 
_globals['_GENERATIVEMISTRALMETADATA']._serialized_start=7779 + _globals['_GENERATIVEMISTRALMETADATA']._serialized_end=8036 + _globals['_GENERATIVEMISTRALMETADATA_USAGE']._serialized_start=7875 + _globals['_GENERATIVEMISTRALMETADATA_USAGE']._serialized_end=8026 + _globals['_GENERATIVEOLLAMAMETADATA']._serialized_start=8038 + _globals['_GENERATIVEOLLAMAMETADATA']._serialized_end=8064 + _globals['_GENERATIVEOPENAIMETADATA']._serialized_start=8067 + _globals['_GENERATIVEOPENAIMETADATA']._serialized_end=8322 + _globals['_GENERATIVEOPENAIMETADATA_USAGE']._serialized_start=7875 + _globals['_GENERATIVEOPENAIMETADATA_USAGE']._serialized_end=8026 + _globals['_GENERATIVEGOOGLEMETADATA']._serialized_start=8325 + _globals['_GENERATIVEGOOGLEMETADATA']._serialized_end=9197 + _globals['_GENERATIVEGOOGLEMETADATA_TOKENCOUNT']._serialized_start=8506 + _globals['_GENERATIVEGOOGLEMETADATA_TOKENCOUNT']._serialized_end=8632 + _globals['_GENERATIVEGOOGLEMETADATA_TOKENMETADATA']._serialized_start=8635 + _globals['_GENERATIVEGOOGLEMETADATA_TOKENMETADATA']._serialized_end=8860 + _globals['_GENERATIVEGOOGLEMETADATA_METADATA']._serialized_start=8862 + _globals['_GENERATIVEGOOGLEMETADATA_METADATA']._serialized_end=8973 + _globals['_GENERATIVEGOOGLEMETADATA_USAGEMETADATA']._serialized_start=8976 + _globals['_GENERATIVEGOOGLEMETADATA_USAGEMETADATA']._serialized_end=9165 + _globals['_GENERATIVEDATABRICKSMETADATA']._serialized_start=9200 + _globals['_GENERATIVEDATABRICKSMETADATA']._serialized_end=9463 + _globals['_GENERATIVEDATABRICKSMETADATA_USAGE']._serialized_start=7875 + _globals['_GENERATIVEDATABRICKSMETADATA_USAGE']._serialized_end=8026 + _globals['_GENERATIVEFRIENDLIAIMETADATA']._serialized_start=9466 + _globals['_GENERATIVEFRIENDLIAIMETADATA']._serialized_end=9729 + _globals['_GENERATIVEFRIENDLIAIMETADATA_USAGE']._serialized_start=7875 + _globals['_GENERATIVEFRIENDLIAIMETADATA_USAGE']._serialized_end=8026 + _globals['_GENERATIVENVIDIAMETADATA']._serialized_start=9732 + 
_globals['_GENERATIVENVIDIAMETADATA']._serialized_end=9987 + _globals['_GENERATIVENVIDIAMETADATA_USAGE']._serialized_start=7875 + _globals['_GENERATIVENVIDIAMETADATA_USAGE']._serialized_end=8026 + _globals['_GENERATIVEXAIMETADATA']._serialized_start=9990 + _globals['_GENERATIVEXAIMETADATA']._serialized_end=10239 + _globals['_GENERATIVEXAIMETADATA_USAGE']._serialized_start=7875 + _globals['_GENERATIVEXAIMETADATA_USAGE']._serialized_end=8026 + _globals['_GENERATIVEMETADATA']._serialized_start=10242 + _globals['_GENERATIVEMETADATA']._serialized_end=11025 + _globals['_GENERATIVEREPLY']._serialized_start=11028 + _globals['_GENERATIVEREPLY']._serialized_end=11190 + _globals['_GENERATIVERESULT']._serialized_start=11192 + _globals['_GENERATIVERESULT']._serialized_end=11256 + _globals['_GENERATIVEDEBUG']._serialized_start=11258 + _globals['_GENERATIVEDEBUG']._serialized_end=11317 # @@protoc_insertion_point(module_scope) diff --git a/weaviate/proto/v1/v5261/v1/generative_pb2.pyi b/weaviate/proto/v1/v5261/v1/generative_pb2.pyi index 5c0b563e5..1699c732f 100644 --- a/weaviate/proto/v1/v5261/v1/generative_pb2.pyi +++ b/weaviate/proto/v1/v5261/v1/generative_pb2.pyi @@ -108,7 +108,7 @@ class GenerativeAnyscale(_message.Message): def __init__(self, base_url: _Optional[str] = ..., model: _Optional[str] = ..., temperature: _Optional[float] = ...) -> None: ... 
class GenerativeAWS(_message.Message): - __slots__ = ("model", "temperature", "service", "region", "endpoint", "target_model", "target_variant", "images", "image_properties", "max_tokens") + __slots__ = ("model", "temperature", "service", "region", "endpoint", "target_model", "target_variant", "images", "image_properties", "max_tokens", "stop_sequences") MODEL_FIELD_NUMBER: _ClassVar[int] TEMPERATURE_FIELD_NUMBER: _ClassVar[int] SERVICE_FIELD_NUMBER: _ClassVar[int] @@ -119,6 +119,7 @@ class GenerativeAWS(_message.Message): IMAGES_FIELD_NUMBER: _ClassVar[int] IMAGE_PROPERTIES_FIELD_NUMBER: _ClassVar[int] MAX_TOKENS_FIELD_NUMBER: _ClassVar[int] + STOP_SEQUENCES_FIELD_NUMBER: _ClassVar[int] model: str temperature: float service: str @@ -129,7 +130,8 @@ class GenerativeAWS(_message.Message): images: _base_pb2.TextArray image_properties: _base_pb2.TextArray max_tokens: int - def __init__(self, model: _Optional[str] = ..., temperature: _Optional[float] = ..., service: _Optional[str] = ..., region: _Optional[str] = ..., endpoint: _Optional[str] = ..., target_model: _Optional[str] = ..., target_variant: _Optional[str] = ..., images: _Optional[_Union[_base_pb2.TextArray, _Mapping]] = ..., image_properties: _Optional[_Union[_base_pb2.TextArray, _Mapping]] = ..., max_tokens: _Optional[int] = ...) -> None: ... + stop_sequences: _base_pb2.TextArray + def __init__(self, model: _Optional[str] = ..., temperature: _Optional[float] = ..., service: _Optional[str] = ..., region: _Optional[str] = ..., endpoint: _Optional[str] = ..., target_model: _Optional[str] = ..., target_variant: _Optional[str] = ..., images: _Optional[_Union[_base_pb2.TextArray, _Mapping]] = ..., image_properties: _Optional[_Union[_base_pb2.TextArray, _Mapping]] = ..., max_tokens: _Optional[int] = ..., stop_sequences: _Optional[_Union[_base_pb2.TextArray, _Mapping]] = ...) -> None: ... 
class GenerativeCohere(_message.Message): __slots__ = ("base_url", "frequency_penalty", "max_tokens", "model", "k", "p", "presence_penalty", "stop_sequences", "temperature", "images", "image_properties") diff --git a/weaviate/proto/v1/v6300/v1/batch_pb2.py b/weaviate/proto/v1/v6300/v1/batch_pb2.py index f7434e1d3..83b37fd05 100644 --- a/weaviate/proto/v1/v6300/v1/batch_pb2.py +++ b/weaviate/proto/v1/v6300/v1/batch_pb2.py @@ -26,7 +26,7 @@ from weaviate.proto.v1.v6300.v1 import base_pb2 as v1_dot_base__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0ev1/batch.proto\x12\x0bweaviate.v1\x1a\x1cgoogle/protobuf/struct.proto\x1a\rv1/base.proto\"\x95\x01\n\x13\x42\x61tchObjectsRequest\x12)\n\x07objects\x18\x01 \x03(\x0b\x32\x18.weaviate.v1.BatchObject\x12=\n\x11\x63onsistency_level\x18\x02 \x01(\x0e\x32\x1d.weaviate.v1.ConsistencyLevelH\x00\x88\x01\x01\x42\x14\n\x12_consistency_level\"\x9e\x01\n\x16\x42\x61tchReferencesRequest\x12/\n\nreferences\x18\x01 \x03(\x0b\x32\x1b.weaviate.v1.BatchReference\x12=\n\x11\x63onsistency_level\x18\x02 \x01(\x0e\x32\x1d.weaviate.v1.ConsistencyLevelH\x00\x88\x01\x01\x42\x14\n\x12_consistency_level\"\xa6\x04\n\x12\x42\x61tchStreamRequest\x12\x36\n\x05start\x18\x01 \x01(\x0b\x32%.weaviate.v1.BatchStreamRequest.StartH\x00\x12\x34\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.weaviate.v1.BatchStreamRequest.DataH\x00\x12\x34\n\x04stop\x18\x03 \x01(\x0b\x32$.weaviate.v1.BatchStreamRequest.StopH\x00\x1a\\\n\x05Start\x12=\n\x11\x63onsistency_level\x18\x01 \x01(\x0e\x32\x1d.weaviate.v1.ConsistencyLevelH\x00\x88\x01\x01\x42\x14\n\x12_consistency_level\x1a\x06\n\x04Stop\x1a\xfa\x01\n\x04\x44\x61ta\x12=\n\x07objects\x18\x01 \x01(\x0b\x32,.weaviate.v1.BatchStreamRequest.Data.Objects\x12\x43\n\nreferences\x18\x02 \x01(\x0b\x32/.weaviate.v1.BatchStreamRequest.Data.References\x1a\x33\n\x07Objects\x12(\n\x06values\x18\x01 \x03(\x0b\x32\x18.weaviate.v1.BatchObject\x1a\x39\n\nReferences\x12+\n\x06values\x18\x01 
\x03(\x0b\x32\x1b.weaviate.v1.BatchReferenceB\t\n\x07message\"\x98\x05\n\x10\x42\x61tchStreamReply\x12\x38\n\x07results\x18\x01 \x01(\x0b\x32%.weaviate.v1.BatchStreamReply.ResultsH\x00\x12\x43\n\rshutting_down\x18\x02 \x01(\x0b\x32*.weaviate.v1.BatchStreamReply.ShuttingDownH\x00\x12:\n\x08shutdown\x18\x03 \x01(\x0b\x32&.weaviate.v1.BatchStreamReply.ShutdownH\x00\x12\x38\n\x07started\x18\x04 \x01(\x0b\x32%.weaviate.v1.BatchStreamReply.StartedH\x00\x12\x38\n\x07\x62\x61\x63koff\x18\x05 \x01(\x0b\x32%.weaviate.v1.BatchStreamReply.BackoffH\x00\x1a\t\n\x07Started\x1a\x0e\n\x0cShuttingDown\x1a\n\n\x08Shutdown\x1a\x1d\n\x07\x42\x61\x63koff\x12\x12\n\nbatch_size\x18\x01 \x01(\x05\x1a\x83\x02\n\x07Results\x12;\n\x06\x65rrors\x18\x01 \x03(\x0b\x32+.weaviate.v1.BatchStreamReply.Results.Error\x12@\n\tsuccesses\x18\x02 \x03(\x0b\x32-.weaviate.v1.BatchStreamReply.Results.Success\x1a\x42\n\x05\x45rror\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\x0e\n\x04uuid\x18\x02 \x01(\tH\x00\x12\x10\n\x06\x62\x65\x61\x63on\x18\x03 \x01(\tH\x00\x42\x08\n\x06\x64\x65tail\x1a\x35\n\x07Success\x12\x0e\n\x04uuid\x18\x02 \x01(\tH\x00\x12\x10\n\x06\x62\x65\x61\x63on\x18\x03 \x01(\tH\x00\x42\x08\n\x06\x64\x65tailB\t\n\x07message\"\xde\x07\n\x0b\x42\x61tchObject\x12\x0c\n\x04uuid\x18\x01 \x01(\t\x12\x12\n\x06vector\x18\x02 \x03(\x02\x42\x02\x18\x01\x12\x37\n\nproperties\x18\x03 \x01(\x0b\x32#.weaviate.v1.BatchObject.Properties\x12\x12\n\ncollection\x18\x04 \x01(\t\x12\x0e\n\x06tenant\x18\x05 \x01(\t\x12\x14\n\x0cvector_bytes\x18\x06 \x01(\x0c\x12%\n\x07vectors\x18\x17 \x03(\x0b\x32\x14.weaviate.v1.Vectors\x1a\x84\x05\n\nProperties\x12\x33\n\x12non_ref_properties\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\x12N\n\x17single_target_ref_props\x18\x02 \x03(\x0b\x32-.weaviate.v1.BatchObject.SingleTargetRefProps\x12L\n\x16multi_target_ref_props\x18\x03 \x03(\x0b\x32,.weaviate.v1.BatchObject.MultiTargetRefProps\x12\x43\n\x17number_array_properties\x18\x04 
\x03(\x0b\x32\".weaviate.v1.NumberArrayProperties\x12=\n\x14int_array_properties\x18\x05 \x03(\x0b\x32\x1f.weaviate.v1.IntArrayProperties\x12?\n\x15text_array_properties\x18\x06 \x03(\x0b\x32 .weaviate.v1.TextArrayProperties\x12\x45\n\x18\x62oolean_array_properties\x18\x07 \x03(\x0b\x32#.weaviate.v1.BooleanArrayProperties\x12\x38\n\x11object_properties\x18\x08 \x03(\x0b\x32\x1d.weaviate.v1.ObjectProperties\x12\x43\n\x17object_array_properties\x18\t \x03(\x0b\x32\".weaviate.v1.ObjectArrayProperties\x12\x18\n\x10\x65mpty_list_props\x18\n \x03(\t\x1a\x38\n\x14SingleTargetRefProps\x12\r\n\x05uuids\x18\x01 \x03(\t\x12\x11\n\tprop_name\x18\x02 \x01(\t\x1aR\n\x13MultiTargetRefProps\x12\r\n\x05uuids\x18\x01 \x03(\t\x12\x11\n\tprop_name\x18\x02 \x01(\t\x12\x19\n\x11target_collection\x18\x03 \x01(\t\"\x99\x01\n\x0e\x42\x61tchReference\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0f\x66rom_collection\x18\x02 \x01(\t\x12\x11\n\tfrom_uuid\x18\x03 \x01(\t\x12\x1a\n\rto_collection\x18\x04 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x07to_uuid\x18\x05 \x01(\t\x12\x0e\n\x06tenant\x18\x06 \x01(\tB\x10\n\x0e_to_collection\"\x88\x01\n\x11\x42\x61tchObjectsReply\x12\x0c\n\x04took\x18\x01 \x01(\x02\x12\x39\n\x06\x65rrors\x18\x02 \x03(\x0b\x32).weaviate.v1.BatchObjectsReply.BatchError\x1a*\n\nBatchError\x12\r\n\x05index\x18\x01 \x01(\x05\x12\r\n\x05\x65rror\x18\x02 \x01(\t\"\x8e\x01\n\x14\x42\x61tchReferencesReply\x12\x0c\n\x04took\x18\x01 \x01(\x02\x12<\n\x06\x65rrors\x18\x02 \x03(\x0b\x32,.weaviate.v1.BatchReferencesReply.BatchError\x1a*\n\nBatchError\x12\r\n\x05index\x18\x01 \x01(\x05\x12\r\n\x05\x65rror\x18\x02 \x01(\tBo\n#io.weaviate.client.grpc.protocol.v1B\x12WeaviateProtoBatchZ4github.com/weaviate/weaviate/grpc/generated;protocolb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0ev1/batch.proto\x12\x0bweaviate.v1\x1a\x1cgoogle/protobuf/struct.proto\x1a\rv1/base.proto\"\x95\x01\n\x13\x42\x61tchObjectsRequest\x12)\n\x07objects\x18\x01 
\x03(\x0b\x32\x18.weaviate.v1.BatchObject\x12=\n\x11\x63onsistency_level\x18\x02 \x01(\x0e\x32\x1d.weaviate.v1.ConsistencyLevelH\x00\x88\x01\x01\x42\x14\n\x12_consistency_level\"\x9e\x01\n\x16\x42\x61tchReferencesRequest\x12/\n\nreferences\x18\x01 \x03(\x0b\x32\x1b.weaviate.v1.BatchReference\x12=\n\x11\x63onsistency_level\x18\x02 \x01(\x0e\x32\x1d.weaviate.v1.ConsistencyLevelH\x00\x88\x01\x01\x42\x14\n\x12_consistency_level\"\xa6\x04\n\x12\x42\x61tchStreamRequest\x12\x36\n\x05start\x18\x01 \x01(\x0b\x32%.weaviate.v1.BatchStreamRequest.StartH\x00\x12\x34\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.weaviate.v1.BatchStreamRequest.DataH\x00\x12\x34\n\x04stop\x18\x03 \x01(\x0b\x32$.weaviate.v1.BatchStreamRequest.StopH\x00\x1a\\\n\x05Start\x12=\n\x11\x63onsistency_level\x18\x01 \x01(\x0e\x32\x1d.weaviate.v1.ConsistencyLevelH\x00\x88\x01\x01\x42\x14\n\x12_consistency_level\x1a\x06\n\x04Stop\x1a\xfa\x01\n\x04\x44\x61ta\x12=\n\x07objects\x18\x01 \x01(\x0b\x32,.weaviate.v1.BatchStreamRequest.Data.Objects\x12\x43\n\nreferences\x18\x02 \x01(\x0b\x32/.weaviate.v1.BatchStreamRequest.Data.References\x1a\x33\n\x07Objects\x12(\n\x06values\x18\x01 \x03(\x0b\x32\x18.weaviate.v1.BatchObject\x1a\x39\n\nReferences\x12+\n\x06values\x18\x01 \x03(\x0b\x32\x1b.weaviate.v1.BatchReferenceB\t\n\x07message\"\xe7\x06\n\x10\x42\x61tchStreamReply\x12\x38\n\x07results\x18\x01 \x01(\x0b\x32%.weaviate.v1.BatchStreamReply.ResultsH\x00\x12\x43\n\rshutting_down\x18\x02 \x01(\x0b\x32*.weaviate.v1.BatchStreamReply.ShuttingDownH\x00\x12:\n\x08shutdown\x18\x03 \x01(\x0b\x32&.weaviate.v1.BatchStreamReply.ShutdownH\x00\x12\x38\n\x07started\x18\x04 \x01(\x0b\x32%.weaviate.v1.BatchStreamReply.StartedH\x00\x12\x38\n\x07\x62\x61\x63koff\x18\x05 \x01(\x0b\x32%.weaviate.v1.BatchStreamReply.BackoffH\x00\x12\x32\n\x04\x61\x63ks\x18\x06 \x01(\x0b\x32\".weaviate.v1.BatchStreamReply.AcksH\x00\x12\x42\n\rout_of_memory\x18\x07 
\x01(\x0b\x32).weaviate.v1.BatchStreamReply.OutOfMemoryH\x00\x1a\t\n\x07Started\x1a\x0e\n\x0cShuttingDown\x1a\n\n\x08Shutdown\x1a-\n\x0bOutOfMemory\x12\r\n\x05uuids\x18\x01 \x03(\t\x12\x0f\n\x07\x62\x65\x61\x63ons\x18\x02 \x03(\t\x1a\x1d\n\x07\x42\x61\x63koff\x12\x12\n\nbatch_size\x18\x01 \x01(\x05\x1a&\n\x04\x41\x63ks\x12\r\n\x05uuids\x18\x01 \x03(\t\x12\x0f\n\x07\x62\x65\x61\x63ons\x18\x02 \x03(\t\x1a\x83\x02\n\x07Results\x12;\n\x06\x65rrors\x18\x01 \x03(\x0b\x32+.weaviate.v1.BatchStreamReply.Results.Error\x12@\n\tsuccesses\x18\x02 \x03(\x0b\x32-.weaviate.v1.BatchStreamReply.Results.Success\x1a\x42\n\x05\x45rror\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\x0e\n\x04uuid\x18\x02 \x01(\tH\x00\x12\x10\n\x06\x62\x65\x61\x63on\x18\x03 \x01(\tH\x00\x42\x08\n\x06\x64\x65tail\x1a\x35\n\x07Success\x12\x0e\n\x04uuid\x18\x02 \x01(\tH\x00\x12\x10\n\x06\x62\x65\x61\x63on\x18\x03 \x01(\tH\x00\x42\x08\n\x06\x64\x65tailB\t\n\x07message\"\xde\x07\n\x0b\x42\x61tchObject\x12\x0c\n\x04uuid\x18\x01 \x01(\t\x12\x12\n\x06vector\x18\x02 \x03(\x02\x42\x02\x18\x01\x12\x37\n\nproperties\x18\x03 \x01(\x0b\x32#.weaviate.v1.BatchObject.Properties\x12\x12\n\ncollection\x18\x04 \x01(\t\x12\x0e\n\x06tenant\x18\x05 \x01(\t\x12\x14\n\x0cvector_bytes\x18\x06 \x01(\x0c\x12%\n\x07vectors\x18\x17 \x03(\x0b\x32\x14.weaviate.v1.Vectors\x1a\x84\x05\n\nProperties\x12\x33\n\x12non_ref_properties\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\x12N\n\x17single_target_ref_props\x18\x02 \x03(\x0b\x32-.weaviate.v1.BatchObject.SingleTargetRefProps\x12L\n\x16multi_target_ref_props\x18\x03 \x03(\x0b\x32,.weaviate.v1.BatchObject.MultiTargetRefProps\x12\x43\n\x17number_array_properties\x18\x04 \x03(\x0b\x32\".weaviate.v1.NumberArrayProperties\x12=\n\x14int_array_properties\x18\x05 \x03(\x0b\x32\x1f.weaviate.v1.IntArrayProperties\x12?\n\x15text_array_properties\x18\x06 \x03(\x0b\x32 .weaviate.v1.TextArrayProperties\x12\x45\n\x18\x62oolean_array_properties\x18\x07 
\x03(\x0b\x32#.weaviate.v1.BooleanArrayProperties\x12\x38\n\x11object_properties\x18\x08 \x03(\x0b\x32\x1d.weaviate.v1.ObjectProperties\x12\x43\n\x17object_array_properties\x18\t \x03(\x0b\x32\".weaviate.v1.ObjectArrayProperties\x12\x18\n\x10\x65mpty_list_props\x18\n \x03(\t\x1a\x38\n\x14SingleTargetRefProps\x12\r\n\x05uuids\x18\x01 \x03(\t\x12\x11\n\tprop_name\x18\x02 \x01(\t\x1aR\n\x13MultiTargetRefProps\x12\r\n\x05uuids\x18\x01 \x03(\t\x12\x11\n\tprop_name\x18\x02 \x01(\t\x12\x19\n\x11target_collection\x18\x03 \x01(\t\"\x99\x01\n\x0e\x42\x61tchReference\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0f\x66rom_collection\x18\x02 \x01(\t\x12\x11\n\tfrom_uuid\x18\x03 \x01(\t\x12\x1a\n\rto_collection\x18\x04 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x07to_uuid\x18\x05 \x01(\t\x12\x0e\n\x06tenant\x18\x06 \x01(\tB\x10\n\x0e_to_collection\"\x88\x01\n\x11\x42\x61tchObjectsReply\x12\x0c\n\x04took\x18\x01 \x01(\x02\x12\x39\n\x06\x65rrors\x18\x02 \x03(\x0b\x32).weaviate.v1.BatchObjectsReply.BatchError\x1a*\n\nBatchError\x12\r\n\x05index\x18\x01 \x01(\x05\x12\r\n\x05\x65rror\x18\x02 \x01(\t\"\x8e\x01\n\x14\x42\x61tchReferencesReply\x12\x0c\n\x04took\x18\x01 \x01(\x02\x12<\n\x06\x65rrors\x18\x02 \x03(\x0b\x32,.weaviate.v1.BatchReferencesReply.BatchError\x1a*\n\nBatchError\x12\r\n\x05index\x18\x01 \x01(\x05\x12\r\n\x05\x65rror\x18\x02 \x01(\tBo\n#io.weaviate.client.grpc.protocol.v1B\x12WeaviateProtoBatchZ4github.com/weaviate/weaviate/grpc/generated;protocolb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -53,37 +53,41 @@ _globals['_BATCHSTREAMREQUEST_DATA_REFERENCES']._serialized_start=872 _globals['_BATCHSTREAMREQUEST_DATA_REFERENCES']._serialized_end=929 _globals['_BATCHSTREAMREPLY']._serialized_start=943 - _globals['_BATCHSTREAMREPLY']._serialized_end=1607 - _globals['_BATCHSTREAMREPLY_STARTED']._serialized_start=1266 - _globals['_BATCHSTREAMREPLY_STARTED']._serialized_end=1275 - 
_globals['_BATCHSTREAMREPLY_SHUTTINGDOWN']._serialized_start=1277 - _globals['_BATCHSTREAMREPLY_SHUTTINGDOWN']._serialized_end=1291 - _globals['_BATCHSTREAMREPLY_SHUTDOWN']._serialized_start=1293 - _globals['_BATCHSTREAMREPLY_SHUTDOWN']._serialized_end=1303 - _globals['_BATCHSTREAMREPLY_BACKOFF']._serialized_start=1305 - _globals['_BATCHSTREAMREPLY_BACKOFF']._serialized_end=1334 - _globals['_BATCHSTREAMREPLY_RESULTS']._serialized_start=1337 - _globals['_BATCHSTREAMREPLY_RESULTS']._serialized_end=1596 - _globals['_BATCHSTREAMREPLY_RESULTS_ERROR']._serialized_start=1475 - _globals['_BATCHSTREAMREPLY_RESULTS_ERROR']._serialized_end=1541 - _globals['_BATCHSTREAMREPLY_RESULTS_SUCCESS']._serialized_start=1543 - _globals['_BATCHSTREAMREPLY_RESULTS_SUCCESS']._serialized_end=1596 - _globals['_BATCHOBJECT']._serialized_start=1610 - _globals['_BATCHOBJECT']._serialized_end=2600 - _globals['_BATCHOBJECT_PROPERTIES']._serialized_start=1814 - _globals['_BATCHOBJECT_PROPERTIES']._serialized_end=2458 - _globals['_BATCHOBJECT_SINGLETARGETREFPROPS']._serialized_start=2460 - _globals['_BATCHOBJECT_SINGLETARGETREFPROPS']._serialized_end=2516 - _globals['_BATCHOBJECT_MULTITARGETREFPROPS']._serialized_start=2518 - _globals['_BATCHOBJECT_MULTITARGETREFPROPS']._serialized_end=2600 - _globals['_BATCHREFERENCE']._serialized_start=2603 - _globals['_BATCHREFERENCE']._serialized_end=2756 - _globals['_BATCHOBJECTSREPLY']._serialized_start=2759 - _globals['_BATCHOBJECTSREPLY']._serialized_end=2895 - _globals['_BATCHOBJECTSREPLY_BATCHERROR']._serialized_start=2853 - _globals['_BATCHOBJECTSREPLY_BATCHERROR']._serialized_end=2895 - _globals['_BATCHREFERENCESREPLY']._serialized_start=2898 - _globals['_BATCHREFERENCESREPLY']._serialized_end=3040 - _globals['_BATCHREFERENCESREPLY_BATCHERROR']._serialized_start=2853 - _globals['_BATCHREFERENCESREPLY_BATCHERROR']._serialized_end=2895 + _globals['_BATCHSTREAMREPLY']._serialized_end=1814 + _globals['_BATCHSTREAMREPLY_STARTED']._serialized_start=1386 + 
_globals['_BATCHSTREAMREPLY_STARTED']._serialized_end=1395 + _globals['_BATCHSTREAMREPLY_SHUTTINGDOWN']._serialized_start=1397 + _globals['_BATCHSTREAMREPLY_SHUTTINGDOWN']._serialized_end=1411 + _globals['_BATCHSTREAMREPLY_SHUTDOWN']._serialized_start=1413 + _globals['_BATCHSTREAMREPLY_SHUTDOWN']._serialized_end=1423 + _globals['_BATCHSTREAMREPLY_OUTOFMEMORY']._serialized_start=1425 + _globals['_BATCHSTREAMREPLY_OUTOFMEMORY']._serialized_end=1470 + _globals['_BATCHSTREAMREPLY_BACKOFF']._serialized_start=1472 + _globals['_BATCHSTREAMREPLY_BACKOFF']._serialized_end=1501 + _globals['_BATCHSTREAMREPLY_ACKS']._serialized_start=1503 + _globals['_BATCHSTREAMREPLY_ACKS']._serialized_end=1541 + _globals['_BATCHSTREAMREPLY_RESULTS']._serialized_start=1544 + _globals['_BATCHSTREAMREPLY_RESULTS']._serialized_end=1803 + _globals['_BATCHSTREAMREPLY_RESULTS_ERROR']._serialized_start=1682 + _globals['_BATCHSTREAMREPLY_RESULTS_ERROR']._serialized_end=1748 + _globals['_BATCHSTREAMREPLY_RESULTS_SUCCESS']._serialized_start=1750 + _globals['_BATCHSTREAMREPLY_RESULTS_SUCCESS']._serialized_end=1803 + _globals['_BATCHOBJECT']._serialized_start=1817 + _globals['_BATCHOBJECT']._serialized_end=2807 + _globals['_BATCHOBJECT_PROPERTIES']._serialized_start=2021 + _globals['_BATCHOBJECT_PROPERTIES']._serialized_end=2665 + _globals['_BATCHOBJECT_SINGLETARGETREFPROPS']._serialized_start=2667 + _globals['_BATCHOBJECT_SINGLETARGETREFPROPS']._serialized_end=2723 + _globals['_BATCHOBJECT_MULTITARGETREFPROPS']._serialized_start=2725 + _globals['_BATCHOBJECT_MULTITARGETREFPROPS']._serialized_end=2807 + _globals['_BATCHREFERENCE']._serialized_start=2810 + _globals['_BATCHREFERENCE']._serialized_end=2963 + _globals['_BATCHOBJECTSREPLY']._serialized_start=2966 + _globals['_BATCHOBJECTSREPLY']._serialized_end=3102 + _globals['_BATCHOBJECTSREPLY_BATCHERROR']._serialized_start=3060 + _globals['_BATCHOBJECTSREPLY_BATCHERROR']._serialized_end=3102 + _globals['_BATCHREFERENCESREPLY']._serialized_start=3105 + 
_globals['_BATCHREFERENCESREPLY']._serialized_end=3247 + _globals['_BATCHREFERENCESREPLY_BATCHERROR']._serialized_start=3060 + _globals['_BATCHREFERENCESREPLY_BATCHERROR']._serialized_end=3102 # @@protoc_insertion_point(module_scope) diff --git a/weaviate/proto/v1/v6300/v1/batch_pb2.pyi b/weaviate/proto/v1/v6300/v1/batch_pb2.pyi index aa308b1d4..3a34d9b16 100644 --- a/weaviate/proto/v1/v6300/v1/batch_pb2.pyi +++ b/weaviate/proto/v1/v6300/v1/batch_pb2.pyi @@ -60,7 +60,7 @@ class BatchStreamRequest(_message.Message): def __init__(self, start: _Optional[_Union[BatchStreamRequest.Start, _Mapping]] = ..., data: _Optional[_Union[BatchStreamRequest.Data, _Mapping]] = ..., stop: _Optional[_Union[BatchStreamRequest.Stop, _Mapping]] = ...) -> None: ... class BatchStreamReply(_message.Message): - __slots__ = ("results", "shutting_down", "shutdown", "started", "backoff") + __slots__ = ("results", "shutting_down", "shutdown", "started", "backoff", "acks", "out_of_memory") class Started(_message.Message): __slots__ = () def __init__(self) -> None: ... @@ -70,11 +70,25 @@ class BatchStreamReply(_message.Message): class Shutdown(_message.Message): __slots__ = () def __init__(self) -> None: ... + class OutOfMemory(_message.Message): + __slots__ = ("uuids", "beacons") + UUIDS_FIELD_NUMBER: _ClassVar[int] + BEACONS_FIELD_NUMBER: _ClassVar[int] + uuids: _containers.RepeatedScalarFieldContainer[str] + beacons: _containers.RepeatedScalarFieldContainer[str] + def __init__(self, uuids: _Optional[_Iterable[str]] = ..., beacons: _Optional[_Iterable[str]] = ...) -> None: ... class Backoff(_message.Message): __slots__ = ("batch_size",) BATCH_SIZE_FIELD_NUMBER: _ClassVar[int] batch_size: int def __init__(self, batch_size: _Optional[int] = ...) -> None: ... 
+ class Acks(_message.Message): + __slots__ = ("uuids", "beacons") + UUIDS_FIELD_NUMBER: _ClassVar[int] + BEACONS_FIELD_NUMBER: _ClassVar[int] + uuids: _containers.RepeatedScalarFieldContainer[str] + beacons: _containers.RepeatedScalarFieldContainer[str] + def __init__(self, uuids: _Optional[_Iterable[str]] = ..., beacons: _Optional[_Iterable[str]] = ...) -> None: ... class Results(_message.Message): __slots__ = ("errors", "successes") class Error(_message.Message): @@ -103,12 +117,16 @@ class BatchStreamReply(_message.Message): SHUTDOWN_FIELD_NUMBER: _ClassVar[int] STARTED_FIELD_NUMBER: _ClassVar[int] BACKOFF_FIELD_NUMBER: _ClassVar[int] + ACKS_FIELD_NUMBER: _ClassVar[int] + OUT_OF_MEMORY_FIELD_NUMBER: _ClassVar[int] results: BatchStreamReply.Results shutting_down: BatchStreamReply.ShuttingDown shutdown: BatchStreamReply.Shutdown started: BatchStreamReply.Started backoff: BatchStreamReply.Backoff - def __init__(self, results: _Optional[_Union[BatchStreamReply.Results, _Mapping]] = ..., shutting_down: _Optional[_Union[BatchStreamReply.ShuttingDown, _Mapping]] = ..., shutdown: _Optional[_Union[BatchStreamReply.Shutdown, _Mapping]] = ..., started: _Optional[_Union[BatchStreamReply.Started, _Mapping]] = ..., backoff: _Optional[_Union[BatchStreamReply.Backoff, _Mapping]] = ...) -> None: ... + acks: BatchStreamReply.Acks + out_of_memory: BatchStreamReply.OutOfMemory + def __init__(self, results: _Optional[_Union[BatchStreamReply.Results, _Mapping]] = ..., shutting_down: _Optional[_Union[BatchStreamReply.ShuttingDown, _Mapping]] = ..., shutdown: _Optional[_Union[BatchStreamReply.Shutdown, _Mapping]] = ..., started: _Optional[_Union[BatchStreamReply.Started, _Mapping]] = ..., backoff: _Optional[_Union[BatchStreamReply.Backoff, _Mapping]] = ..., acks: _Optional[_Union[BatchStreamReply.Acks, _Mapping]] = ..., out_of_memory: _Optional[_Union[BatchStreamReply.OutOfMemory, _Mapping]] = ...) -> None: ... 
class BatchObject(_message.Message): __slots__ = ("uuid", "vector", "properties", "collection", "tenant", "vector_bytes", "vectors") diff --git a/weaviate/proto/v1/v6300/v1/generative_pb2.py b/weaviate/proto/v1/v6300/v1/generative_pb2.py index 59337afe2..4af6fb9ce 100644 --- a/weaviate/proto/v1/v6300/v1/generative_pb2.py +++ b/weaviate/proto/v1/v6300/v1/generative_pb2.py @@ -25,7 +25,7 @@ from weaviate.proto.v1.v6300.v1 import base_pb2 as v1_dot_base__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13v1/generative.proto\x12\x0bweaviate.v1\x1a\rv1/base.proto\"\xdd\x03\n\x10GenerativeSearch\x12\"\n\x16single_response_prompt\x18\x01 \x01(\tB\x02\x18\x01\x12!\n\x15grouped_response_task\x18\x02 \x01(\tB\x02\x18\x01\x12\x1e\n\x12grouped_properties\x18\x03 \x03(\tB\x02\x18\x01\x12\x34\n\x06single\x18\x04 \x01(\x0b\x32$.weaviate.v1.GenerativeSearch.Single\x12\x36\n\x07grouped\x18\x05 \x01(\x0b\x32%.weaviate.v1.GenerativeSearch.Grouped\x1aY\n\x06Single\x12\x0e\n\x06prompt\x18\x01 \x01(\t\x12\r\n\x05\x64\x65\x62ug\x18\x02 \x01(\x08\x12\x30\n\x07queries\x18\x03 \x03(\x0b\x32\x1f.weaviate.v1.GenerativeProvider\x1a\x98\x01\n\x07Grouped\x12\x0c\n\x04task\x18\x01 \x01(\t\x12/\n\nproperties\x18\x02 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x00\x88\x01\x01\x12\x30\n\x07queries\x18\x03 \x03(\x0b\x32\x1f.weaviate.v1.GenerativeProvider\x12\r\n\x05\x64\x65\x62ug\x18\x04 \x01(\x08\x42\r\n\x0b_properties\"\xfd\x05\n\x12GenerativeProvider\x12\x17\n\x0freturn_metadata\x18\x01 \x01(\x08\x12\x35\n\tanthropic\x18\x02 \x01(\x0b\x32 .weaviate.v1.GenerativeAnthropicH\x00\x12\x33\n\x08\x61nyscale\x18\x03 \x01(\x0b\x32\x1f.weaviate.v1.GenerativeAnyscaleH\x00\x12)\n\x03\x61ws\x18\x04 \x01(\x0b\x32\x1a.weaviate.v1.GenerativeAWSH\x00\x12/\n\x06\x63ohere\x18\x05 \x01(\x0b\x32\x1d.weaviate.v1.GenerativeCohereH\x00\x12-\n\x05\x64ummy\x18\x06 \x01(\x0b\x32\x1c.weaviate.v1.GenerativeDummyH\x00\x12\x31\n\x07mistral\x18\x07 
\x01(\x0b\x32\x1e.weaviate.v1.GenerativeMistralH\x00\x12/\n\x06ollama\x18\x08 \x01(\x0b\x32\x1d.weaviate.v1.GenerativeOllamaH\x00\x12/\n\x06openai\x18\t \x01(\x0b\x32\x1d.weaviate.v1.GenerativeOpenAIH\x00\x12/\n\x06google\x18\n \x01(\x0b\x32\x1d.weaviate.v1.GenerativeGoogleH\x00\x12\x37\n\ndatabricks\x18\x0b \x01(\x0b\x32!.weaviate.v1.GenerativeDatabricksH\x00\x12\x37\n\nfriendliai\x18\x0c \x01(\x0b\x32!.weaviate.v1.GenerativeFriendliAIH\x00\x12/\n\x06nvidia\x18\r \x01(\x0b\x32\x1d.weaviate.v1.GenerativeNvidiaH\x00\x12)\n\x03xai\x18\x0e \x01(\x0b\x32\x1a.weaviate.v1.GenerativeXAIH\x00\x12;\n\x0c\x63ontextualai\x18\x0f \x01(\x0b\x32#.weaviate.v1.GenerativeContextualAIH\x00\x42\x06\n\x04kind\"\xb1\x03\n\x13GenerativeAnthropic\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x18\n\x0btemperature\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x12\n\x05top_k\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12\x12\n\x05top_p\x18\x06 \x01(\x01H\x05\x88\x01\x01\x12\x33\n\x0estop_sequences\x18\x07 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x06\x88\x01\x01\x12+\n\x06images\x18\x08 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x35\n\x10image_properties\x18\t \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x08\x88\x01\x01\x42\x0b\n\t_base_urlB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_kB\x08\n\x06_top_pB\x11\n\x0f_stop_sequencesB\t\n\x07_imagesB\x13\n\x11_image_properties\"\x80\x01\n\x12GenerativeAnyscale\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 \x01(\x01H\x02\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\x0e\n\x0c_temperature\"\xc5\x03\n\rGenerativeAWS\x12\x12\n\x05model\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0btemperature\x18\x08 \x01(\x01H\x01\x88\x01\x01\x12\x14\n\x07service\x18\t 
\x01(\tH\x02\x88\x01\x01\x12\x13\n\x06region\x18\n \x01(\tH\x03\x88\x01\x01\x12\x15\n\x08\x65ndpoint\x18\x0b \x01(\tH\x04\x88\x01\x01\x12\x19\n\x0ctarget_model\x18\x0c \x01(\tH\x05\x88\x01\x01\x12\x1b\n\x0etarget_variant\x18\r \x01(\tH\x06\x88\x01\x01\x12+\n\x06images\x18\x0e \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0f \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x08\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x10 \x01(\x03H\t\x88\x01\x01\x42\x08\n\x06_modelB\x0e\n\x0c_temperatureB\n\n\x08_serviceB\t\n\x07_regionB\x0b\n\t_endpointB\x0f\n\r_target_modelB\x11\n\x0f_target_variantB\t\n\x07_imagesB\x13\n\x11_image_propertiesB\r\n\x0b_max_tokens\"\x88\x04\n\x10GenerativeCohere\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1e\n\x11\x66requency_penalty\x18\x02 \x01(\x01H\x01\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x12\x12\n\x05model\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x0e\n\x01k\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12\x0e\n\x01p\x18\x06 \x01(\x01H\x05\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x07 \x01(\x01H\x06\x88\x01\x01\x12\x33\n\x0estop_sequences\x18\x08 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x18\n\x0btemperature\x18\t \x01(\x01H\x08\x88\x01\x01\x12+\n\x06images\x18\n \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\t\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0b \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\n\x88\x01\x01\x42\x0b\n\t_base_urlB\x14\n\x12_frequency_penaltyB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x04\n\x02_kB\x04\n\x02_pB\x13\n\x11_presence_penaltyB\x11\n\x0f_stop_sequencesB\x0e\n\x0c_temperatureB\t\n\x07_imagesB\x13\n\x11_image_properties\"\x11\n\x0fGenerativeDummy\"\xc5\x01\n\x11GenerativeMistral\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x18\n\x0btemperature\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x12\n\x05top_p\x18\x05 
\x01(\x01H\x04\x88\x01\x01\x42\x0b\n\t_base_urlB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_p\"\x8a\x02\n\x10GenerativeOllama\x12\x19\n\x0c\x61pi_endpoint\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12+\n\x06images\x18\x04 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x03\x88\x01\x01\x12\x35\n\x10image_properties\x18\x05 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x04\x88\x01\x01\x42\x0f\n\r_api_endpointB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\t\n\x07_imagesB\x13\n\x11_image_properties\"\xe3\x08\n\x10GenerativeOpenAI\x12\x1e\n\x11\x66requency_penalty\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x0e\n\x01n\x18\x04 \x01(\x03H\x03\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x05 \x01(\x01H\x04\x88\x01\x01\x12)\n\x04stop\x18\x06 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x05\x88\x01\x01\x12\x18\n\x0btemperature\x18\x07 \x01(\x01H\x06\x88\x01\x01\x12\x12\n\x05top_p\x18\x08 \x01(\x01H\x07\x88\x01\x01\x12\x15\n\x08\x62\x61se_url\x18\t \x01(\tH\x08\x88\x01\x01\x12\x18\n\x0b\x61pi_version\x18\n \x01(\tH\t\x88\x01\x01\x12\x1a\n\rresource_name\x18\x0b \x01(\tH\n\x88\x01\x01\x12\x1a\n\rdeployment_id\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12\x15\n\x08is_azure\x18\r \x01(\x08H\x0c\x88\x01\x01\x12+\n\x06images\x18\x0e \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\r\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0f \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x0e\x88\x01\x01\x12L\n\x10reasoning_effort\x18\x10 \x01(\x0e\x32-.weaviate.v1.GenerativeOpenAI.ReasoningEffortH\x0f\x88\x01\x01\x12?\n\tverbosity\x18\x11 \x01(\x0e\x32\'.weaviate.v1.GenerativeOpenAI.VerbosityH\x10\x88\x01\x01\"\xa3\x01\n\x0fReasoningEffort\x12 
\n\x1cREASONING_EFFORT_UNSPECIFIED\x10\x00\x12\x1c\n\x18REASONING_EFFORT_MINIMAL\x10\x01\x12\x18\n\x14REASONING_EFFORT_LOW\x10\x02\x12\x1b\n\x17REASONING_EFFORT_MEDIUM\x10\x03\x12\x19\n\x15REASONING_EFFORT_HIGH\x10\x04\"c\n\tVerbosity\x12\x19\n\x15VERBOSITY_UNSPECIFIED\x10\x00\x12\x11\n\rVERBOSITY_LOW\x10\x01\x12\x14\n\x10VERBOSITY_MEDIUM\x10\x02\x12\x12\n\x0eVERBOSITY_HIGH\x10\x03\x42\x14\n\x12_frequency_penaltyB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x04\n\x02_nB\x13\n\x11_presence_penaltyB\x07\n\x05_stopB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\x0b\n\t_base_urlB\x0e\n\x0c_api_versionB\x10\n\x0e_resource_nameB\x10\n\x0e_deployment_idB\x0b\n\t_is_azureB\t\n\x07_imagesB\x13\n\x11_image_propertiesB\x13\n\x11_reasoning_effortB\x0c\n\n_verbosity\"\x92\x05\n\x10GenerativeGoogle\x12\x1e\n\x11\x66requency_penalty\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x18\n\x0btemperature\x18\x05 \x01(\x01H\x04\x88\x01\x01\x12\x12\n\x05top_k\x18\x06 \x01(\x03H\x05\x88\x01\x01\x12\x12\n\x05top_p\x18\x07 \x01(\x01H\x06\x88\x01\x01\x12\x33\n\x0estop_sequences\x18\x08 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x19\n\x0c\x61pi_endpoint\x18\t \x01(\tH\x08\x88\x01\x01\x12\x17\n\nproject_id\x18\n \x01(\tH\t\x88\x01\x01\x12\x18\n\x0b\x65ndpoint_id\x18\x0b \x01(\tH\n\x88\x01\x01\x12\x13\n\x06region\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12+\n\x06images\x18\r \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x0c\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0e 
\x01(\x0b\x32\x16.weaviate.v1.TextArrayH\r\x88\x01\x01\x42\x14\n\x12_frequency_penaltyB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x13\n\x11_presence_penaltyB\x0e\n\x0c_temperatureB\x08\n\x06_top_kB\x08\n\x06_top_pB\x11\n\x0f_stop_sequencesB\x0f\n\r_api_endpointB\r\n\x0b_project_idB\x0e\n\x0c_endpoint_idB\t\n\x07_regionB\t\n\x07_imagesB\x13\n\x11_image_properties\"\xd0\x03\n\x14GenerativeDatabricks\x12\x15\n\x08\x65ndpoint\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x1e\n\x11\x66requency_penalty\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x16\n\tlog_probs\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x1a\n\rtop_log_probs\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x06 \x01(\x03H\x05\x88\x01\x01\x12\x0e\n\x01n\x18\x07 \x01(\x03H\x06\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x08 \x01(\x01H\x07\x88\x01\x01\x12)\n\x04stop\x18\t \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x08\x88\x01\x01\x12\x18\n\x0btemperature\x18\n \x01(\x01H\t\x88\x01\x01\x12\x12\n\x05top_p\x18\x0b \x01(\x01H\n\x88\x01\x01\x42\x0b\n\t_endpointB\x08\n\x06_modelB\x14\n\x12_frequency_penaltyB\x0c\n\n_log_probsB\x10\n\x0e_top_log_probsB\r\n\x0b_max_tokensB\x04\n\x02_nB\x13\n\x11_presence_penaltyB\x07\n\x05_stopB\x0e\n\x0c_temperatureB\x08\n\x06_top_p\"\xde\x01\n\x14GenerativeFriendliAI\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x12\x18\n\x0btemperature\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x0e\n\x01n\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12\x12\n\x05top_p\x18\x06 \x01(\x01H\x05\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\r\n\x0b_max_tokensB\x0e\n\x0c_temperatureB\x04\n\x02_nB\x08\n\x06_top_p\"\xc4\x01\n\x10GenerativeNvidia\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x12\n\x05top_p\x18\x04 
\x01(\x01H\x03\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x05 \x01(\x03H\x04\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\r\n\x0b_max_tokens\"\xc5\x02\n\rGenerativeXAI\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x12\n\x05top_p\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12+\n\x06images\x18\x06 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x05\x88\x01\x01\x12\x35\n\x10image_properties\x18\x07 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x06\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\r\n\x0b_max_tokensB\t\n\x07_imagesB\x13\n\x11_image_properties\"\xce\x02\n\x16GenerativeContextualAI\x12\x12\n\x05model\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0btemperature\x18\x02 \x01(\x01H\x01\x88\x01\x01\x12\x12\n\x05top_p\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x1b\n\x0emax_new_tokens\x18\x04 \x01(\x03H\x03\x88\x01\x01\x12\x1a\n\rsystem_prompt\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x1d\n\x10\x61void_commentary\x18\x06 \x01(\x08H\x05\x88\x01\x01\x12.\n\tknowledge\x18\x07 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x06\x88\x01\x01\x42\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\x11\n\x0f_max_new_tokensB\x10\n\x0e_system_promptB\x13\n\x11_avoid_commentaryB\x0c\n\n_knowledge\"\x92\x01\n\x1bGenerativeAnthropicMetadata\x12=\n\x05usage\x18\x01 \x01(\x0b\x32..weaviate.v1.GenerativeAnthropicMetadata.Usage\x1a\x34\n\x05Usage\x12\x14\n\x0cinput_tokens\x18\x01 \x01(\x03\x12\x15\n\routput_tokens\x18\x02 \x01(\x03\"\x1c\n\x1aGenerativeAnyscaleMetadata\"\x17\n\x15GenerativeAWSMetadata\"\x9c\x06\n\x18GenerativeCohereMetadata\x12J\n\x0b\x61pi_version\x18\x01 \x01(\x0b\x32\x30.weaviate.v1.GenerativeCohereMetadata.ApiVersionH\x00\x88\x01\x01\x12L\n\x0c\x62illed_units\x18\x02 
\x01(\x0b\x32\x31.weaviate.v1.GenerativeCohereMetadata.BilledUnitsH\x01\x88\x01\x01\x12\x41\n\x06tokens\x18\x03 \x01(\x0b\x32,.weaviate.v1.GenerativeCohereMetadata.TokensH\x02\x88\x01\x01\x12-\n\x08warnings\x18\x04 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x03\x88\x01\x01\x1a\x8e\x01\n\nApiVersion\x12\x14\n\x07version\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1a\n\ris_deprecated\x18\x02 \x01(\x08H\x01\x88\x01\x01\x12\x1c\n\x0fis_experimental\x18\x03 \x01(\x08H\x02\x88\x01\x01\x42\n\n\x08_versionB\x10\n\x0e_is_deprecatedB\x12\n\x10_is_experimental\x1a\xc5\x01\n\x0b\x42illedUnits\x12\x19\n\x0cinput_tokens\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1a\n\routput_tokens\x18\x02 \x01(\x01H\x01\x88\x01\x01\x12\x19\n\x0csearch_units\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x1c\n\x0f\x63lassifications\x18\x04 \x01(\x01H\x03\x88\x01\x01\x42\x0f\n\r_input_tokensB\x10\n\x0e_output_tokensB\x0f\n\r_search_unitsB\x12\n\x10_classifications\x1a\x62\n\x06Tokens\x12\x19\n\x0cinput_tokens\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1a\n\routput_tokens\x18\x02 \x01(\x01H\x01\x88\x01\x01\x42\x0f\n\r_input_tokensB\x10\n\x0e_output_tokensB\x0e\n\x0c_api_versionB\x0f\n\r_billed_unitsB\t\n\x07_tokensB\x0b\n\t_warnings\"\x19\n\x17GenerativeDummyMetadata\"\x81\x02\n\x19GenerativeMistralMetadata\x12@\n\x05usage\x18\x01 \x01(\x0b\x32,.weaviate.v1.GenerativeMistralMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\x1a\n\x18GenerativeOllamaMetadata\"\xff\x01\n\x18GenerativeOpenAIMetadata\x12?\n\x05usage\x18\x01 \x01(\x0b\x32+.weaviate.v1.GenerativeOpenAIMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 
\x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\xe8\x06\n\x18GenerativeGoogleMetadata\x12\x45\n\x08metadata\x18\x01 \x01(\x0b\x32..weaviate.v1.GenerativeGoogleMetadata.MetadataH\x00\x88\x01\x01\x12P\n\x0eusage_metadata\x18\x02 \x01(\x0b\x32\x33.weaviate.v1.GenerativeGoogleMetadata.UsageMetadataH\x01\x88\x01\x01\x1a~\n\nTokenCount\x12&\n\x19total_billable_characters\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x42\x1c\n\x1a_total_billable_charactersB\x0f\n\r_total_tokens\x1a\xe1\x01\n\rTokenMetadata\x12P\n\x11input_token_count\x18\x01 \x01(\x0b\x32\x30.weaviate.v1.GenerativeGoogleMetadata.TokenCountH\x00\x88\x01\x01\x12Q\n\x12output_token_count\x18\x02 \x01(\x0b\x32\x30.weaviate.v1.GenerativeGoogleMetadata.TokenCountH\x01\x88\x01\x01\x42\x14\n\x12_input_token_countB\x15\n\x13_output_token_count\x1ao\n\x08Metadata\x12P\n\x0etoken_metadata\x18\x01 \x01(\x0b\x32\x33.weaviate.v1.GenerativeGoogleMetadata.TokenMetadataH\x00\x88\x01\x01\x42\x11\n\x0f_token_metadata\x1a\xbd\x01\n\rUsageMetadata\x12\x1f\n\x12prompt_token_count\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12#\n\x16\x63\x61ndidates_token_count\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1e\n\x11total_token_count\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x15\n\x13_prompt_token_countB\x19\n\x17_candidates_token_countB\x14\n\x12_total_token_countB\x0b\n\t_metadataB\x11\n\x0f_usage_metadata\"\x87\x02\n\x1cGenerativeDatabricksMetadata\x12\x43\n\x05usage\x18\x01 \x01(\x0b\x32/.weaviate.v1.GenerativeDatabricksMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 
\x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\x87\x02\n\x1cGenerativeFriendliAIMetadata\x12\x43\n\x05usage\x18\x01 \x01(\x0b\x32/.weaviate.v1.GenerativeFriendliAIMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\xff\x01\n\x18GenerativeNvidiaMetadata\x12?\n\x05usage\x18\x01 \x01(\x0b\x32+.weaviate.v1.GenerativeNvidiaMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\xf9\x01\n\x15GenerativeXAIMetadata\x12<\n\x05usage\x18\x01 \x01(\x0b\x32(.weaviate.v1.GenerativeXAIMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\x8f\x06\n\x12GenerativeMetadata\x12=\n\tanthropic\x18\x01 \x01(\x0b\x32(.weaviate.v1.GenerativeAnthropicMetadataH\x00\x12;\n\x08\x61nyscale\x18\x02 \x01(\x0b\x32\'.weaviate.v1.GenerativeAnyscaleMetadataH\x00\x12\x31\n\x03\x61ws\x18\x03 \x01(\x0b\x32\".weaviate.v1.GenerativeAWSMetadataH\x00\x12\x37\n\x06\x63ohere\x18\x04 \x01(\x0b\x32%.weaviate.v1.GenerativeCohereMetadataH\x00\x12\x35\n\x05\x64ummy\x18\x05 \x01(\x0b\x32$.weaviate.v1.GenerativeDummyMetadataH\x00\x12\x39\n\x07mistral\x18\x06 
\x01(\x0b\x32&.weaviate.v1.GenerativeMistralMetadataH\x00\x12\x37\n\x06ollama\x18\x07 \x01(\x0b\x32%.weaviate.v1.GenerativeOllamaMetadataH\x00\x12\x37\n\x06openai\x18\x08 \x01(\x0b\x32%.weaviate.v1.GenerativeOpenAIMetadataH\x00\x12\x37\n\x06google\x18\t \x01(\x0b\x32%.weaviate.v1.GenerativeGoogleMetadataH\x00\x12?\n\ndatabricks\x18\n \x01(\x0b\x32).weaviate.v1.GenerativeDatabricksMetadataH\x00\x12?\n\nfriendliai\x18\x0b \x01(\x0b\x32).weaviate.v1.GenerativeFriendliAIMetadataH\x00\x12\x37\n\x06nvidia\x18\x0c \x01(\x0b\x32%.weaviate.v1.GenerativeNvidiaMetadataH\x00\x12\x31\n\x03xai\x18\r \x01(\x0b\x32\".weaviate.v1.GenerativeXAIMetadataH\x00\x42\x06\n\x04kind\"\xa2\x01\n\x0fGenerativeReply\x12\x0e\n\x06result\x18\x01 \x01(\t\x12\x30\n\x05\x64\x65\x62ug\x18\x02 \x01(\x0b\x32\x1c.weaviate.v1.GenerativeDebugH\x00\x88\x01\x01\x12\x36\n\x08metadata\x18\x03 \x01(\x0b\x32\x1f.weaviate.v1.GenerativeMetadataH\x01\x88\x01\x01\x42\x08\n\x06_debugB\x0b\n\t_metadata\"@\n\x10GenerativeResult\x12,\n\x06values\x18\x01 \x03(\x0b\x32\x1c.weaviate.v1.GenerativeReply\";\n\x0fGenerativeDebug\x12\x18\n\x0b\x66ull_prompt\x18\x01 \x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_full_promptBt\n#io.weaviate.client.grpc.protocol.v1B\x17WeaviateProtoGenerativeZ4github.com/weaviate/weaviate/grpc/generated;protocolb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13v1/generative.proto\x12\x0bweaviate.v1\x1a\rv1/base.proto\"\xdd\x03\n\x10GenerativeSearch\x12\"\n\x16single_response_prompt\x18\x01 \x01(\tB\x02\x18\x01\x12!\n\x15grouped_response_task\x18\x02 \x01(\tB\x02\x18\x01\x12\x1e\n\x12grouped_properties\x18\x03 \x03(\tB\x02\x18\x01\x12\x34\n\x06single\x18\x04 \x01(\x0b\x32$.weaviate.v1.GenerativeSearch.Single\x12\x36\n\x07grouped\x18\x05 \x01(\x0b\x32%.weaviate.v1.GenerativeSearch.Grouped\x1aY\n\x06Single\x12\x0e\n\x06prompt\x18\x01 \x01(\t\x12\r\n\x05\x64\x65\x62ug\x18\x02 \x01(\x08\x12\x30\n\x07queries\x18\x03 
\x03(\x0b\x32\x1f.weaviate.v1.GenerativeProvider\x1a\x98\x01\n\x07Grouped\x12\x0c\n\x04task\x18\x01 \x01(\t\x12/\n\nproperties\x18\x02 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x00\x88\x01\x01\x12\x30\n\x07queries\x18\x03 \x03(\x0b\x32\x1f.weaviate.v1.GenerativeProvider\x12\r\n\x05\x64\x65\x62ug\x18\x04 \x01(\x08\x42\r\n\x0b_properties\"\xfd\x05\n\x12GenerativeProvider\x12\x17\n\x0freturn_metadata\x18\x01 \x01(\x08\x12\x35\n\tanthropic\x18\x02 \x01(\x0b\x32 .weaviate.v1.GenerativeAnthropicH\x00\x12\x33\n\x08\x61nyscale\x18\x03 \x01(\x0b\x32\x1f.weaviate.v1.GenerativeAnyscaleH\x00\x12)\n\x03\x61ws\x18\x04 \x01(\x0b\x32\x1a.weaviate.v1.GenerativeAWSH\x00\x12/\n\x06\x63ohere\x18\x05 \x01(\x0b\x32\x1d.weaviate.v1.GenerativeCohereH\x00\x12-\n\x05\x64ummy\x18\x06 \x01(\x0b\x32\x1c.weaviate.v1.GenerativeDummyH\x00\x12\x31\n\x07mistral\x18\x07 \x01(\x0b\x32\x1e.weaviate.v1.GenerativeMistralH\x00\x12/\n\x06ollama\x18\x08 \x01(\x0b\x32\x1d.weaviate.v1.GenerativeOllamaH\x00\x12/\n\x06openai\x18\t \x01(\x0b\x32\x1d.weaviate.v1.GenerativeOpenAIH\x00\x12/\n\x06google\x18\n \x01(\x0b\x32\x1d.weaviate.v1.GenerativeGoogleH\x00\x12\x37\n\ndatabricks\x18\x0b \x01(\x0b\x32!.weaviate.v1.GenerativeDatabricksH\x00\x12\x37\n\nfriendliai\x18\x0c \x01(\x0b\x32!.weaviate.v1.GenerativeFriendliAIH\x00\x12/\n\x06nvidia\x18\r \x01(\x0b\x32\x1d.weaviate.v1.GenerativeNvidiaH\x00\x12)\n\x03xai\x18\x0e \x01(\x0b\x32\x1a.weaviate.v1.GenerativeXAIH\x00\x12;\n\x0c\x63ontextualai\x18\x0f \x01(\x0b\x32#.weaviate.v1.GenerativeContextualAIH\x00\x42\x06\n\x04kind\"\xb1\x03\n\x13GenerativeAnthropic\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x18\n\x0btemperature\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x12\n\x05top_k\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12\x12\n\x05top_p\x18\x06 \x01(\x01H\x05\x88\x01\x01\x12\x33\n\x0estop_sequences\x18\x07 
\x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x06\x88\x01\x01\x12+\n\x06images\x18\x08 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x35\n\x10image_properties\x18\t \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x08\x88\x01\x01\x42\x0b\n\t_base_urlB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_kB\x08\n\x06_top_pB\x11\n\x0f_stop_sequencesB\t\n\x07_imagesB\x13\n\x11_image_properties\"\x80\x01\n\x12GenerativeAnyscale\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 \x01(\x01H\x02\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\x0e\n\x0c_temperature\"\x8d\x04\n\rGenerativeAWS\x12\x12\n\x05model\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0btemperature\x18\x08 \x01(\x01H\x01\x88\x01\x01\x12\x14\n\x07service\x18\t \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06region\x18\n \x01(\tH\x03\x88\x01\x01\x12\x15\n\x08\x65ndpoint\x18\x0b \x01(\tH\x04\x88\x01\x01\x12\x19\n\x0ctarget_model\x18\x0c \x01(\tH\x05\x88\x01\x01\x12\x1b\n\x0etarget_variant\x18\r \x01(\tH\x06\x88\x01\x01\x12+\n\x06images\x18\x0e \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0f \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x08\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x10 \x01(\x03H\t\x88\x01\x01\x12\x33\n\x0estop_sequences\x18\x11 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\n\x88\x01\x01\x42\x08\n\x06_modelB\x0e\n\x0c_temperatureB\n\n\x08_serviceB\t\n\x07_regionB\x0b\n\t_endpointB\x0f\n\r_target_modelB\x11\n\x0f_target_variantB\t\n\x07_imagesB\x13\n\x11_image_propertiesB\r\n\x0b_max_tokensB\x11\n\x0f_stop_sequences\"\x88\x04\n\x10GenerativeCohere\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1e\n\x11\x66requency_penalty\x18\x02 \x01(\x01H\x01\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x12\x12\n\x05model\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x0e\n\x01k\x18\x05 
\x01(\x03H\x04\x88\x01\x01\x12\x0e\n\x01p\x18\x06 \x01(\x01H\x05\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x07 \x01(\x01H\x06\x88\x01\x01\x12\x33\n\x0estop_sequences\x18\x08 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x18\n\x0btemperature\x18\t \x01(\x01H\x08\x88\x01\x01\x12+\n\x06images\x18\n \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\t\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0b \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\n\x88\x01\x01\x42\x0b\n\t_base_urlB\x14\n\x12_frequency_penaltyB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x04\n\x02_kB\x04\n\x02_pB\x13\n\x11_presence_penaltyB\x11\n\x0f_stop_sequencesB\x0e\n\x0c_temperatureB\t\n\x07_imagesB\x13\n\x11_image_properties\"\x11\n\x0fGenerativeDummy\"\xc5\x01\n\x11GenerativeMistral\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x18\n\x0btemperature\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x12\n\x05top_p\x18\x05 \x01(\x01H\x04\x88\x01\x01\x42\x0b\n\t_base_urlB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_p\"\x8a\x02\n\x10GenerativeOllama\x12\x19\n\x0c\x61pi_endpoint\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12+\n\x06images\x18\x04 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x03\x88\x01\x01\x12\x35\n\x10image_properties\x18\x05 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x04\x88\x01\x01\x42\x0f\n\r_api_endpointB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\t\n\x07_imagesB\x13\n\x11_image_properties\"\xe3\x08\n\x10GenerativeOpenAI\x12\x1e\n\x11\x66requency_penalty\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x0e\n\x01n\x18\x04 \x01(\x03H\x03\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x05 \x01(\x01H\x04\x88\x01\x01\x12)\n\x04stop\x18\x06 
\x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x05\x88\x01\x01\x12\x18\n\x0btemperature\x18\x07 \x01(\x01H\x06\x88\x01\x01\x12\x12\n\x05top_p\x18\x08 \x01(\x01H\x07\x88\x01\x01\x12\x15\n\x08\x62\x61se_url\x18\t \x01(\tH\x08\x88\x01\x01\x12\x18\n\x0b\x61pi_version\x18\n \x01(\tH\t\x88\x01\x01\x12\x1a\n\rresource_name\x18\x0b \x01(\tH\n\x88\x01\x01\x12\x1a\n\rdeployment_id\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12\x15\n\x08is_azure\x18\r \x01(\x08H\x0c\x88\x01\x01\x12+\n\x06images\x18\x0e \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\r\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0f \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x0e\x88\x01\x01\x12L\n\x10reasoning_effort\x18\x10 \x01(\x0e\x32-.weaviate.v1.GenerativeOpenAI.ReasoningEffortH\x0f\x88\x01\x01\x12?\n\tverbosity\x18\x11 \x01(\x0e\x32\'.weaviate.v1.GenerativeOpenAI.VerbosityH\x10\x88\x01\x01\"\xa3\x01\n\x0fReasoningEffort\x12 \n\x1cREASONING_EFFORT_UNSPECIFIED\x10\x00\x12\x1c\n\x18REASONING_EFFORT_MINIMAL\x10\x01\x12\x18\n\x14REASONING_EFFORT_LOW\x10\x02\x12\x1b\n\x17REASONING_EFFORT_MEDIUM\x10\x03\x12\x19\n\x15REASONING_EFFORT_HIGH\x10\x04\"c\n\tVerbosity\x12\x19\n\x15VERBOSITY_UNSPECIFIED\x10\x00\x12\x11\n\rVERBOSITY_LOW\x10\x01\x12\x14\n\x10VERBOSITY_MEDIUM\x10\x02\x12\x12\n\x0eVERBOSITY_HIGH\x10\x03\x42\x14\n\x12_frequency_penaltyB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x04\n\x02_nB\x13\n\x11_presence_penaltyB\x07\n\x05_stopB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\x0b\n\t_base_urlB\x0e\n\x0c_api_versionB\x10\n\x0e_resource_nameB\x10\n\x0e_deployment_idB\x0b\n\t_is_azureB\t\n\x07_imagesB\x13\n\x11_image_propertiesB\x13\n\x11_reasoning_effortB\x0c\n\n_verbosity\"\x92\x05\n\x10GenerativeGoogle\x12\x1e\n\x11\x66requency_penalty\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05model\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x18\n\x0btemperature\x18\x05 \x01(\x01H\x04\x88\x01\x01\x12\x12\n\x05top_k\x18\x06 
\x01(\x03H\x05\x88\x01\x01\x12\x12\n\x05top_p\x18\x07 \x01(\x01H\x06\x88\x01\x01\x12\x33\n\x0estop_sequences\x18\x08 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x07\x88\x01\x01\x12\x19\n\x0c\x61pi_endpoint\x18\t \x01(\tH\x08\x88\x01\x01\x12\x17\n\nproject_id\x18\n \x01(\tH\t\x88\x01\x01\x12\x18\n\x0b\x65ndpoint_id\x18\x0b \x01(\tH\n\x88\x01\x01\x12\x13\n\x06region\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12+\n\x06images\x18\r \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x0c\x88\x01\x01\x12\x35\n\x10image_properties\x18\x0e \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\r\x88\x01\x01\x42\x14\n\x12_frequency_penaltyB\r\n\x0b_max_tokensB\x08\n\x06_modelB\x13\n\x11_presence_penaltyB\x0e\n\x0c_temperatureB\x08\n\x06_top_kB\x08\n\x06_top_pB\x11\n\x0f_stop_sequencesB\x0f\n\r_api_endpointB\r\n\x0b_project_idB\x0e\n\x0c_endpoint_idB\t\n\x07_regionB\t\n\x07_imagesB\x13\n\x11_image_properties\"\xd0\x03\n\x14GenerativeDatabricks\x12\x15\n\x08\x65ndpoint\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x1e\n\x11\x66requency_penalty\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x16\n\tlog_probs\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x1a\n\rtop_log_probs\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x06 \x01(\x03H\x05\x88\x01\x01\x12\x0e\n\x01n\x18\x07 \x01(\x03H\x06\x88\x01\x01\x12\x1d\n\x10presence_penalty\x18\x08 \x01(\x01H\x07\x88\x01\x01\x12)\n\x04stop\x18\t \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x08\x88\x01\x01\x12\x18\n\x0btemperature\x18\n \x01(\x01H\t\x88\x01\x01\x12\x12\n\x05top_p\x18\x0b \x01(\x01H\n\x88\x01\x01\x42\x0b\n\t_endpointB\x08\n\x06_modelB\x14\n\x12_frequency_penaltyB\x0c\n\n_log_probsB\x10\n\x0e_top_log_probsB\r\n\x0b_max_tokensB\x04\n\x02_nB\x13\n\x11_presence_penaltyB\x07\n\x05_stopB\x0e\n\x0c_temperatureB\x08\n\x06_top_p\"\xde\x01\n\x14GenerativeFriendliAI\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x03 
\x01(\x03H\x02\x88\x01\x01\x12\x18\n\x0btemperature\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x0e\n\x01n\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12\x12\n\x05top_p\x18\x06 \x01(\x01H\x05\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\r\n\x0b_max_tokensB\x0e\n\x0c_temperatureB\x04\n\x02_nB\x08\n\x06_top_p\"\xc4\x01\n\x10GenerativeNvidia\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x12\n\x05top_p\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x05 \x01(\x03H\x04\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\r\n\x0b_max_tokens\"\xc5\x02\n\rGenerativeXAI\x12\x15\n\x08\x62\x61se_url\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05model\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0btemperature\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x12\n\x05top_p\x18\x04 \x01(\x01H\x03\x88\x01\x01\x12\x17\n\nmax_tokens\x18\x05 \x01(\x03H\x04\x88\x01\x01\x12+\n\x06images\x18\x06 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x05\x88\x01\x01\x12\x35\n\x10image_properties\x18\x07 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x06\x88\x01\x01\x42\x0b\n\t_base_urlB\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\r\n\x0b_max_tokensB\t\n\x07_imagesB\x13\n\x11_image_properties\"\xce\x02\n\x16GenerativeContextualAI\x12\x12\n\x05model\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0btemperature\x18\x02 \x01(\x01H\x01\x88\x01\x01\x12\x12\n\x05top_p\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x1b\n\x0emax_new_tokens\x18\x04 \x01(\x03H\x03\x88\x01\x01\x12\x1a\n\rsystem_prompt\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x1d\n\x10\x61void_commentary\x18\x06 \x01(\x08H\x05\x88\x01\x01\x12.\n\tknowledge\x18\x07 
\x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x06\x88\x01\x01\x42\x08\n\x06_modelB\x0e\n\x0c_temperatureB\x08\n\x06_top_pB\x11\n\x0f_max_new_tokensB\x10\n\x0e_system_promptB\x13\n\x11_avoid_commentaryB\x0c\n\n_knowledge\"\x92\x01\n\x1bGenerativeAnthropicMetadata\x12=\n\x05usage\x18\x01 \x01(\x0b\x32..weaviate.v1.GenerativeAnthropicMetadata.Usage\x1a\x34\n\x05Usage\x12\x14\n\x0cinput_tokens\x18\x01 \x01(\x03\x12\x15\n\routput_tokens\x18\x02 \x01(\x03\"\x1c\n\x1aGenerativeAnyscaleMetadata\"\x17\n\x15GenerativeAWSMetadata\"\x9c\x06\n\x18GenerativeCohereMetadata\x12J\n\x0b\x61pi_version\x18\x01 \x01(\x0b\x32\x30.weaviate.v1.GenerativeCohereMetadata.ApiVersionH\x00\x88\x01\x01\x12L\n\x0c\x62illed_units\x18\x02 \x01(\x0b\x32\x31.weaviate.v1.GenerativeCohereMetadata.BilledUnitsH\x01\x88\x01\x01\x12\x41\n\x06tokens\x18\x03 \x01(\x0b\x32,.weaviate.v1.GenerativeCohereMetadata.TokensH\x02\x88\x01\x01\x12-\n\x08warnings\x18\x04 \x01(\x0b\x32\x16.weaviate.v1.TextArrayH\x03\x88\x01\x01\x1a\x8e\x01\n\nApiVersion\x12\x14\n\x07version\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1a\n\ris_deprecated\x18\x02 \x01(\x08H\x01\x88\x01\x01\x12\x1c\n\x0fis_experimental\x18\x03 \x01(\x08H\x02\x88\x01\x01\x42\n\n\x08_versionB\x10\n\x0e_is_deprecatedB\x12\n\x10_is_experimental\x1a\xc5\x01\n\x0b\x42illedUnits\x12\x19\n\x0cinput_tokens\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1a\n\routput_tokens\x18\x02 \x01(\x01H\x01\x88\x01\x01\x12\x19\n\x0csearch_units\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x1c\n\x0f\x63lassifications\x18\x04 \x01(\x01H\x03\x88\x01\x01\x42\x0f\n\r_input_tokensB\x10\n\x0e_output_tokensB\x0f\n\r_search_unitsB\x12\n\x10_classifications\x1a\x62\n\x06Tokens\x12\x19\n\x0cinput_tokens\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1a\n\routput_tokens\x18\x02 
\x01(\x01H\x01\x88\x01\x01\x42\x0f\n\r_input_tokensB\x10\n\x0e_output_tokensB\x0e\n\x0c_api_versionB\x0f\n\r_billed_unitsB\t\n\x07_tokensB\x0b\n\t_warnings\"\x19\n\x17GenerativeDummyMetadata\"\x81\x02\n\x19GenerativeMistralMetadata\x12@\n\x05usage\x18\x01 \x01(\x0b\x32,.weaviate.v1.GenerativeMistralMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\x1a\n\x18GenerativeOllamaMetadata\"\xff\x01\n\x18GenerativeOpenAIMetadata\x12?\n\x05usage\x18\x01 \x01(\x0b\x32+.weaviate.v1.GenerativeOpenAIMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\xe8\x06\n\x18GenerativeGoogleMetadata\x12\x45\n\x08metadata\x18\x01 \x01(\x0b\x32..weaviate.v1.GenerativeGoogleMetadata.MetadataH\x00\x88\x01\x01\x12P\n\x0eusage_metadata\x18\x02 \x01(\x0b\x32\x33.weaviate.v1.GenerativeGoogleMetadata.UsageMetadataH\x01\x88\x01\x01\x1a~\n\nTokenCount\x12&\n\x19total_billable_characters\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x42\x1c\n\x1a_total_billable_charactersB\x0f\n\r_total_tokens\x1a\xe1\x01\n\rTokenMetadata\x12P\n\x11input_token_count\x18\x01 \x01(\x0b\x32\x30.weaviate.v1.GenerativeGoogleMetadata.TokenCountH\x00\x88\x01\x01\x12Q\n\x12output_token_count\x18\x02 \x01(\x0b\x32\x30.weaviate.v1.GenerativeGoogleMetadata.TokenCountH\x01\x88\x01\x01\x42\x14\n\x12_input_token_countB\x15\n\x13_output_token_count\x1ao\n\x08Metadata\x12P\n\x0etoken_metadata\x18\x01 
\x01(\x0b\x32\x33.weaviate.v1.GenerativeGoogleMetadata.TokenMetadataH\x00\x88\x01\x01\x42\x11\n\x0f_token_metadata\x1a\xbd\x01\n\rUsageMetadata\x12\x1f\n\x12prompt_token_count\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12#\n\x16\x63\x61ndidates_token_count\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1e\n\x11total_token_count\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x15\n\x13_prompt_token_countB\x19\n\x17_candidates_token_countB\x14\n\x12_total_token_countB\x0b\n\t_metadataB\x11\n\x0f_usage_metadata\"\x87\x02\n\x1cGenerativeDatabricksMetadata\x12\x43\n\x05usage\x18\x01 \x01(\x0b\x32/.weaviate.v1.GenerativeDatabricksMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\x87\x02\n\x1cGenerativeFriendliAIMetadata\x12\x43\n\x05usage\x18\x01 \x01(\x0b\x32/.weaviate.v1.GenerativeFriendliAIMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\xff\x01\n\x18GenerativeNvidiaMetadata\x12?\n\x05usage\x18\x01 \x01(\x0b\x32+.weaviate.v1.GenerativeNvidiaMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\xf9\x01\n\x15GenerativeXAIMetadata\x12<\n\x05usage\x18\x01 
\x01(\x0b\x32(.weaviate.v1.GenerativeXAIMetadata.UsageH\x00\x88\x01\x01\x1a\x97\x01\n\x05Usage\x12\x1a\n\rprompt_tokens\x18\x01 \x01(\x03H\x00\x88\x01\x01\x12\x1e\n\x11\x63ompletion_tokens\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x19\n\x0ctotal_tokens\x18\x03 \x01(\x03H\x02\x88\x01\x01\x42\x10\n\x0e_prompt_tokensB\x14\n\x12_completion_tokensB\x0f\n\r_total_tokensB\x08\n\x06_usage\"\x8f\x06\n\x12GenerativeMetadata\x12=\n\tanthropic\x18\x01 \x01(\x0b\x32(.weaviate.v1.GenerativeAnthropicMetadataH\x00\x12;\n\x08\x61nyscale\x18\x02 \x01(\x0b\x32\'.weaviate.v1.GenerativeAnyscaleMetadataH\x00\x12\x31\n\x03\x61ws\x18\x03 \x01(\x0b\x32\".weaviate.v1.GenerativeAWSMetadataH\x00\x12\x37\n\x06\x63ohere\x18\x04 \x01(\x0b\x32%.weaviate.v1.GenerativeCohereMetadataH\x00\x12\x35\n\x05\x64ummy\x18\x05 \x01(\x0b\x32$.weaviate.v1.GenerativeDummyMetadataH\x00\x12\x39\n\x07mistral\x18\x06 \x01(\x0b\x32&.weaviate.v1.GenerativeMistralMetadataH\x00\x12\x37\n\x06ollama\x18\x07 \x01(\x0b\x32%.weaviate.v1.GenerativeOllamaMetadataH\x00\x12\x37\n\x06openai\x18\x08 \x01(\x0b\x32%.weaviate.v1.GenerativeOpenAIMetadataH\x00\x12\x37\n\x06google\x18\t \x01(\x0b\x32%.weaviate.v1.GenerativeGoogleMetadataH\x00\x12?\n\ndatabricks\x18\n \x01(\x0b\x32).weaviate.v1.GenerativeDatabricksMetadataH\x00\x12?\n\nfriendliai\x18\x0b \x01(\x0b\x32).weaviate.v1.GenerativeFriendliAIMetadataH\x00\x12\x37\n\x06nvidia\x18\x0c \x01(\x0b\x32%.weaviate.v1.GenerativeNvidiaMetadataH\x00\x12\x31\n\x03xai\x18\r \x01(\x0b\x32\".weaviate.v1.GenerativeXAIMetadataH\x00\x42\x06\n\x04kind\"\xa2\x01\n\x0fGenerativeReply\x12\x0e\n\x06result\x18\x01 \x01(\t\x12\x30\n\x05\x64\x65\x62ug\x18\x02 \x01(\x0b\x32\x1c.weaviate.v1.GenerativeDebugH\x00\x88\x01\x01\x12\x36\n\x08metadata\x18\x03 \x01(\x0b\x32\x1f.weaviate.v1.GenerativeMetadataH\x01\x88\x01\x01\x42\x08\n\x06_debugB\x0b\n\t_metadata\"@\n\x10GenerativeResult\x12,\n\x06values\x18\x01 \x03(\x0b\x32\x1c.weaviate.v1.GenerativeReply\";\n\x0fGenerativeDebug\x12\x18\n\x0b\x66ull_prompt\x18\x01 
\x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_full_promptBt\n#io.weaviate.client.grpc.protocol.v1B\x17WeaviateProtoGenerativeZ4github.com/weaviate/weaviate/grpc/generated;protocolb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -52,93 +52,93 @@ _globals['_GENERATIVEANYSCALE']._serialized_start=1736 _globals['_GENERATIVEANYSCALE']._serialized_end=1864 _globals['_GENERATIVEAWS']._serialized_start=1867 - _globals['_GENERATIVEAWS']._serialized_end=2320 - _globals['_GENERATIVECOHERE']._serialized_start=2323 - _globals['_GENERATIVECOHERE']._serialized_end=2843 - _globals['_GENERATIVEDUMMY']._serialized_start=2845 - _globals['_GENERATIVEDUMMY']._serialized_end=2862 - _globals['_GENERATIVEMISTRAL']._serialized_start=2865 - _globals['_GENERATIVEMISTRAL']._serialized_end=3062 - _globals['_GENERATIVEOLLAMA']._serialized_start=3065 - _globals['_GENERATIVEOLLAMA']._serialized_end=3331 - _globals['_GENERATIVEOPENAI']._serialized_start=3334 - _globals['_GENERATIVEOPENAI']._serialized_end=4457 - _globals['_GENERATIVEOPENAI_REASONINGEFFORT']._serialized_start=3939 - _globals['_GENERATIVEOPENAI_REASONINGEFFORT']._serialized_end=4102 - _globals['_GENERATIVEOPENAI_VERBOSITY']._serialized_start=4104 - _globals['_GENERATIVEOPENAI_VERBOSITY']._serialized_end=4203 - _globals['_GENERATIVEGOOGLE']._serialized_start=4460 - _globals['_GENERATIVEGOOGLE']._serialized_end=5118 - _globals['_GENERATIVEDATABRICKS']._serialized_start=5121 - _globals['_GENERATIVEDATABRICKS']._serialized_end=5585 - _globals['_GENERATIVEFRIENDLIAI']._serialized_start=5588 - _globals['_GENERATIVEFRIENDLIAI']._serialized_end=5810 - _globals['_GENERATIVENVIDIA']._serialized_start=5813 - _globals['_GENERATIVENVIDIA']._serialized_end=6009 - _globals['_GENERATIVEXAI']._serialized_start=6012 - _globals['_GENERATIVEXAI']._serialized_end=6337 - _globals['_GENERATIVECONTEXTUALAI']._serialized_start=6340 - _globals['_GENERATIVECONTEXTUALAI']._serialized_end=6674 - 
_globals['_GENERATIVEANTHROPICMETADATA']._serialized_start=6677 - _globals['_GENERATIVEANTHROPICMETADATA']._serialized_end=6823 - _globals['_GENERATIVEANTHROPICMETADATA_USAGE']._serialized_start=6771 - _globals['_GENERATIVEANTHROPICMETADATA_USAGE']._serialized_end=6823 - _globals['_GENERATIVEANYSCALEMETADATA']._serialized_start=6825 - _globals['_GENERATIVEANYSCALEMETADATA']._serialized_end=6853 - _globals['_GENERATIVEAWSMETADATA']._serialized_start=6855 - _globals['_GENERATIVEAWSMETADATA']._serialized_end=6878 - _globals['_GENERATIVECOHEREMETADATA']._serialized_start=6881 - _globals['_GENERATIVECOHEREMETADATA']._serialized_end=7677 - _globals['_GENERATIVECOHEREMETADATA_APIVERSION']._serialized_start=7178 - _globals['_GENERATIVECOHEREMETADATA_APIVERSION']._serialized_end=7320 - _globals['_GENERATIVECOHEREMETADATA_BILLEDUNITS']._serialized_start=7323 - _globals['_GENERATIVECOHEREMETADATA_BILLEDUNITS']._serialized_end=7520 - _globals['_GENERATIVECOHEREMETADATA_TOKENS']._serialized_start=7522 - _globals['_GENERATIVECOHEREMETADATA_TOKENS']._serialized_end=7620 - _globals['_GENERATIVEDUMMYMETADATA']._serialized_start=7679 - _globals['_GENERATIVEDUMMYMETADATA']._serialized_end=7704 - _globals['_GENERATIVEMISTRALMETADATA']._serialized_start=7707 - _globals['_GENERATIVEMISTRALMETADATA']._serialized_end=7964 - _globals['_GENERATIVEMISTRALMETADATA_USAGE']._serialized_start=7803 - _globals['_GENERATIVEMISTRALMETADATA_USAGE']._serialized_end=7954 - _globals['_GENERATIVEOLLAMAMETADATA']._serialized_start=7966 - _globals['_GENERATIVEOLLAMAMETADATA']._serialized_end=7992 - _globals['_GENERATIVEOPENAIMETADATA']._serialized_start=7995 - _globals['_GENERATIVEOPENAIMETADATA']._serialized_end=8250 - _globals['_GENERATIVEOPENAIMETADATA_USAGE']._serialized_start=7803 - _globals['_GENERATIVEOPENAIMETADATA_USAGE']._serialized_end=7954 - _globals['_GENERATIVEGOOGLEMETADATA']._serialized_start=8253 - _globals['_GENERATIVEGOOGLEMETADATA']._serialized_end=9125 - 
_globals['_GENERATIVEGOOGLEMETADATA_TOKENCOUNT']._serialized_start=8434 - _globals['_GENERATIVEGOOGLEMETADATA_TOKENCOUNT']._serialized_end=8560 - _globals['_GENERATIVEGOOGLEMETADATA_TOKENMETADATA']._serialized_start=8563 - _globals['_GENERATIVEGOOGLEMETADATA_TOKENMETADATA']._serialized_end=8788 - _globals['_GENERATIVEGOOGLEMETADATA_METADATA']._serialized_start=8790 - _globals['_GENERATIVEGOOGLEMETADATA_METADATA']._serialized_end=8901 - _globals['_GENERATIVEGOOGLEMETADATA_USAGEMETADATA']._serialized_start=8904 - _globals['_GENERATIVEGOOGLEMETADATA_USAGEMETADATA']._serialized_end=9093 - _globals['_GENERATIVEDATABRICKSMETADATA']._serialized_start=9128 - _globals['_GENERATIVEDATABRICKSMETADATA']._serialized_end=9391 - _globals['_GENERATIVEDATABRICKSMETADATA_USAGE']._serialized_start=7803 - _globals['_GENERATIVEDATABRICKSMETADATA_USAGE']._serialized_end=7954 - _globals['_GENERATIVEFRIENDLIAIMETADATA']._serialized_start=9394 - _globals['_GENERATIVEFRIENDLIAIMETADATA']._serialized_end=9657 - _globals['_GENERATIVEFRIENDLIAIMETADATA_USAGE']._serialized_start=7803 - _globals['_GENERATIVEFRIENDLIAIMETADATA_USAGE']._serialized_end=7954 - _globals['_GENERATIVENVIDIAMETADATA']._serialized_start=9660 - _globals['_GENERATIVENVIDIAMETADATA']._serialized_end=9915 - _globals['_GENERATIVENVIDIAMETADATA_USAGE']._serialized_start=7803 - _globals['_GENERATIVENVIDIAMETADATA_USAGE']._serialized_end=7954 - _globals['_GENERATIVEXAIMETADATA']._serialized_start=9918 - _globals['_GENERATIVEXAIMETADATA']._serialized_end=10167 - _globals['_GENERATIVEXAIMETADATA_USAGE']._serialized_start=7803 - _globals['_GENERATIVEXAIMETADATA_USAGE']._serialized_end=7954 - _globals['_GENERATIVEMETADATA']._serialized_start=10170 - _globals['_GENERATIVEMETADATA']._serialized_end=10953 - _globals['_GENERATIVEREPLY']._serialized_start=10956 - _globals['_GENERATIVEREPLY']._serialized_end=11118 - _globals['_GENERATIVERESULT']._serialized_start=11120 - _globals['_GENERATIVERESULT']._serialized_end=11184 - 
_globals['_GENERATIVEDEBUG']._serialized_start=11186 - _globals['_GENERATIVEDEBUG']._serialized_end=11245 + _globals['_GENERATIVEAWS']._serialized_end=2392 + _globals['_GENERATIVECOHERE']._serialized_start=2395 + _globals['_GENERATIVECOHERE']._serialized_end=2915 + _globals['_GENERATIVEDUMMY']._serialized_start=2917 + _globals['_GENERATIVEDUMMY']._serialized_end=2934 + _globals['_GENERATIVEMISTRAL']._serialized_start=2937 + _globals['_GENERATIVEMISTRAL']._serialized_end=3134 + _globals['_GENERATIVEOLLAMA']._serialized_start=3137 + _globals['_GENERATIVEOLLAMA']._serialized_end=3403 + _globals['_GENERATIVEOPENAI']._serialized_start=3406 + _globals['_GENERATIVEOPENAI']._serialized_end=4529 + _globals['_GENERATIVEOPENAI_REASONINGEFFORT']._serialized_start=4011 + _globals['_GENERATIVEOPENAI_REASONINGEFFORT']._serialized_end=4174 + _globals['_GENERATIVEOPENAI_VERBOSITY']._serialized_start=4176 + _globals['_GENERATIVEOPENAI_VERBOSITY']._serialized_end=4275 + _globals['_GENERATIVEGOOGLE']._serialized_start=4532 + _globals['_GENERATIVEGOOGLE']._serialized_end=5190 + _globals['_GENERATIVEDATABRICKS']._serialized_start=5193 + _globals['_GENERATIVEDATABRICKS']._serialized_end=5657 + _globals['_GENERATIVEFRIENDLIAI']._serialized_start=5660 + _globals['_GENERATIVEFRIENDLIAI']._serialized_end=5882 + _globals['_GENERATIVENVIDIA']._serialized_start=5885 + _globals['_GENERATIVENVIDIA']._serialized_end=6081 + _globals['_GENERATIVEXAI']._serialized_start=6084 + _globals['_GENERATIVEXAI']._serialized_end=6409 + _globals['_GENERATIVECONTEXTUALAI']._serialized_start=6412 + _globals['_GENERATIVECONTEXTUALAI']._serialized_end=6746 + _globals['_GENERATIVEANTHROPICMETADATA']._serialized_start=6749 + _globals['_GENERATIVEANTHROPICMETADATA']._serialized_end=6895 + _globals['_GENERATIVEANTHROPICMETADATA_USAGE']._serialized_start=6843 + _globals['_GENERATIVEANTHROPICMETADATA_USAGE']._serialized_end=6895 + _globals['_GENERATIVEANYSCALEMETADATA']._serialized_start=6897 + 
_globals['_GENERATIVEANYSCALEMETADATA']._serialized_end=6925 + _globals['_GENERATIVEAWSMETADATA']._serialized_start=6927 + _globals['_GENERATIVEAWSMETADATA']._serialized_end=6950 + _globals['_GENERATIVECOHEREMETADATA']._serialized_start=6953 + _globals['_GENERATIVECOHEREMETADATA']._serialized_end=7749 + _globals['_GENERATIVECOHEREMETADATA_APIVERSION']._serialized_start=7250 + _globals['_GENERATIVECOHEREMETADATA_APIVERSION']._serialized_end=7392 + _globals['_GENERATIVECOHEREMETADATA_BILLEDUNITS']._serialized_start=7395 + _globals['_GENERATIVECOHEREMETADATA_BILLEDUNITS']._serialized_end=7592 + _globals['_GENERATIVECOHEREMETADATA_TOKENS']._serialized_start=7594 + _globals['_GENERATIVECOHEREMETADATA_TOKENS']._serialized_end=7692 + _globals['_GENERATIVEDUMMYMETADATA']._serialized_start=7751 + _globals['_GENERATIVEDUMMYMETADATA']._serialized_end=7776 + _globals['_GENERATIVEMISTRALMETADATA']._serialized_start=7779 + _globals['_GENERATIVEMISTRALMETADATA']._serialized_end=8036 + _globals['_GENERATIVEMISTRALMETADATA_USAGE']._serialized_start=7875 + _globals['_GENERATIVEMISTRALMETADATA_USAGE']._serialized_end=8026 + _globals['_GENERATIVEOLLAMAMETADATA']._serialized_start=8038 + _globals['_GENERATIVEOLLAMAMETADATA']._serialized_end=8064 + _globals['_GENERATIVEOPENAIMETADATA']._serialized_start=8067 + _globals['_GENERATIVEOPENAIMETADATA']._serialized_end=8322 + _globals['_GENERATIVEOPENAIMETADATA_USAGE']._serialized_start=7875 + _globals['_GENERATIVEOPENAIMETADATA_USAGE']._serialized_end=8026 + _globals['_GENERATIVEGOOGLEMETADATA']._serialized_start=8325 + _globals['_GENERATIVEGOOGLEMETADATA']._serialized_end=9197 + _globals['_GENERATIVEGOOGLEMETADATA_TOKENCOUNT']._serialized_start=8506 + _globals['_GENERATIVEGOOGLEMETADATA_TOKENCOUNT']._serialized_end=8632 + _globals['_GENERATIVEGOOGLEMETADATA_TOKENMETADATA']._serialized_start=8635 + _globals['_GENERATIVEGOOGLEMETADATA_TOKENMETADATA']._serialized_end=8860 + _globals['_GENERATIVEGOOGLEMETADATA_METADATA']._serialized_start=8862 
+ _globals['_GENERATIVEGOOGLEMETADATA_METADATA']._serialized_end=8973 + _globals['_GENERATIVEGOOGLEMETADATA_USAGEMETADATA']._serialized_start=8976 + _globals['_GENERATIVEGOOGLEMETADATA_USAGEMETADATA']._serialized_end=9165 + _globals['_GENERATIVEDATABRICKSMETADATA']._serialized_start=9200 + _globals['_GENERATIVEDATABRICKSMETADATA']._serialized_end=9463 + _globals['_GENERATIVEDATABRICKSMETADATA_USAGE']._serialized_start=7875 + _globals['_GENERATIVEDATABRICKSMETADATA_USAGE']._serialized_end=8026 + _globals['_GENERATIVEFRIENDLIAIMETADATA']._serialized_start=9466 + _globals['_GENERATIVEFRIENDLIAIMETADATA']._serialized_end=9729 + _globals['_GENERATIVEFRIENDLIAIMETADATA_USAGE']._serialized_start=7875 + _globals['_GENERATIVEFRIENDLIAIMETADATA_USAGE']._serialized_end=8026 + _globals['_GENERATIVENVIDIAMETADATA']._serialized_start=9732 + _globals['_GENERATIVENVIDIAMETADATA']._serialized_end=9987 + _globals['_GENERATIVENVIDIAMETADATA_USAGE']._serialized_start=7875 + _globals['_GENERATIVENVIDIAMETADATA_USAGE']._serialized_end=8026 + _globals['_GENERATIVEXAIMETADATA']._serialized_start=9990 + _globals['_GENERATIVEXAIMETADATA']._serialized_end=10239 + _globals['_GENERATIVEXAIMETADATA_USAGE']._serialized_start=7875 + _globals['_GENERATIVEXAIMETADATA_USAGE']._serialized_end=8026 + _globals['_GENERATIVEMETADATA']._serialized_start=10242 + _globals['_GENERATIVEMETADATA']._serialized_end=11025 + _globals['_GENERATIVEREPLY']._serialized_start=11028 + _globals['_GENERATIVEREPLY']._serialized_end=11190 + _globals['_GENERATIVERESULT']._serialized_start=11192 + _globals['_GENERATIVERESULT']._serialized_end=11256 + _globals['_GENERATIVEDEBUG']._serialized_start=11258 + _globals['_GENERATIVEDEBUG']._serialized_end=11317 # @@protoc_insertion_point(module_scope) diff --git a/weaviate/proto/v1/v6300/v1/generative_pb2.pyi b/weaviate/proto/v1/v6300/v1/generative_pb2.pyi index 1aa8e619c..41a6c3d37 100644 --- a/weaviate/proto/v1/v6300/v1/generative_pb2.pyi +++ 
b/weaviate/proto/v1/v6300/v1/generative_pb2.pyi @@ -109,7 +109,7 @@ class GenerativeAnyscale(_message.Message): def __init__(self, base_url: _Optional[str] = ..., model: _Optional[str] = ..., temperature: _Optional[float] = ...) -> None: ... class GenerativeAWS(_message.Message): - __slots__ = ("model", "temperature", "service", "region", "endpoint", "target_model", "target_variant", "images", "image_properties", "max_tokens") + __slots__ = ("model", "temperature", "service", "region", "endpoint", "target_model", "target_variant", "images", "image_properties", "max_tokens", "stop_sequences") MODEL_FIELD_NUMBER: _ClassVar[int] TEMPERATURE_FIELD_NUMBER: _ClassVar[int] SERVICE_FIELD_NUMBER: _ClassVar[int] @@ -120,6 +120,7 @@ class GenerativeAWS(_message.Message): IMAGES_FIELD_NUMBER: _ClassVar[int] IMAGE_PROPERTIES_FIELD_NUMBER: _ClassVar[int] MAX_TOKENS_FIELD_NUMBER: _ClassVar[int] + STOP_SEQUENCES_FIELD_NUMBER: _ClassVar[int] model: str temperature: float service: str @@ -130,7 +131,8 @@ class GenerativeAWS(_message.Message): images: _base_pb2.TextArray image_properties: _base_pb2.TextArray max_tokens: int - def __init__(self, model: _Optional[str] = ..., temperature: _Optional[float] = ..., service: _Optional[str] = ..., region: _Optional[str] = ..., endpoint: _Optional[str] = ..., target_model: _Optional[str] = ..., target_variant: _Optional[str] = ..., images: _Optional[_Union[_base_pb2.TextArray, _Mapping]] = ..., image_properties: _Optional[_Union[_base_pb2.TextArray, _Mapping]] = ..., max_tokens: _Optional[int] = ...) -> None: ... 
+ stop_sequences: _base_pb2.TextArray + def __init__(self, model: _Optional[str] = ..., temperature: _Optional[float] = ..., service: _Optional[str] = ..., region: _Optional[str] = ..., endpoint: _Optional[str] = ..., target_model: _Optional[str] = ..., target_variant: _Optional[str] = ..., images: _Optional[_Union[_base_pb2.TextArray, _Mapping]] = ..., image_properties: _Optional[_Union[_base_pb2.TextArray, _Mapping]] = ..., max_tokens: _Optional[int] = ..., stop_sequences: _Optional[_Union[_base_pb2.TextArray, _Mapping]] = ...) -> None: ... class GenerativeCohere(_message.Message): __slots__ = ("base_url", "frequency_penalty", "max_tokens", "model", "k", "p", "presence_penalty", "stop_sequences", "temperature", "images", "image_properties")