Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions .githooks/pre-commit
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
#!/usr/bin/env bash
# Pre-commit hook: run lint, format check, type check, and tests via uv.
# -e: abort on the first failing check; -u: error on unset vars;
# -o pipefail: a failure anywhere in a pipeline fails the pipeline.
set -euo pipefail

# All checks go through uv so they use the project's locked environment.
run() { uv run "$@"; }

run ruff check .
run ruff format --check .
run basedpyright
run pytest
39 changes: 39 additions & 0 deletions robosystems_client/extensions/document_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,11 +12,14 @@
from ..api.documents.delete_document import sync_detailed as delete_document
from ..api.documents.list_documents import sync_detailed as list_documents
from ..api.documents.upload_document import sync_detailed as upload_document
from ..api.documents.upload_documents_bulk import sync_detailed as upload_documents_bulk
from ..api.search.get_document_section import sync_detailed as get_document_section
from ..api.search.search_documents import sync_detailed as search_documents
from ..client import AuthenticatedClient
from ..models.document_list_response import DocumentListResponse
from ..models.document_section import DocumentSection
from ..models.bulk_document_upload_request import BulkDocumentUploadRequest
from ..models.bulk_document_upload_response import BulkDocumentUploadResponse
from ..models.document_upload_request import DocumentUploadRequest
from ..models.document_upload_response import DocumentUploadResponse
from ..models.search_request import SearchRequest
Expand Down Expand Up @@ -154,6 +157,42 @@ def upload_directory(

return results

def upload_bulk(
    self,
    graph_id: str,
    documents: List[Dict[str, Any]],
) -> BulkDocumentUploadResponse:
    """Upload multiple markdown documents (max 50 per request).

    Args:
        graph_id: Target graph ID.
        documents: List of dicts with keys: title, content, and
            optionally tags, folder, external_id.

    Returns:
        BulkDocumentUploadResponse with per-document results.

    Raises:
        Exception: if the server responds with any status other than 200 OK.
    """
    # Translate each plain dict into the generated request model.
    # Absent optional keys are passed through as UNSET so the client
    # omits them from the request payload entirely.
    requests = [
        DocumentUploadRequest(
            title=entry["title"],
            content=entry["content"],
            tags=entry.get("tags", UNSET),
            folder=entry.get("folder", UNSET),
            external_id=entry.get("external_id", UNSET),
        )
        for entry in documents
    ]

    payload = BulkDocumentUploadRequest(documents=requests)
    result = upload_documents_bulk(
        graph_id=graph_id, client=self._get_client(), body=payload
    )
    if result.status_code != HTTPStatus.OK:
        raise Exception(
            f"Bulk upload failed ({result.status_code}): {result.content.decode()}"
        )
    return result.parsed

def search(
self,
graph_id: str,
Expand Down
8 changes: 8 additions & 0 deletions robosystems_client/models/document_list_item.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ class DocumentListItem:
"""A document in the document list.

Attributes:
document_id (str):
document_title (str):
section_count (int):
source_type (str):
Expand All @@ -24,6 +25,7 @@ class DocumentListItem:
last_indexed (None | str | Unset):
"""

document_id: str
document_title: str
section_count: int
source_type: str
Expand All @@ -33,6 +35,8 @@ class DocumentListItem:
additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)

def to_dict(self) -> dict[str, Any]:
document_id = self.document_id

document_title = self.document_title

section_count = self.section_count
Expand Down Expand Up @@ -64,6 +68,7 @@ def to_dict(self) -> dict[str, Any]:
field_dict.update(self.additional_properties)
field_dict.update(
{
"document_id": document_id,
"document_title": document_title,
"section_count": section_count,
"source_type": source_type,
Expand All @@ -81,6 +86,8 @@ def to_dict(self) -> dict[str, Any]:
@classmethod
def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T:
d = dict(src_dict)
document_id = d.pop("document_id")

document_title = d.pop("document_title")

section_count = d.pop("section_count")
Expand Down Expand Up @@ -123,6 +130,7 @@ def _parse_last_indexed(data: object) -> None | str | Unset:
last_indexed = _parse_last_indexed(d.pop("last_indexed", UNSET))

document_list_item = cls(
document_id=document_id,
document_title=document_title,
section_count=section_count,
source_type=source_type,
Expand Down
Loading