diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 682809c..8b71a89 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -198,12 +198,4 @@ jobs: EVOLUTION_KEY_ID: ${{ secrets.EVOLUTION_KEY_ID }} EVOLUTION_SECRET: ${{ secrets.EVOLUTION_SECRET }} EVOLUTION_BASE_URL: ${{ secrets.EVOLUTION_BASE_URL }} - run: make run-tokens - - - name: Run foundation models examples - env: - EVOLUTION_KEY_ID: ${{ secrets.EVOLUTION_KEY_ID }} - EVOLUTION_SECRET: ${{ secrets.EVOLUTION_SECRET }} - EVOLUTION_BASE_URL: ${{ secrets.EVOLUTION_BASE_URL }} - EVOLUTION_PROJECT_ID: ${{ secrets.EVOLUTION_PROJECT_ID }} - run: make run-foundation-models \ No newline at end of file + run: make run-tokens \ No newline at end of file diff --git a/.gitignore b/.gitignore index c514c5d..8254d73 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,172 @@ +# Created by https://www.toptal.com/developers/gitignore/api/python,pycharm+all,go,goland+all +# Edit at https://www.toptal.com/developers/gitignore?templates=python,pycharm+all,go,goland+all + +### Go ### +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work + +### GoLand+all ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# AWS User-specific +.idea/**/aws.xml + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. +# .idea/artifacts +# .idea/compiler.xml +# .idea/jarRepositories.xml +# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# SonarLint plugin +.idea/sonarlint/ + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +### GoLand+all Patch ### +# Ignore everything but code style settings and run configurations +# that are supposed to be shared within teams. 
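For context on the CI change above: the `make run-*` example targets expect `EVOLUTION_KEY_ID`, `EVOLUTION_SECRET`, and `EVOLUTION_BASE_URL` in the environment, and the bundled example scripts skip themselves when only placeholder values are present. A minimal sketch of that guard, using the variable names and placeholder defaults that appear in `examples/token_management.py` later in this diff (the message text is illustrative):

```python
import os

# Same env-var names and placeholder defaults as the example scripts in this diff
BASE_URL = os.getenv("EVOLUTION_BASE_URL", "https://your-endpoint.cloud.ru/v1")
KEY_ID = os.getenv("EVOLUTION_KEY_ID", "your_key_id")
SECRET = os.getenv("EVOLUTION_SECRET", "your_secret")


def credentials_configured() -> bool:
    """True only when real credentials were supplied via environment variables."""
    return KEY_ID != "your_key_id" and SECRET != "your_secret"


if __name__ == "__main__":
    if not credentials_configured():
        # The examples exit gracefully instead of failing in CI without secrets
        print("Set EVOLUTION_KEY_ID / EVOLUTION_SECRET to run this example")
        raise SystemExit(0)
```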
+ +.idea/* + +!.idea/codeStyles +!.idea/runConfigurations + +### PyCharm+all ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff + +# AWS User-specific + +# Generated files + +# Sensitive or high-churn files + +# Gradle + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. +# .idea/artifacts +# .idea/compiler.xml +# .idea/jarRepositories.xml +# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake + +# Mongo Explorer plugin + +# File-based project format + +# IntelliJ + +# mpeltonen/sbt-idea plugin + +# JIRA plugin + +# Cursive Clojure plugin + +# SonarLint plugin + +# Crashlytics plugin (for Android Studio and IntelliJ) + +# Editor-based Rest Client + +# Android studio 3.1+ serialized cache file + +### PyCharm+all Patch ### +# Ignore everything but code style settings and run configurations +# that are supposed to be shared within teams. + + + ### Python ### # Byte-compiled / optimized / DLL files __pycache__/ @@ -5,7 +174,6 @@ __pycache__/ *$py.class # C extensions -*.so # Distribution / packaging .Python @@ -168,4 +336,9 @@ poetry.toml .ruff_cache/ # LSP config files -pyrightconfig.json \ No newline at end of file +pyrightconfig.json + +# End of https://www.toptal.com/developers/gitignore/api/python,pycharm+all,go,goland+all + + +coverage.json \ No newline at end of file diff --git a/Makefile b/Makefile index 6c869c0..9488d9e 100644 --- a/Makefile +++ b/Makefile @@ -24,7 +24,7 @@ help: @echo " run-streaming Run streaming examples" @echo " run-async Run async examples" @echo " run-tokens Run token management examples" - @echo " run-foundation-models Run foundation models examples" + @echo "" @echo "" @echo "Build:" @echo " clean Clean build artifacts" @@ -59,8 +59,7 @@ shell: test: rye run pytest tests/ -v --cov=evolution_openai --cov-report=html --cov-report=term --cov-report=xml:coverage.xml --cov-report=json:coverage.json -test-foundation-models: - rye run pytest tests/test_foundation_models_*.py -v + # Code quality lint: @@ -213,9 +212,7 @@ run-tokens: @if [ -f .env ]; then echo "Загружение переменных окружения из файла .env..."; export $$(grep -v '^#' .env | xargs); fi; \ rye run python examples/token_management.py -run-foundation-models: - @if [ -f .env ]; then echo "Загружение переменных окружения из файла .env..."; export $$(grep -v '^#' .env | xargs); fi; \ - rye run python examples/foundation_models_example.py + # Package info info: diff --git a/README.md b/README.md index 7a36f1b..17a3aaa 100644 --- a/README.md +++ b/README.md @@ -10,16 +10,14 @@ - ✅ **100% совместимость** с официальным OpenAI Python SDK - ✅ **Автоматическое управление токенами** Cloud.ru - ✅ **Drop-in replacement** - минимальные изменения в коде -- ✅ **Async/await поддержка** с `AsyncOpenAI` +- ✅ **Async/await поддержка** с `EvolutionAsyncOpenAI` - ✅ **Streaming responses** поддержка - ✅ **Thread-safe** token management - ✅ **Автоматическое обновление** токенов за 30 секунд до истечения - ✅ **Retry логика** при ошибках авторизации - ✅ **Поддержка .env файлов** для управления конфигурацией - ✅ **Интеграционные тесты** с реальным API -- ✅ **Evolution Foundation Models** поддержка с `project_id` -- ✅ **Готовые примеры** для Foundation Models -- ✅ **Передовые AI 
модели** включая DeepSeek-R1, Qwen2.5 и другие + ## 📦 Установка @@ -38,27 +36,15 @@ from openai import OpenAI client = OpenAI(api_key="sk-...") # ✅ СТАЛО (Evolution OpenAI) -from evolution_openai import OpenAI +from evolution_openai import EvolutionOpenAI # Для обычного использования -client = OpenAI( - key_id="your_key_id", - secret="your_secret", - base_url="https://your-model-endpoint.cloud.ru/v1" +client = EvolutionOpenAI( + key_id="your_key_id", secret="your_secret", base_url="https://your-model-endpoint.cloud.ru/v1" ) -# Для Evolution Foundation Models -client = OpenAI( - key_id="your_key_id", - secret="your_secret", - base_url="https://foundation-models.api.cloud.ru/api/gigacube/openai/v1", - project_id="your_project_id" # Для Evolution Foundation Models -) - -# Все остальное работает ТОЧНО ТАК ЖЕ! response = client.chat.completions.create( - model="default", # или "deepseek-ai/DeepSeek-R1-Distill-Llama-70B" для Foundation Models - messages=[{"role": "user", "content": "Hello!"}] + model="default", messages=[{"role": "user", "content": "Hello!"}] ) ``` @@ -67,13 +53,11 @@ response = client.chat.completions.create( #### Обычное использование ```python -from evolution_openai import OpenAI +from evolution_openai import EvolutionOpenAI # Инициализация client для обычного использования -client = OpenAI( - key_id="your_key_id", - secret="your_secret", - base_url="https://your-model-endpoint.cloud.ru/v1" +client = EvolutionOpenAI( + key_id="your_key_id", secret="your_secret", base_url="https://your-model-endpoint.cloud.ru/v1" ) # Chat Completions @@ -89,55 +73,16 @@ response = client.chat.completions.create( print(response.choices[0].message.content) ``` -#### 🚀 Evolution Foundation Models - -Библиотека полностью поддерживает **Evolution Foundation Models** - платформу для работы с передовыми AI моделями на Cloud.ru. 
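Because this PR renames the exported client classes (`OpenAI` to `EvolutionOpenAI`, `AsyncOpenAI` to `EvolutionAsyncOpenAI`), downstream code pinned to the old names breaks on upgrade. A hedged compatibility shim for callers that must span both releases; it assumes only what the diff shows (the old export existed before this change) and does not assume an alias survives after it:

```python
try:
    # Name introduced by this PR
    from evolution_openai import EvolutionOpenAI
except ImportError:
    # Releases before this PR exported the client as `OpenAI`
    from evolution_openai import OpenAI as EvolutionOpenAI

client = EvolutionOpenAI(
    key_id="your_key_id",  # placeholder credentials, as in the README
    secret="your_secret",
    base_url="https://your-model-endpoint.cloud.ru/v1",
)
```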
Ключевые возможности: - -- **Автоматическое управление Project ID** - добавляет заголовок `x-project-id` автоматически -- **Передовые модели** - DeepSeek-R1, Qwen2.5, RefalMachine/RuadaptQwen2.5-7B-Lite-Beta -- **Специальный endpoint** - `https://foundation-models.api.cloud.ru/api/gigacube/openai/v1` -- **Полная совместимость** с OpenAI SDK - все методы работают идентично - -```python -from evolution_openai import OpenAI - -# Инициализация для Evolution Foundation Models -client = OpenAI( - key_id="your_key_id", - secret="your_secret", - base_url="https://foundation-models.api.cloud.ru/api/gigacube/openai/v1", - project_id="your_project_id" # Автоматически добавляется в заголовки -) - -# Использование Foundation Models -response = client.chat.completions.create( - model="deepseek-ai/DeepSeek-R1-Distill-Llama-70B", - messages=[ - {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": "What is artificial intelligence?"}, - ], - max_tokens=150 -) - -print(response.choices[0].message.content) -``` + ### Streaming ```python # Для обычного использования stream = client.chat.completions.create( - model="default", - messages=[{"role": "user", "content": "Tell me a story"}], - stream=True + model="default", messages=[{"role": "user", "content": "Tell me a story"}], stream=True ) -# Для Foundation Models -stream = client.chat.completions.create( - model="deepseek-ai/DeepSeek-R1-Distill-Llama-70B", - messages=[{"role": "user", "content": "Tell me a story"}], - stream=True -) for chunk in stream: if chunk.choices[0].delta.content: @@ -148,28 +93,17 @@ for chunk in stream: ```python import asyncio -from evolution_openai import AsyncOpenAI +from evolution_openai import EvolutionAsyncOpenAI async def main(): - # Для обычного использования - client = AsyncOpenAI( + client = EvolutionAsyncOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-model-endpoint.cloud.ru/v1", ) - - # Для Foundation Models - client = AsyncOpenAI( - key_id="your_key_id", - secret="your_secret", - base_url="https://foundation-models.api.cloud.ru/api/gigacube/openai/v1", - project_id="your_project_id", # Опционально для Foundation Models - ) - response = await client.chat.completions.create( - model="deepseek-ai/DeepSeek-R1-Distill-Llama-70B", # или "default" для обычного использования - messages=[{"role": "user", "content": "Async hello!"}] + model="default", messages=[{"role": "user", "content": "Async hello!"}] ) print(response.choices[0].message.content) @@ -201,32 +135,20 @@ ENABLE_INTEGRATION_TESTS=false LOG_LEVEL=INFO ``` -#### Для Evolution Foundation Models: - -```bash -# .env файл для Foundation Models -EVOLUTION_KEY_ID=your_key_id_here -EVOLUTION_SECRET=your_secret_here -EVOLUTION_BASE_URL=https://foundation-models.api.cloud.ru/api/gigacube/openai/v1 -EVOLUTION_PROJECT_ID=your_project_id_here # Обязательно для Foundation Models -EVOLUTION_TOKEN_URL=https://iam.api.cloud.ru/api/v1/auth/token -ENABLE_INTEGRATION_TESTS=false -LOG_LEVEL=INFO -``` + ```python import os -from evolution_openai import OpenAI +from evolution_openai import EvolutionOpenAI from dotenv import load_dotenv # Загрузка переменных из .env файла load_dotenv() -client = OpenAI( +client = EvolutionOpenAI( key_id=os.getenv("EVOLUTION_KEY_ID"), secret=os.getenv("EVOLUTION_SECRET"), base_url=os.getenv("EVOLUTION_BASE_URL"), - project_id=os.getenv("EVOLUTION_PROJECT_ID"), # Опционально для Foundation Models ) ``` @@ -271,7 +193,7 @@ with client: ## 📚 Документация - [API 
Documentation](https://cloud-ru-tech.github.io/evolution-openai-python) -- [Evolution Foundation Models Guide](https://cloud-ru-tech.github.io/evolution-openai-python/foundation_models) + - [Migration Guide](https://cloud-ru-tech.github.io/evolution-openai-python/migration) - [Examples](examples/) - [Changelog](CHANGELOG.md) diff --git a/docs/async_usage.rst b/docs/async_usage.rst index 1f8c8b2..99f84dd 100644 --- a/docs/async_usage.rst +++ b/docs/async_usage.rst @@ -12,10 +12,10 @@ Evolution OpenAI полностью поддерживает асинхронн .. code-block:: python import asyncio - from evolution_openai import AsyncOpenAI + from evolution_openai import EvolutionAsyncOpenAI async def main(): - client = AsyncOpenAI( + client = EvolutionAsyncOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" @@ -33,7 +33,7 @@ Evolution OpenAI полностью поддерживает асинхронн .. code-block:: python async def simple_request(): - client = AsyncOpenAI( + client = EvolutionAsyncOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" @@ -59,7 +59,7 @@ Context Manager .. code-block:: python async def with_context_manager(): - async with AsyncOpenAI( + async with EvolutionAsyncOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" @@ -83,7 +83,7 @@ Context Manager .. code-block:: python async def parallel_requests(): - async with AsyncOpenAI( + async with EvolutionAsyncOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" @@ -127,7 +127,7 @@ Context Manager print(f"Запрос {index} завершен") return response.choices[0].message.content - async with AsyncOpenAI( + async with EvolutionAsyncOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" @@ -154,7 +154,7 @@ Context Manager .. code-block:: python async def async_streaming(): - async with AsyncOpenAI( + async with EvolutionAsyncOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" @@ -186,7 +186,7 @@ Try-except с async async def error_handling_example(): try: - async with AsyncOpenAI( + async with EvolutionAsyncOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" @@ -218,7 +218,7 @@ Graceful shutdown self.running = True async def start(self): - self.client = AsyncOpenAI( + self.client = EvolutionAsyncOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" @@ -265,7 +265,7 @@ Graceful shutdown async def __aenter__(self): for i in range(self.pool_size): - client = AsyncOpenAI( + client = EvolutionAsyncOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" @@ -331,7 +331,7 @@ Graceful shutdown # Создаем очередь и клиент queue = asyncio.Queue(maxsize=20) - async with AsyncOpenAI( + async with EvolutionAsyncOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" @@ -375,7 +375,7 @@ FastAPI пример from fastapi import FastAPI from pydantic import BaseModel - from evolution_openai import AsyncOpenAI + from evolution_openai import EvolutionAsyncOpenAI app = FastAPI() @@ -392,7 +392,7 @@ FastAPI пример @app.on_event("startup") async def startup_event(): global client - client = AsyncOpenAI( + client = EvolutionAsyncOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" @@ -422,7 +422,7 @@ aiohttp пример .. 
code-block:: python from aiohttp import web - from evolution_openai import AsyncOpenAI + from evolution_openai import EvolutionAsyncOpenAI async def chat_handler(request): data = await request.json() @@ -444,7 +444,7 @@ aiohttp пример app = web.Application() # Инициализация клиента - app['openai_client'] = AsyncOpenAI( + app['openai_client'] = EvolutionAsyncOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" diff --git a/docs/authentication.rst b/docs/authentication.rst index 7d442af..a0482d7 100644 --- a/docs/authentication.rst +++ b/docs/authentication.rst @@ -30,9 +30,9 @@ Evolution OpenAI использует систему аутентификаци .. code-block:: python - from evolution_openai import OpenAI + from evolution_openai import EvolutionOpenAI - client = OpenAI( + client = EvolutionOpenAI( key_id="your_key_id_here", secret="your_secret_here", base_url="https://your-endpoint.cloud.ru/v1" @@ -52,9 +52,9 @@ Evolution OpenAI использует систему аутентификаци .. code-block:: python import os - from evolution_openai import OpenAI + from evolution_openai import EvolutionOpenAI - client = OpenAI( + client = EvolutionOpenAI( key_id=os.getenv("EVOLUTION_KEY_ID"), secret=os.getenv("EVOLUTION_SECRET"), base_url=os.getenv("EVOLUTION_BASE_URL") @@ -76,11 +76,11 @@ Evolution OpenAI использует систему аутентификаци from dotenv import load_dotenv import os - from evolution_openai import OpenAI + from evolution_openai import EvolutionOpenAI load_dotenv() - client = OpenAI( + client = EvolutionOpenAI( key_id=os.getenv("EVOLUTION_KEY_ID"), secret=os.getenv("EVOLUTION_SECRET"), base_url=os.getenv("EVOLUTION_BASE_URL") @@ -173,9 +173,9 @@ SDK автоматически управляет токенами доступ # Включить отладочные логи logging.basicConfig(level=logging.DEBUG) - from evolution_openai import OpenAI + from evolution_openai import EvolutionOpenAI - client = OpenAI( + client = EvolutionOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" @@ -243,7 +243,7 @@ SDK автоматически управляет токенами доступ .. code-block:: python - client = OpenAI( + client = EvolutionOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1", diff --git a/docs/error_handling.rst b/docs/error_handling.rst index 37bacbd..bd27947 100644 --- a/docs/error_handling.rst +++ b/docs/error_handling.rst @@ -34,10 +34,10 @@ Evolution OpenAI предоставляет комплексную систем .. code-block:: python - from evolution_openai import OpenAI + from evolution_openai import EvolutionOpenAI from evolution_openai.exceptions import EvolutionOpenAIError - client = OpenAI( + client = EvolutionOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" diff --git a/docs/foundation_models.rst b/docs/foundation_models.rst deleted file mode 100644 index 3cc1565..0000000 --- a/docs/foundation_models.rst +++ /dev/null @@ -1,483 +0,0 @@ -Evolution Foundation Models -============================ - -Evolution Foundation Models - это специальная платформа для работы с передовыми моделями искусственного интеллекта на основе Cloud.ru. Библиотека **evolution-openai** предоставляет полную поддержку для работы с Evolution Foundation Models через знакомый OpenAI-совместимый API. 
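The rename sweep above touches every async snippet in `docs/async_usage.rst`. A compact, runnable version of the pattern those docs describe (concurrent requests through one `EvolutionAsyncOpenAI` client), with placeholder credentials and the `model="default"` convention used by the repo's examples:

```python
import asyncio

from evolution_openai import EvolutionAsyncOpenAI


async def main() -> None:
    # The async context manager closes the client (and its HTTP pool) on exit
    async with EvolutionAsyncOpenAI(
        key_id="your_key_id",
        secret="your_secret",
        base_url="https://your-endpoint.cloud.ru/v1",
    ) as client:
        questions = ["What is AI?", "How does machine learning work?"]
        tasks = [
            client.chat.completions.create(
                model="default",
                messages=[{"role": "user", "content": q}],
                max_tokens=50,
            )
            for q in questions
        ]
        # Requests run concurrently; token refresh is handled inside the client
        for response in await asyncio.gather(*tasks):
            print(response.choices[0].message.content)


asyncio.run(main())
```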
- -Особенности Foundation Models ------------------------------- - -✅ **Передовые модели AI** - Доступ к последним моделям ИИ включая DeepSeek-R1, Qwen2.5 и другие - -✅ **Автоматическое управление Project ID** - Библиотека автоматически добавляет заголовок ``x-project-id`` - -✅ **Полная совместимость с OpenAI SDK** - Все методы работают идентично - -✅ **Поддержка streaming** - Потоковая обработка ответов - -✅ **Async/await поддержка** - Асинхронные операции - -✅ **Автоматическое управление токенами** - Встроенная авторизация Cloud.ru - -Быстрый старт -------------- - -Базовая настройка -~~~~~~~~~~~~~~~~~ - -.. code-block:: python - - from evolution_openai import OpenAI - - client = OpenAI( - key_id="your_key_id", - secret="your_secret", - base_url="https://foundation-models.api.cloud.ru/api/gigacube/openai/v1", - project_id="your_project_id" # Обязательно для Foundation Models - ) - - response = client.chat.completions.create( - model="RefalMachine/RuadaptQwen2.5-7B-Lite-Beta", - messages=[ - {"role": "system", "content": "Ты полезный помощник."}, - {"role": "user", "content": "Расскажи о возможностях ИИ"} - ], - max_tokens=100 - ) - - print(response.choices[0].message.content) - -Переменные окружения -~~~~~~~~~~~~~~~~~~~~ - -Рекомендуется использовать файл ``.env`` для хранения конфигурации: - -.. code-block:: bash - - # .env файл для Foundation Models - EVOLUTION_KEY_ID=your_key_id_here - EVOLUTION_SECRET=your_secret_here - EVOLUTION_BASE_URL=https://foundation-models.api.cloud.ru/api/gigacube/openai/v1 - EVOLUTION_PROJECT_ID=your_project_id_here - EVOLUTION_FOUNDATION_MODELS_URL=https://foundation-models.api.cloud.ru/api/gigacube/openai/v1 - -Загрузка из переменных окружения: - -.. code-block:: python - - import os - from evolution_openai import OpenAI - from dotenv import load_dotenv - - load_dotenv() - - client = OpenAI( - key_id=os.getenv("EVOLUTION_KEY_ID"), - secret=os.getenv("EVOLUTION_SECRET"), - base_url=os.getenv("EVOLUTION_FOUNDATION_MODELS_URL"), - project_id=os.getenv("EVOLUTION_PROJECT_ID"), - ) - -Доступные модели ----------------- - -Evolution Foundation Models предоставляет доступ к различным моделям: - -**RefalMachine/RuadaptQwen2.5-7B-Lite-Beta** (рекомендуется) - Адаптированная для русского языка модель на основе Qwen2.5-7B - -**deepseek-ai/DeepSeek-R1-Distill-Llama-70B** - Модель на основе DeepSeek-R1 с дистилляцией - -**Другие модели** - Список доступных моделей может обновляться - обратитесь к документации Cloud.ru - -Параметры конфигурации ----------------------- - -Project ID -~~~~~~~~~~ - -``project_id`` - обязательный параметр для Foundation Models: - -.. code-block:: python - - client = OpenAI( - key_id="your_key_id", - secret="your_secret", - base_url="https://foundation-models.api.cloud.ru/api/gigacube/openai/v1", - project_id="your_project_id" # Автоматически добавляется в заголовки - ) - -Timeout и повторы -~~~~~~~~~~~~~~~~~ - -Foundation Models могут требовать больше времени для обработки: - -.. code-block:: python - - client = OpenAI( - key_id="your_key_id", - secret="your_secret", - base_url="https://foundation-models.api.cloud.ru/api/gigacube/openai/v1", - project_id="your_project_id", - timeout=60.0, # Увеличенный timeout - max_retries=3, # Количество повторов - ) - -Примеры использования ---------------------- - -Базовый пример -~~~~~~~~~~~~~~ - -.. 
code-block:: python - - from evolution_openai import OpenAI - - client = OpenAI( - key_id="your_key_id", - secret="your_secret", - base_url="https://foundation-models.api.cloud.ru/api/gigacube/openai/v1", - project_id="your_project_id" - ) - - response = client.chat.completions.create( - model="RefalMachine/RuadaptQwen2.5-7B-Lite-Beta", - messages=[ - {"role": "system", "content": "Ты полезный помощник."}, - {"role": "user", "content": "Объясни машинное обучение простыми словами"} - ], - max_tokens=200, - temperature=0.7 - ) - - print(f"Ответ: {response.choices[0].message.content}") - print(f"Модель: {response.model}") - print(f"Токенов использовано: {response.usage.total_tokens}") - -Streaming ответы -~~~~~~~~~~~~~~~~ - -.. code-block:: python - - stream = client.chat.completions.create( - model="RefalMachine/RuadaptQwen2.5-7B-Lite-Beta", - messages=[ - {"role": "user", "content": "Напиши короткое стихотворение про технологии"} - ], - stream=True, - max_tokens=100, - temperature=0.8 - ) - - print("Генерация стихотворения:") - for chunk in stream: - if chunk.choices[0].delta.content: - print(chunk.choices[0].delta.content, end="", flush=True) - -Асинхронное использование -~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. code-block:: python - - import asyncio - from evolution_openai import AsyncOpenAI - - async def main(): - async with AsyncOpenAI( - key_id="your_key_id", - secret="your_secret", - base_url="https://foundation-models.api.cloud.ru/api/gigacube/openai/v1", - project_id="your_project_id" - ) as client: - response = await client.chat.completions.create( - model="RefalMachine/RuadaptQwen2.5-7B-Lite-Beta", - messages=[ - {"role": "user", "content": "Что такое квантовые вычисления?"} - ], - max_tokens=150 - ) - - print(response.choices[0].message.content) - - asyncio.run(main()) - -Параллельные запросы -~~~~~~~~~~~~~~~~~~~~ - -.. code-block:: python - - import asyncio - from evolution_openai import AsyncOpenAI - - async def parallel_requests(): - async with AsyncOpenAI( - key_id="your_key_id", - secret="your_secret", - base_url="https://foundation-models.api.cloud.ru/api/gigacube/openai/v1", - project_id="your_project_id" - ) as client: - - questions = [ - "Что такое ИИ?", - "Как работает машинное обучение?", - "Что такое нейронные сети?" - ] - - # Создаем задачи для параллельного выполнения - tasks = [] - for question in questions: - task = client.chat.completions.create( - model="RefalMachine/RuadaptQwen2.5-7B-Lite-Beta", - messages=[ - {"role": "system", "content": "Дай краткий ответ."}, - {"role": "user", "content": question} - ], - max_tokens=50 - ) - tasks.append(task) - - # Выполняем все запросы параллельно - responses = await asyncio.gather(*tasks) - - for question, response in zip(questions, responses): - print(f"Вопрос: {question}") - print(f"Ответ: {response.choices[0].message.content}") - print("-" * 50) - - asyncio.run(parallel_requests()) - -Использование with_options -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. code-block:: python - - # Настройка дополнительных опций - client_with_options = client.with_options( - timeout=120.0, # Увеличенный timeout - max_retries=5, # Больше попыток - ) - - response = client_with_options.chat.completions.create( - model="RefalMachine/RuadaptQwen2.5-7B-Lite-Beta", - messages=[ - {"role": "user", "content": "Создай подробный план изучения Python"} - ], - max_tokens=300, - temperature=0.3 - ) - - print(response.choices[0].message.content) - -Управление токенами -------------------- - -Информация о токене -~~~~~~~~~~~~~~~~~~~ - -.. 
code-block:: python - - # Получение информации о токене - token_info = client.get_token_info() - print(f"Токен активен: {token_info['has_token']}") - print(f"Токен валиден: {token_info['is_valid']}") - - # Текущий токен - current_token = client.current_token - print(f"Текущий токен: {current_token[:20]}...") - -Принудительное обновление токена -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. code-block:: python - - # Принудительное обновление токена - new_token = client.refresh_token() - print(f"Новый токен получен: {new_token[:20]}...") - -Обработка ошибок ----------------- - -Типичные ошибки и их обработка -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. code-block:: python - - from evolution_openai import OpenAI - from evolution_openai.exceptions import EvolutionAuthError - - try: - client = OpenAI( - key_id="your_key_id", - secret="your_secret", - base_url="https://foundation-models.api.cloud.ru/api/gigacube/openai/v1", - project_id="your_project_id" - ) - - response = client.chat.completions.create( - model="RefalMachine/RuadaptQwen2.5-7B-Lite-Beta", - messages=[ - {"role": "user", "content": "Привет!"} - ], - max_tokens=50 - ) - - except EvolutionAuthError as e: - print(f"Ошибка авторизации: {e}") - # Проверьте key_id, secret и project_id - - except Exception as e: - print(f"Общая ошибка: {e}") - -Неправильная модель -~~~~~~~~~~~~~~~~~~~ - -.. code-block:: python - - try: - response = client.chat.completions.create( - model="non-existent-model", - messages=[{"role": "user", "content": "Test"}], - max_tokens=10 - ) - except Exception as e: - print(f"Модель не найдена: {e}") - -Неправильные параметры -~~~~~~~~~~~~~~~~~~~~~~ - -.. code-block:: python - - try: - response = client.chat.completions.create( - model="RefalMachine/RuadaptQwen2.5-7B-Lite-Beta", - messages=[], # Пустой список сообщений - max_tokens=10 - ) - except Exception as e: - print(f"Неправильные параметры: {e}") - - -Лучшие практики ---------------- - -Настройка timeout -~~~~~~~~~~~~~~~~~ - -Foundation Models могут работать медленнее обычных API: - -.. code-block:: python - - client = OpenAI( - key_id="your_key_id", - secret="your_secret", - base_url="https://foundation-models.api.cloud.ru/api/gigacube/openai/v1", - project_id="your_project_id", - timeout=90.0 # Увеличенный timeout для Foundation Models - ) - -Управление токенами -~~~~~~~~~~~~~~~~~~~ - -.. code-block:: python - - # Ограничение количества токенов в ответе - response = client.chat.completions.create( - model="RefalMachine/RuadaptQwen2.5-7B-Lite-Beta", - messages=[{"role": "user", "content": "Объясни квантовую физику"}], - max_tokens=200, # Ограничение для контроля затрат - temperature=0.5 # Сбалансированная креативность - ) - -Кеширование соединений -~~~~~~~~~~~~~~~~~~~~~~ - -.. code-block:: python - - # Используйте context manager для автоматического управления ресурсами - with OpenAI( - key_id="your_key_id", - secret="your_secret", - base_url="https://foundation-models.api.cloud.ru/api/gigacube/openai/v1", - project_id="your_project_id" - ) as client: - # Множественные запросы с одним клиентом - for i in range(5): - response = client.chat.completions.create( - model="RefalMachine/RuadaptQwen2.5-7B-Lite-Beta", - messages=[{"role": "user", "content": f"Вопрос {i+1}"}], - max_tokens=50 - ) - print(response.choices[0].message.content) - -Мониторинг использования -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
code-block:: python - - import time - - start_time = time.time() - - response = client.chat.completions.create( - model="RefalMachine/RuadaptQwen2.5-7B-Lite-Beta", - messages=[{"role": "user", "content": "Создай план проекта"}], - max_tokens=300 - ) - - elapsed_time = time.time() - start_time - - print(f"Время ответа: {elapsed_time:.2f} секунд") - print(f"Токенов использовано: {response.usage.total_tokens}") - print(f"Скорость: {response.usage.total_tokens / elapsed_time:.1f} токен/сек") - -Устранение неполадок --------------------- - -Проблемы с авторизацией -~~~~~~~~~~~~~~~~~~~~~~~ - -**Проблема**: Ошибка авторизации при подключении - -**Решение**: Проверьте правильность key_id, secret и project_id: - -.. code-block:: python - - # Проверьте переменные окружения - import os - print(f"KEY_ID: {os.getenv('EVOLUTION_KEY_ID', 'не установлен')}") - print(f"SECRET: {os.getenv('EVOLUTION_SECRET', 'не установлен')[:10]}...") - print(f"PROJECT_ID: {os.getenv('EVOLUTION_PROJECT_ID', 'не установлен')}") - -Проблемы с моделью -~~~~~~~~~~~~~~~~~~ - -**Проблема**: Модель не найдена или недоступна - -**Решение**: Используйте проверенные модели: - -.. code-block:: python - - # Рекомендуемые модели для Foundation Models - models = [ - "RefalMachine/RuadaptQwen2.5-7B-Lite-Beta", - "deepseek-ai/DeepSeek-R1-Distill-Llama-70B" - ] - -Проблемы с сетью -~~~~~~~~~~~~~~~~ - -**Проблема**: Тайм-ауты или проблемы с подключением - -**Решение**: Увеличьте timeout и количество повторов: - -.. code-block:: python - - client = OpenAI( - key_id="your_key_id", - secret="your_secret", - base_url="https://foundation-models.api.cloud.ru/api/gigacube/openai/v1", - project_id="your_project_id", - timeout=120.0, # 2 минуты - max_retries=5, # 5 попыток - ) \ No newline at end of file diff --git a/docs/index.rst b/docs/index.rst index 08f6c75..ca57dd9 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -43,9 +43,9 @@ Evolution OpenAI Documentation .. code-block:: python - from evolution_openai import OpenAI + from evolution_openai import EvolutionOpenAI - client = OpenAI( + client = EvolutionOpenAI( key_id="your_EVOLUTION_key_id", secret="your_EVOLUTION_secret", base_url="https://your-model-endpoint.cloud.ru/v1" @@ -74,7 +74,6 @@ Evolution OpenAI Documentation usage async_usage streaming - foundation_models error_handling migration diff --git a/docs/migration.rst b/docs/migration.rst index 5498ade..916bef2 100644 --- a/docs/migration.rst +++ b/docs/migration.rst @@ -59,7 +59,7 @@ Evolution OpenAI **полностью совместим** с официальн .. code-block:: python - from evolution_openai import OpenAI, AsyncOpenAI + from evolution_openai import EvolutionOpenAI, EvolutionAsyncOpenAI Шаг 3: Обновление аутентификации ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -76,7 +76,7 @@ Evolution OpenAI **полностью совместим** с официальн .. code-block:: python - client = OpenAI( + client = EvolutionOpenAI( key_id="your_EVOLUTION_key_id", secret="your_EVOLUTION_secret", base_url="https://your-endpoint.cloud.ru/v1" @@ -132,10 +132,10 @@ Evolution OpenAI **полностью совместим** с официальн .. code-block:: python - from evolution_openai import OpenAI + from evolution_openai import EvolutionOpenAI import os - client = OpenAI( + client = EvolutionOpenAI( key_id=os.getenv("EVOLUTION_KEY_ID"), secret=os.getenv("EVOLUTION_SECRET"), base_url=os.getenv("EVOLUTION_BASE_URL") @@ -181,10 +181,10 @@ Evolution OpenAI **полностью совместим** с официальн .. 
code-block:: python import asyncio - from evolution_openai import AsyncOpenAI + from evolution_openai import EvolutionAsyncOpenAI async def main(): - async with AsyncOpenAI( + async with EvolutionAsyncOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" @@ -224,9 +224,9 @@ Streaming .. code-block:: python - from evolution_openai import OpenAI + from evolution_openai import EvolutionOpenAI - client = OpenAI( + client = EvolutionOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" @@ -263,7 +263,7 @@ Streaming .. code-block:: python - from evolution_openai import OpenAI + from evolution_openai import EvolutionOpenAI from evolution_openai.exceptions import EvolutionOpenAIError, RateLimitError try: @@ -504,7 +504,7 @@ Kubernetes def test_connection(): """Тестирует подключение к Evolution API""" try: - client = OpenAI( + client = EvolutionOpenAI( key_id=os.getenv("EVOLUTION_KEY_ID"), secret=os.getenv("EVOLUTION_SECRET"), base_url=os.getenv("EVOLUTION_BASE_URL") @@ -527,7 +527,7 @@ Kubernetes def test_models(): """Тестирует получение списка моделей""" try: - client = OpenAI( + client = EvolutionOpenAI( key_id=os.getenv("EVOLUTION_KEY_ID"), secret=os.getenv("EVOLUTION_SECRET"), base_url=os.getenv("EVOLUTION_BASE_URL") @@ -592,7 +592,7 @@ Kubernetes # Замените все импорты # from openai import OpenAI - from evolution_openai import OpenAI + from evolution_openai import EvolutionOpenAI Проблема: "Invalid credentials" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/docs/quickstart.rst b/docs/quickstart.rst index 102f50a..b0b7e0c 100644 --- a/docs/quickstart.rst +++ b/docs/quickstart.rst @@ -36,10 +36,10 @@ .. code-block:: python - from evolution_openai import OpenAI + from evolution_openai import EvolutionOpenAI # Инициализация клиента - client = OpenAI( + client = EvolutionOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" @@ -79,9 +79,9 @@ .. code-block:: python import os - from evolution_openai import OpenAI + from evolution_openai import EvolutionOpenAI - client = OpenAI( + client = EvolutionOpenAI( key_id=os.getenv("EVOLUTION_KEY_ID"), secret=os.getenv("EVOLUTION_SECRET"), base_url=os.getenv("EVOLUTION_BASE_URL") @@ -100,11 +100,11 @@ from dotenv import load_dotenv import os - from evolution_openai import OpenAI + from evolution_openai import EvolutionOpenAI load_dotenv() - client = OpenAI( + client = EvolutionOpenAI( key_id=os.getenv("EVOLUTION_KEY_ID"), secret=os.getenv("EVOLUTION_SECRET"), base_url=os.getenv("EVOLUTION_BASE_URL") @@ -134,10 +134,10 @@ Streaming .. code-block:: python import asyncio - from evolution_openai import AsyncOpenAI + from evolution_openai import EvolutionAsyncOpenAI async def main(): - client = AsyncOpenAI( + client = EvolutionAsyncOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" @@ -157,10 +157,10 @@ Streaming .. code-block:: python - from evolution_openai import OpenAI + from evolution_openai import EvolutionOpenAI from evolution_openai.exceptions import EvolutionOpenAIError - client = OpenAI( + client = EvolutionOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" diff --git a/docs/streaming.rst b/docs/streaming.rst index 870d200..a78600b 100644 --- a/docs/streaming.rst +++ b/docs/streaming.rst @@ -21,9 +21,9 @@ Streaming позволяет: .. 
code-block:: python - from evolution_openai import OpenAI + from evolution_openai import EvolutionOpenAI - client = OpenAI( + client = EvolutionOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" @@ -52,10 +52,10 @@ Streaming позволяет: .. code-block:: python import asyncio - from evolution_openai import AsyncOpenAI + from evolution_openai import EvolutionAsyncOpenAI async def async_streaming(): - async with AsyncOpenAI( + async with EvolutionAsyncOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" @@ -336,7 +336,7 @@ FastAPI с streaming async def generate(): try: - async with AsyncOpenAI( + async with EvolutionAsyncOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" @@ -483,7 +483,7 @@ FastAPI с streaming import asyncio async def multiple_streams(): - async with AsyncOpenAI( + async with EvolutionAsyncOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" diff --git a/docs/usage.rst b/docs/usage.rst index 1ab705e..ed4c41c 100644 --- a/docs/usage.rst +++ b/docs/usage.rst @@ -11,9 +11,9 @@ .. code-block:: python - from evolution_openai import OpenAI + from evolution_openai import EvolutionOpenAI - client = OpenAI( + client = EvolutionOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" @@ -164,7 +164,7 @@ Context Manager .. code-block:: python - with OpenAI( + with EvolutionOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1" @@ -202,7 +202,7 @@ create_client() .. code-block:: python - client = OpenAI( + client = EvolutionOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1", @@ -214,7 +214,7 @@ create_client() .. code-block:: python - client = OpenAI( + client = EvolutionOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1", @@ -226,7 +226,7 @@ create_client() .. 
code-block:: python - client = OpenAI( + client = EvolutionOpenAI( key_id="your_key_id", secret="your_secret", base_url="https://your-endpoint.cloud.ru/v1", diff --git a/env.example b/env.example index 7bc3018..18ac4b8 100644 --- a/env.example +++ b/env.example @@ -1,6 +1,9 @@ # Evolution OpenAI Environment Variables # Скопируйте этот файл в .env и заполните ваши реальные значения +# Logging Level (DEBUG, INFO, WARNING, ERROR) +LOG_LEVEL=INFO + # Evolution API Credentials EVOLUTION_KEY_ID=your_key_id_here EVOLUTION_SECRET=your_secret_here @@ -9,19 +12,10 @@ EVOLUTION_SECRET=your_secret_here # Для обычных примеров используйте ваш рабочий endpoint EVOLUTION_BASE_URL=https://your-endpoint.cloud.ru/v1 -# Evolution Foundation Models Endpoint (опционально) -# Если установлен, то будет использован для Foundation Models примеров -EVOLUTION_FOUNDATION_MODELS_URL=https://foundation-models.api.cloud.ru/api/gigacube/openai/v1 - -# Evolution Foundation Models Project ID (опционально) -EVOLUTION_PROJECT_ID=your_project_id_here - + # Token Service Endpoint (обычно не нужно менять) EVOLUTION_TOKEN_URL=https://iam.api.cloud.ru/api/v1/auth/token # Test Configuration # Установите в true для запуска интеграционных тестов с реальным API -ENABLE_INTEGRATION_TESTS=false - -# Logging Level (DEBUG, INFO, WARNING, ERROR) -LOG_LEVEL=INFO \ No newline at end of file +ENABLE_INTEGRATION_TESTS=false \ No newline at end of file diff --git a/examples/async_examples.py b/examples/async_examples.py index 116ea2d..479dcdf 100644 --- a/examples/async_examples.py +++ b/examples/async_examples.py @@ -7,7 +7,7 @@ import asyncio from typing import Any, Dict -from evolution_openai import AsyncOpenAI +from evolution_openai import EvolutionAsyncOpenAI # Конфигурация BASE_URL = os.getenv("EVOLUTION_BASE_URL", "https://your-endpoint.cloud.ru/v1") @@ -40,7 +40,7 @@ async def basic_async_example(): return try: - async with AsyncOpenAI( + async with EvolutionAsyncOpenAI( key_id=KEY_ID, secret=SECRET, base_url=BASE_URL ) as client: # Получаем доступную модель @@ -73,7 +73,7 @@ async def parallel_requests_example(): return try: - async with AsyncOpenAI( + async with EvolutionAsyncOpenAI( key_id=KEY_ID, secret=SECRET, base_url=BASE_URL ) as client: # Получаем доступную модель @@ -136,7 +136,9 @@ async def streaming_async_example(): return try: - client = AsyncOpenAI(key_id=KEY_ID, secret=SECRET, base_url=BASE_URL) + client = EvolutionAsyncOpenAI( + key_id=KEY_ID, secret=SECRET, base_url=BASE_URL + ) # Получаем доступную модель model_name = await get_available_model_async(client) @@ -176,7 +178,7 @@ async def context_manager_example(): try: # Используем async with для автоматического закрытия - async with AsyncOpenAI( + async with EvolutionAsyncOpenAI( key_id=KEY_ID, secret=SECRET, base_url=BASE_URL ) as client: # Получаем доступную модель @@ -207,7 +209,7 @@ async def error_handling_example(): print("\n=== Обработка ошибок ===") try: - client = AsyncOpenAI( + client = EvolutionAsyncOpenAI( key_id=KEY_ID, secret=SECRET, base_url=BASE_URL, @@ -245,7 +247,9 @@ async def batch_processing_example(): # Семафор для ограничения количества одновременных запросов semaphore = asyncio.Semaphore(3) # Максимум 3 одновременных запроса - client = AsyncOpenAI(key_id=KEY_ID, secret=SECRET, base_url=BASE_URL) + client = EvolutionAsyncOpenAI( + key_id=KEY_ID, secret=SECRET, base_url=BASE_URL + ) # Получаем доступную модель model_name = await get_available_model_async(client) diff --git a/examples/basic_usage.py b/examples/basic_usage.py index 8727139..ae7e879 
100644 --- a/examples/basic_usage.py +++ b/examples/basic_usage.py @@ -5,7 +5,7 @@ import os -from evolution_openai import OpenAI, create_client +from evolution_openai import EvolutionOpenAI, create_client # Cloud.ru модель endpoint (замените на ваш) BASE_URL = os.getenv("EVOLUTION_BASE_URL", "https://your-endpoint.cloud.ru/v1") @@ -55,7 +55,9 @@ def basic_chat_example(): try: # Создаем client с использованием контекстного менеджера - with OpenAI(key_id=key_id, secret=secret, base_url=BASE_URL) as client: + with EvolutionOpenAI( + key_id=key_id, secret=secret, base_url=BASE_URL + ) as client: # Получаем доступную модель model_name = get_available_model(client) @@ -97,7 +99,9 @@ def streaming_example(): return None try: - with OpenAI(key_id=key_id, secret=secret, base_url=BASE_URL) as client: + with EvolutionOpenAI( + key_id=key_id, secret=secret, base_url=BASE_URL + ) as client: # Получаем доступную модель model_name = get_available_model(client) @@ -180,7 +184,9 @@ def advanced_features_example(): return None try: - with OpenAI(key_id=key_id, secret=secret, base_url=BASE_URL) as client: + with EvolutionOpenAI( + key_id=key_id, secret=secret, base_url=BASE_URL + ) as client: # Получаем доступную модель model_name = get_available_model(client) diff --git a/examples/foundation_models_example.py b/examples/foundation_models_example.py deleted file mode 100644 index bda1db6..0000000 --- a/examples/foundation_models_example.py +++ /dev/null @@ -1,413 +0,0 @@ -#!/usr/bin/env python3 -""" -Примеры работы с Evolution Foundation Models -""" - -import os -import time -import asyncio - -from evolution_openai import OpenAI, AsyncOpenAI - -# Конфигурация -BASE_URL = os.getenv("EVOLUTION_BASE_URL", "https://your-endpoint.cloud.ru/v1") -FOUNDATION_MODELS_URL = os.getenv( - "EVOLUTION_FOUNDATION_MODELS_URL", - "https://foundation-models.api.cloud.ru/api/gigacube/openai/v1", -) -KEY_ID = os.getenv("EVOLUTION_KEY_ID", "your_key_id") -SECRET = os.getenv("EVOLUTION_SECRET", "your_secret") -PROJECT_ID = os.getenv("EVOLUTION_PROJECT_ID") - -# Выбираем Foundation Models endpoint если доступен -ENDPOINT_URL = FOUNDATION_MODELS_URL if FOUNDATION_MODELS_URL else BASE_URL -DEFAULT_MODEL = "RefalMachine/RuadaptQwen2.5-7B-Lite-Beta" - - -def get_foundation_model(): - """Возвращает модель для Foundation Models""" - print(f"🔧 Используем модель: {DEFAULT_MODEL}") - return DEFAULT_MODEL - - -async def get_foundation_model_async(): - """Возвращает модель для Foundation Models (асинхронно)""" - print(f"🔧 Используем модель: {DEFAULT_MODEL}") - return DEFAULT_MODEL - - -def basic_foundation_models_example(): - """Базовый пример Foundation Models""" - print("=== Базовый Foundation Models ===") - - if KEY_ID == "your_key_id" or SECRET == "your_secret": - print("Установите переменные окружения для работы с Foundation Models") - return None - - try: - with OpenAI( - key_id=KEY_ID, - secret=SECRET, - base_url=ENDPOINT_URL, - project_id=PROJECT_ID, - ) as client: - model_name = get_foundation_model() - - response = client.chat.completions.create( - model=model_name, - messages=[ - { - "role": "system", - "content": "Ты полезный помощник, использующий Evolution Foundation Models.", - }, - { - "role": "user", - "content": "Расскажи кратко о возможностях искусственного интеллекта", - }, - ], - max_tokens=50, - temperature=0.7, - ) - - if ( - response.choices - and len(response.choices) > 0 - and response.choices[0].message - ): - content = ( - response.choices[0].message.content - or "Нет содержимого в ответе" - ) - print(f"✅ Ответ: 
{content}") - print(f"📊 Модель: {response.model}") - print(f"🔢 Токенов: {response.usage.total_tokens}") - return True - else: - print("❌ Получен пустой ответ") - return False - - except Exception as e: - print(f"❌ Ошибка: {e}") - return False - - -def streaming_foundation_models_example(): - """Пример streaming с Foundation Models""" - print("\n=== Streaming Foundation Models ===") - - if KEY_ID == "your_key_id" or SECRET == "your_secret": - print("Установите переменные окружения для streaming") - return None - - try: - with OpenAI( - key_id=KEY_ID, - secret=SECRET, - base_url=ENDPOINT_URL, - project_id=PROJECT_ID, - ) as client: - model_name = get_foundation_model() - - print("Генерируем стихотворение...") - print("-" * 50) - - stream = client.chat.completions.create( - model=model_name, - messages=[ - { - "role": "user", - "content": "Напиши короткое стихотворение про технологии", - } - ], - stream=True, - max_tokens=80, - temperature=0.8, - ) - - content_parts = [] - for chunk in stream: - if ( - chunk.choices - and len(chunk.choices) > 0 - and chunk.choices[0].delta - and chunk.choices[0].delta.content - ): - content = chunk.choices[0].delta.content - content_parts.append(content) - print(content, end="", flush=True) - - print("\n" + "-" * 50) - print( - f"✅ Streaming завершен! Получено {len(content_parts)} частей." - ) - return True - - except Exception as e: - print(f"❌ Streaming ошибка: {e}") - return False - - -async def async_foundation_models_example(): - """Асинхронный пример Foundation Models""" - print("\n=== Асинхронный Foundation Models ===") - - if KEY_ID == "your_key_id" or SECRET == "your_secret": - print("Установите переменные окружения для async примера") - return None - - try: - async with AsyncOpenAI( - key_id=KEY_ID, - secret=SECRET, - base_url=ENDPOINT_URL, - project_id=PROJECT_ID, - ) as client: - model_name = await get_foundation_model_async() - - response = await client.chat.completions.create( - model=model_name, - messages=[ - { - "role": "user", - "content": "Объясни простыми словами, что такое машинное обучение", - } - ], - max_tokens=60, - temperature=0.5, - ) - - if ( - response.choices - and len(response.choices) > 0 - and response.choices[0].message - ): - content = ( - response.choices[0].message.content - or "Нет содержимого в ответе" - ) - print(f"✅ Async ответ: {content}") - print(f"📊 Модель: {response.model}") - print(f"🔢 Токенов: {response.usage.total_tokens}") - return True - else: - print("❌ Получен пустой ответ") - return False - - except Exception as e: - print(f"❌ Async ошибка: {e}") - return False - - -def advanced_foundation_models_example(): - """Пример с дополнительными опциями Foundation Models""" - print("\n=== Foundation Models с опциями ===") - - if KEY_ID == "your_key_id" or SECRET == "your_secret": - print("Установите переменные окружения для advanced примера") - return None - - try: - with OpenAI( - key_id=KEY_ID, - secret=SECRET, - base_url=ENDPOINT_URL, - project_id=PROJECT_ID, - ) as client: - model_name = get_foundation_model() - - # Используем with_options для настройки параметров - response = client.with_options( - timeout=60.0, max_retries=3 - ).chat.completions.create( - model=model_name, - messages=[ - { - "role": "user", - "content": "Создай план изучения Python для начинающих", - } - ], - max_tokens=80, - temperature=0.3, - ) - - if ( - response.choices - and len(response.choices) > 0 - and response.choices[0].message - ): - content = ( - response.choices[0].message.content - or "Нет содержимого в ответе" - ) - print(f"✅ 
Ответ с опциями: {content}") - print(f"📊 Модель: {response.model}") - print(f"🔢 Токенов: {response.usage.total_tokens}") - - # Информация о токене - token_info = client.get_token_info() - print(f"🔑 Статус токена: {token_info}") - return True - else: - print("❌ Получен пустой ответ") - return False - - except Exception as e: - print(f"❌ Ошибка с опциями: {e}") - return False - - -async def parallel_foundation_models_example(): - """Пример параллельных запросов к Foundation Models""" - print("\n=== Параллельные запросы Foundation Models ===") - - if KEY_ID == "your_key_id" or SECRET == "your_secret": - print("Установите переменные окружения для параллельных запросов") - return None - - try: - async with AsyncOpenAI( - key_id=KEY_ID, - secret=SECRET, - base_url=ENDPOINT_URL, - project_id=PROJECT_ID, - ) as client: - model_name = await get_foundation_model_async() - - # Список вопросов для параллельной обработки - questions = [ - "Что такое искусственный интеллект?", - "Как работает машинное обучение?", - "Что такое нейронные сети?", - ] - - # Создаем задачи для параллельного выполнения - tasks = [] - for question in questions: - task = client.chat.completions.create( - model=model_name, - messages=[ - { - "role": "system", - "content": "Дай краткий ответ в 1-2 предложения.", - }, - {"role": "user", "content": question}, - ], - max_tokens=50, - temperature=0.5, - ) - tasks.append(task) - - # Выполняем все запросы параллельно - start_time = time.time() - responses = await asyncio.gather(*tasks) - end_time = time.time() - - elapsed = end_time - start_time - print( - f"⚡ Обработано {len(questions)} запросов за {elapsed:.2f} секунд" - ) - print() - - for i, (question, response) in enumerate( - zip(questions, responses) - ): - print(f"❓ Вопрос {i + 1}: {question}") - if ( - response.choices - and len(response.choices) > 0 - and response.choices[0].message - ): - content = ( - response.choices[0].message.content - or "Нет содержимого в ответе" - ) - print(f"✅ Ответ: {content}") - print(f"🔢 Токенов: {response.usage.total_tokens}") - else: - print("❌ Получен пустой ответ") - print("-" * 50) - - return True - - except Exception as e: - print(f"❌ Ошибка параллельных запросов: {e}") - return False - - -def main(): - """Основная функция с примерами Foundation Models""" - print("🚀 Evolution Foundation Models - Примеры использования\n") - print(f"🌐 Endpoint: {ENDPOINT_URL}") - print(f"🤖 Модель: {DEFAULT_MODEL}") - - # Показываем, используются ли Foundation Models - is_foundation_models = ( - "foundation-models" in ENDPOINT_URL or "gigacube" in ENDPOINT_URL - ) - print(f"🔧 Используется Foundation Models: {is_foundation_models}\n") - - # Проверяем переменные окружения - if KEY_ID == "your_key_id" or SECRET == "your_secret": - print("⚠️ ВНИМАНИЕ: Не установлены переменные окружения!") - print( - "Установите переменные окружения для работы с Foundation Models:" - ) - print("export EVOLUTION_KEY_ID='your_key_id'") - print("export EVOLUTION_SECRET='your_secret'") - print("export EVOLUTION_PROJECT_ID='your_project_id'") - print( - "export EVOLUTION_FOUNDATION_MODELS_URL='https://foundation-models.api.cloud.ru/api/gigacube/openai/v1'" - ) - print("\n💡 Примеры будут запущены в демонстрационном режиме") - print() - - # Запускаем примеры - results = [] - - # Синхронные примеры - results.append(basic_foundation_models_example()) - results.append(streaming_foundation_models_example()) - results.append(advanced_foundation_models_example()) - - # Асинхронные примеры - async def run_async_examples(): - async_results = [] - 
async_results.append(await async_foundation_models_example()) - async_results.append(await parallel_foundation_models_example()) - return async_results - - # Запускаем асинхронные примеры - async_results = asyncio.run(run_async_examples()) - results.extend(async_results) - - # Подводим итоги - successful = sum(1 for r in results if r is True) - failed = sum(1 for r in results if r is False) - skipped = sum(1 for r in results if r is None) - - print("\n📊 Результаты выполнения:") - print(f"✅ Успешно: {successful}") - print(f"❌ Ошибки: {failed}") - print(f"⏭️ Пропущено: {skipped}") - - if failed == 0 and successful > 0: - print("\n🎉 Все примеры Foundation Models выполнены успешно!") - elif failed > 0: - print(f"\n⚠️ {failed} примеров завершились с ошибками") - - print("\n💡 Подсказки:") - print("- Убедитесь, что PROJECT_ID установлен для Foundation Models") - print("- Проверьте доступность Foundation Models endpoint") - print( - "- Используйте EVOLUTION_FOUNDATION_MODELS_URL для специального endpoint" - ) - print("- Документация: docs/foundation_models.md") - - return failed == 0 - - -if __name__ == "__main__": - import sys - - success = main() - sys.exit(0 if success else 1) diff --git a/examples/run_all_examples.py b/examples/run_all_examples.py index aa0e57a..11af467 100644 --- a/examples/run_all_examples.py +++ b/examples/run_all_examples.py @@ -43,10 +43,7 @@ def run_example(script_name: str, description: str) -> bool: return False try: - # Запуск примера (увеличенный таймаут для Foundation Models) - timeout_seconds = ( - 60 if script_name == "foundation_models_example.py" else 30 - ) + timeout_seconds = 30 result = subprocess.run( [sys.executable, str(script_path)], capture_output=True, @@ -117,11 +114,7 @@ def main() -> bool: "EVOLUTION_BASE_URL", ] - # Проверяем опциональные переменные для Foundation Models - optional_vars = [ - "EVOLUTION_FOUNDATION_MODELS_URL", - "EVOLUTION_PROJECT_ID", - ] + optional_vars = [] missing_vars = [var for var in required_vars if not os.getenv(var)] if missing_vars: @@ -140,12 +133,7 @@ def main() -> bool: # Показываем статус опциональных переменных missing_optional = [var for var in optional_vars if not os.getenv(var)] if missing_optional: - print( - f"ℹ️ Опциональные переменные не установлены: {', '.join(missing_optional)}" - ) - print( - "Foundation Models примеры будут использовать значения по умолчанию" - ) + pass # Список примеров для запуска examples = [ @@ -153,7 +141,6 @@ def main() -> bool: ("streaming_examples.py", "Примеры Streaming API"), ("token_management.py", "Управление токенами"), ("async_examples.py", "Асинхронные примеры"), - ("foundation_models_example.py", "Примеры Foundation Models"), ] # Статистика diff --git a/examples/streaming_examples.py b/examples/streaming_examples.py index da700b3..e586403 100644 --- a/examples/streaming_examples.py +++ b/examples/streaming_examples.py @@ -7,7 +7,7 @@ import time import asyncio -from evolution_openai import OpenAI, AsyncOpenAI +from evolution_openai import EvolutionOpenAI, EvolutionAsyncOpenAI # Конфигурация BASE_URL = os.getenv("EVOLUTION_BASE_URL", "https://your-endpoint.cloud.ru/v1") @@ -56,7 +56,9 @@ def basic_streaming_example(): return try: - with OpenAI(key_id=KEY_ID, secret=SECRET, base_url=BASE_URL) as client: + with EvolutionOpenAI( + key_id=KEY_ID, secret=SECRET, base_url=BASE_URL + ) as client: # Получаем доступную модель model_name = get_available_model(client) @@ -100,7 +102,9 @@ def streaming_with_metadata(): return try: - with OpenAI(key_id=KEY_ID, secret=SECRET, 
base_url=BASE_URL) as client: + with EvolutionOpenAI( + key_id=KEY_ID, secret=SECRET, base_url=BASE_URL + ) as client: # Получаем доступную модель model_name = get_available_model(client) @@ -160,7 +164,7 @@ async def async_streaming_example(): return try: - async with AsyncOpenAI( + async with EvolutionAsyncOpenAI( key_id=KEY_ID, secret=SECRET, base_url=BASE_URL ) as client: # Получаем доступную модель @@ -211,7 +215,9 @@ def streaming_with_stop_sequence(): return try: - client = OpenAI(key_id=KEY_ID, secret=SECRET, base_url=BASE_URL) + client = EvolutionOpenAI( + key_id=KEY_ID, secret=SECRET, base_url=BASE_URL + ) # Получаем доступную модель model_name = get_available_model(client) @@ -276,7 +282,9 @@ def multiple_streaming_conversations(): ] try: - client = OpenAI(key_id=KEY_ID, secret=SECRET, base_url=BASE_URL) + client = EvolutionOpenAI( + key_id=KEY_ID, secret=SECRET, base_url=BASE_URL + ) # Получаем доступную модель model_name = get_available_model(client) diff --git a/examples/token_management.py b/examples/token_management.py index 9beab42..d3f2a51 100644 --- a/examples/token_management.py +++ b/examples/token_management.py @@ -6,7 +6,7 @@ import os import time -from evolution_openai import OpenAI +from evolution_openai import EvolutionOpenAI # Конфигурация BASE_URL = os.getenv("EVOLUTION_BASE_URL", "https://your-endpoint.cloud.ru/v1") @@ -39,7 +39,9 @@ def token_info_example(): return try: - with OpenAI(key_id=KEY_ID, secret=SECRET, base_url=BASE_URL) as client: + with EvolutionOpenAI( + key_id=KEY_ID, secret=SECRET, base_url=BASE_URL + ) as client: # Получаем информацию о текущем токене token_info = client.get_token_info() print(f"Информация о токене: {token_info}") @@ -73,7 +75,9 @@ def token_refresh_example(): return try: - client = OpenAI(key_id=KEY_ID, secret=SECRET, base_url=BASE_URL) + client = EvolutionOpenAI( + key_id=KEY_ID, secret=SECRET, base_url=BASE_URL + ) # Получаем текущий токен old_token = client.current_token @@ -115,7 +119,9 @@ def automatic_token_management(): return try: - client = OpenAI(key_id=KEY_ID, secret=SECRET, base_url=BASE_URL) + client = EvolutionOpenAI( + key_id=KEY_ID, secret=SECRET, base_url=BASE_URL + ) print("Делаем серию запросов...") @@ -165,7 +171,9 @@ def token_expiration_simulation(): return try: - client = OpenAI(key_id=KEY_ID, secret=SECRET, base_url=BASE_URL) + client = EvolutionOpenAI( + key_id=KEY_ID, secret=SECRET, base_url=BASE_URL + ) print("Получаем информацию о токене...") token_info = client.get_token_info() @@ -210,9 +218,13 @@ def multiple_clients_example(): try: # Создаем два клиента с одинаковыми credentials - client1 = OpenAI(key_id=KEY_ID, secret=SECRET, base_url=BASE_URL) + client1 = EvolutionOpenAI( + key_id=KEY_ID, secret=SECRET, base_url=BASE_URL + ) - client2 = OpenAI(key_id=KEY_ID, secret=SECRET, base_url=BASE_URL) + client2 = EvolutionOpenAI( + key_id=KEY_ID, secret=SECRET, base_url=BASE_URL + ) print("Создали два клиента...") diff --git a/pyproject.toml b/pyproject.toml index 14cd1ad..faf6d0e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "evolution-openai" -version = "1.0.3" +version = "1.0.2" description = "Evolution OpenAI with automatic token management" dynamic = ["readme"] license-files = ["LICEN[CS]E*"] diff --git a/requirements-dev.lock b/requirements-dev.lock index 15a53cf..7237cae 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -22,25 +22,25 @@ babel==2.17.0 backports-tarfile==1.2.0 # via jaraco-context build==1.2.2.post1 -certifi==2025.6.15 
+certifi==2025.10.5
    # via httpcore
    # via httpx
    # via requests
 cfgv==3.4.0
    # via pre-commit
-charset-normalizer==3.4.2
+charset-normalizer==3.4.3
    # via requests
 click==8.1.8
    # via click-option-group
    # via python-semantic-release
-click-option-group==0.5.7
+click-option-group==0.5.8
    # via python-semantic-release
 coverage==7.6.1
    # via pytest-cov
 deprecated==1.2.18
    # via python-semantic-release
 dirty-equals==0.9.0
-distlib==0.3.9
+distlib==0.4.0
    # via virtualenv
 distro==1.9.0
    # via openai
@@ -58,7 +58,7 @@ filelock==3.16.1
    # via virtualenv
 gitdb==4.0.12
    # via gitpython
-gitpython==3.1.44
+gitpython==3.1.45
    # via python-semantic-release
 h11==0.16.0
    # via httpcore
@@ -118,12 +118,12 @@ mypy==1.14.1
 mypy-extensions==1.1.0
    # via mypy
 myst-parser==3.0.1
-nh3==0.2.21
+nh3==0.3.0
    # via readme-renderer
 nodeenv==1.9.1
    # via pre-commit
    # via pyright
-openai==1.88.0
+openai==1.109.1
    # via evolution-openai
 packaging==25.0
    # via build
@@ -140,13 +140,13 @@ pydantic==2.10.6
    # via python-semantic-release
 pydantic-core==2.27.2
    # via pydantic
-pygments==2.19.1
+pygments==2.19.2
    # via readme-renderer
    # via rich
    # via sphinx
 pyproject-hooks==1.2.0
    # via build
-pyright==1.1.402
+pyright==1.1.406
 pytest==8.3.5
    # via pytest-asyncio
    # via pytest-cov
@@ -160,11 +160,11 @@ python-dotenv==1.0.1
    # via evolution-openai
 python-gitlab==4.13.0
    # via python-semantic-release
-python-semantic-release==10.1.0
+python-semantic-release==10.4.1
 pytz==2025.2
    # via babel
    # via dirty-equals
-pyyaml==6.0.2
+pyyaml==6.0.3
    # via myst-parser
    # via pre-commit
    # via responses
@@ -182,13 +182,13 @@ requests==2.32.4
 requests-toolbelt==1.0.0
    # via python-gitlab
    # via twine
-responses==0.25.7
+responses==0.25.8
 rfc3986==2.0.0
    # via twine
-rich==14.0.0
+rich==14.1.0
    # via python-semantic-release
    # via twine
-ruff==0.12.0
+ruff==0.13.3
 shellingham==1.5.4
    # via python-semantic-release
 six==1.17.0
@@ -237,20 +237,21 @@ typing-extensions==4.13.2
    # via annotated-types
    # via anyio
    # via exceptiongroup
+    # via gitpython
    # via mypy
    # via openai
    # via pydantic
    # via pydantic-core
    # via pyright
-    # via rich
+    # via virtualenv
 urllib3==2.2.3
    # via requests
    # via responses
    # via twine
    # via types-requests
-virtualenv==20.31.2
+virtualenv==20.34.0
    # via pre-commit
-wrapt==1.17.2
+wrapt==1.17.3
    # via deprecated
 zipp==3.20.2
    # via importlib-metadata
diff --git a/requirements.lock b/requirements.lock
index e8676d8..ab814d7 100644
--- a/requirements.lock
+++ b/requirements.lock
@@ -15,11 +15,11 @@ annotated-types==0.7.0
 anyio==4.5.2
    # via httpx
    # via openai
-certifi==2025.6.15
+certifi==2025.10.5
    # via httpcore
    # via httpx
    # via requests
-charset-normalizer==3.4.2
+charset-normalizer==3.4.3
    # via requests
 distro==1.9.0
    # via openai
@@ -38,7 +38,7 @@ idna==3.10
    # via requests
 jiter==0.9.1
    # via openai
-openai==1.88.0
+openai==1.109.1
    # via evolution-openai
 pydantic==2.10.6
    # via openai
diff --git a/src/evolution_openai/client.py b/src/evolution_openai/client.py
index 7921b02..471a075 100644
--- a/src/evolution_openai/client.py
+++ b/src/evolution_openai/client.py
@@ -2,8 +2,12 @@
 Основные клиенты Evolution OpenAI
 """
 
+import os
+import asyncio
 import logging
+import contextlib
 from typing import Any, Dict, Type, Union, Optional
+from datetime import datetime, timedelta
 from typing_extensions import override
 
 from evolution_openai.token_manager import EvolutionTokenManager
@@ -43,6 +47,22 @@
 
 logger = logging.getLogger(__name__)
 
+# Configure logger level from environment (default INFO)
+_level_name = os.getenv("LOG_LEVEL", "INFO").strip().upper()
+if _level_name == "WARN":
+    _level_name = "WARNING"
+logger.setLevel(getattr(logging, _level_name, logging.INFO))
+_root_logger = logging.getLogger()
+if not logger.handlers and not _root_logger.handlers:
+    _handler = logging.StreamHandler()
+    _handler.setLevel(getattr(logging, _level_name, logging.INFO))
+    _formatter = logging.Formatter(
+        "%(asctime)s - %(levelname)s - %(name)s - %(message)s"
+    )
+    _handler.setFormatter(_formatter)
+    logger.addHandler(_handler)
+logger.debug(f"LOG_LEVEL from env: {_level_name}")
+
 
 class EvolutionOpenAI(_BaseOpenAI):  # type: ignore[reportUnknownBaseType,reportUnknownMemberType,reportUnknownArgumentType,misc]
     """
@@ -53,7 +73,7 @@ class EvolutionOpenAI(_BaseOpenAI):  # type: ignore[reportUnknownBaseType,report
         client = OpenAI(api_key="...")
 
     На:
-    from evolution_openai import OpenAI
-    client = OpenAI(key_id="...", secret="...", base_url="...")
+    from evolution_openai import EvolutionOpenAI
+    client = EvolutionOpenAI(key_id="...", secret="...", base_url="...")
 
     И все остальные методы будут работать точно так же!
@@ -67,7 +87,6 @@ def __init__(
         # Параметры совместимые с OpenAI SDK
         api_key: Optional[str] = None,  # Игнорируется
         organization: Optional[str] = None,
-        project_id: Optional[str] = None,
         timeout: Union[float, None] = None,
         max_retries: int = 2,
         default_headers: Optional[Dict[str, str]] = None,
@@ -84,15 +103,16 @@ def __init__(
         # Сохраняем Cloud.ru credentials
         self.key_id = key_id
         self.secret = secret
-        self.project_id = project_id
 
         # Инициализируем token manager
         self.token_manager = EvolutionTokenManager(key_id, secret)
 
         # Получаем первоначальный токен
         initial_token = self.token_manager.get_valid_token()
+        if initial_token:
+            logger.debug(f"[token] init prefix={initial_token[:16]}...")
 
-        # Подготавливаем заголовки с project_id
+        # Подготавливаем заголовки
         prepared_headers = self._prepare_default_headers(default_headers)
 
         # Инициализируем родительский OpenAI client
@@ -117,17 +137,13 @@ def __init__(
     def _prepare_default_headers(
         self, user_headers: Optional[Dict[str, str]]
     ) -> Dict[str, str]:
-        """Подготавливает заголовки по умолчанию с учетом project_id"""
+        """Подготавливает заголовки по умолчанию"""
         headers: Dict[str, str] = {}
 
         # Добавляем пользовательские заголовки
         if user_headers:
             headers.update(user_headers)
 
-        # Добавляем project_id заголовок если он установлен
-        if self.project_id:
-            headers["x-project-id"] = self.project_id
-
         return headers
 
     def _initialize_headers(self) -> None:
@@ -151,12 +167,16 @@ def patched_request(*args: Any, **kwargs: Any) -> Any:  # type: ignore[reportUnk
             current_token = self.token_manager.get_valid_token()
             self.api_key = current_token or ""  # type: ignore[reportUnknownMemberType]
             self._update_auth_headers(current_token or "")
+            logger.debug("Выполняется HTTP запрос с обновленным токеном")
 
             try:
                 return original_request(*args, **kwargs)
             except Exception as e:
                 # Если ошибка авторизации, принудительно обновляем токен
                 if self._is_auth_error(e):
+                    logger.debug(
+                        f"Обнаружена ошибка авторизации: {e.__class__.__name__}"
+                    )
                     logger.warning(
                         "Ошибка авторизации, принудительно обновляем токен"
                     )
@@ -171,6 +191,7 @@ def patched_request(*args: Any, **kwargs: Any) -> Any:  # type: ignore[reportUnk
 
         # Устанавливаем патченый метод
         setattr(self._client, method_name, patched_request)  # type: ignore[reportUnknownMemberType,reportUnknownArgumentType]
+        logger.debug("HTTP клиент успешно пропатчен для автообновления токена")
 
     def _update_auth_headers(self, token: str) -> None:
         """Обновляет заголовки авторизации"""
@@ -180,32 +201,44 @@ def _update_auth_headers(self, token: str) -> None:
         # Пытаемся обновить заголовки различными способами
         if hasattr(self._client, "_auth_headers"):
             self._client._auth_headers["Authorization"] = auth_header  # type: ignore[reportAttributeAccessIssue]
-            # Добавляем project_id заголовок если он установлен
-            if self.project_id:
-                self._client._auth_headers["x-project-id"] = self.project_id  # type: ignore[reportAttributeAccessIssue]
             headers_updated = True
 
         if hasattr(self._client, "default_headers"):
             self._client.default_headers["Authorization"] = auth_header  # type: ignore[reportAttributeAccessIssue]
-            # Добавляем project_id заголовок если он установлен
-            if self.project_id:
-                self._client.default_headers["x-project-id"] = self.project_id  # type: ignore[reportAttributeAccessIssue]
             headers_updated = True
 
         # Пытаемся обновить заголовки через _default_headers (для новых версий OpenAI SDK)
         if hasattr(self._client, "_default_headers"):
             self._client._default_headers["Authorization"] = auth_header  # type: ignore[reportAttributeAccessIssue]
-            if self.project_id:
-                self._client._default_headers["x-project-id"] = self.project_id  # type: ignore[reportAttributeAccessIssue]
             headers_updated = True
 
         # Обновляем заголовки на уровне самого клиента
         if hasattr(self, "default_headers") and self.default_headers:
             self.default_headers["Authorization"] = auth_header  # type: ignore[reportAttributeAccessIssue]
-            if self.project_id:
-                self.default_headers["x-project-id"] = self.project_id  # type: ignore[reportAttributeAccessIssue]
             headers_updated = True
 
+        # Синхронизируем api_key/авторизацию в нижележащем клиенте
+        try:
+            if hasattr(self._client, "api_key"):
+                self._client.api_key = token
+            if hasattr(self._client, "_api_key"):
+                self._client._api_key = token
+            # Попытка обновить auth объект, если он содержит токен
+            auth_obj = getattr(self._client, "auth", None)
+            if auth_obj is not None:
+                for attr in ("api_key", "token", "_token"):
+                    if hasattr(auth_obj, attr):
+                        setattr(auth_obj, attr, token)
+            auth_obj = getattr(self._client, "_auth", None)
+            if auth_obj is not None:
+                for attr in ("api_key", "token", "_token"):
+                    if hasattr(auth_obj, attr):
+                        setattr(auth_obj, attr, token)
+        except Exception as e:
+            logger.debug(
+                f"Не удалось обновить api_key/_auth у HTTP клиента: {e}"
+            )
+
         if not headers_updated:
             logger.warning(
                 "Не удалось обновить заголовки - структура HTTP клиента не распознана"
@@ -213,6 +246,22 @@ def _update_auth_headers(self, token: str) -> None:
 
     def _is_auth_error(self, error: Exception) -> bool:
         """Проверяет, является ли ошибка связанной с авторизацией"""
+        # 1) Явные коды статуса
+        status_code = getattr(error, "status_code", None)
+        if isinstance(status_code, int) and status_code in (401, 403):
+            return True
+        response = getattr(error, "response", None)
+        if response is not None:
+            resp_code = getattr(response, "status_code", None)
+            if isinstance(resp_code, int) and resp_code in (401, 403):
+                return True
+        # 2) Имя класса исключения
+        name = error.__class__.__name__.lower()
+        if any(
+            k in name for k in ("auth", "unauthor", "forbidden", "permission")
+        ):
+            return True
+        # 3) Подстроки в сообщении
         error_str = str(error).lower()
         return any(
             keyword in error_str
@@ -222,6 +271,12 @@ def _is_auth_error(self, error: Exception) -> bool:
                 "authentication",
                 "forbidden",
                 "403",
+                "jwt is expired",
+                "jwt expired",
+                "token is expired",
+                "token expired",
+                "expired jwt",
+                "expired token",
             ]
         )
 
@@ -273,7 +328,6 @@ def with_options(self, **kwargs: Any) -> "EvolutionOpenAI":  # type: ignore[repo
             "secret": self.secret,
             "base_url": self.base_url,
             "organization": self.organization,
-            "project_id": self.project_id,
             "timeout": self.timeout,
             "max_retries": self.max_retries,
             "default_headers": self.default_headers,
@@ -318,7 +372,6 @@ def __init__(
         # Параметры совместимые с AsyncOpenAI
         api_key: Optional[str] = None,
         organization: Optional[str] = None,
-        project_id: Optional[str] = None,
         timeout: Union[float, None] = None,
         max_retries: int = 2,
         default_headers: Optional[Dict[str, str]] = None,
@@ -334,15 +387,16 @@ def __init__(
         # Сохраняем Cloud.ru credentials
         self.key_id = key_id
         self.secret = secret
-        self.project_id = project_id
 
         # Инициализируем token manager
         self.token_manager = EvolutionTokenManager(key_id, secret)
 
         # Получаем первоначальный токен
         initial_token = self.token_manager.get_valid_token()
+        if initial_token:
+            logger.debug(f"[token] init prefix={initial_token[:16]}...")
 
-        # Подготавливаем заголовки с project_id
+        # Подготавливаем заголовки
         prepared_headers = self._prepare_default_headers(default_headers)
 
         # Инициализируем родительский AsyncOpenAI client
@@ -364,20 +418,29 @@ def __init__(
         # Устанавливаем заголовки после инициализации родительского класса
         self._initialize_headers()
 
+        # Запускаем фоновое автообновление токена при наличии event loop
+        try:
+            loop = asyncio.get_running_loop()
+        except RuntimeError:
+            loop = None
+        if loop is not None:
+            self._token_refresh_task = loop.create_task(
+                self._auto_refresh_token_loop()
+            )  # type: ignore[reportAttributeAccessIssue]
+            logger.debug("Фоновая задача автообновления токена запущена")
+        else:
+            self._token_refresh_task = None  # type: ignore[reportAttributeAccessIssue]
+
     def _prepare_default_headers(
         self, user_headers: Optional[Dict[str, str]]
     ) -> Dict[str, str]:
-        """Подготавливает заголовки по умолчанию с учетом project_id"""
+        """Подготавливает заголовки по умолчанию"""
         headers: Dict[str, str] = {}
 
         # Добавляем пользовательские заголовки
         if user_headers:
             headers.update(user_headers)
 
-        # Добавляем project_id заголовок если он установлен
-        if self.project_id:
-            headers["x-project-id"] = self.project_id
-
         return headers
 
     def _initialize_headers(self) -> None:
@@ -399,20 +462,26 @@ def _patch_async_client(self) -> None:
             return
 
         async def patched_request(*args: Any, **kwargs: Any) -> Any:  # type: ignore[reportUnknownMemberType,reportUnknownArgumentType,reportUnknownVariableType,reportUnknownReturnType]
-            # Обновляем токен перед каждым запросом
-            current_token = self.token_manager.get_valid_token()
+            # Обновляем токен перед каждым запросом (АСИНХРОННО!)
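+            # Note: the refresh below must be awaited; the synchronous
+            # get_valid_token() performs blocking HTTP I/O and would stall
+            # the event loop if called from this async request path.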
+            current_token = await self.token_manager.get_valid_token_async()
             self.api_key = current_token or ""  # type: ignore[reportUnknownMemberType,reportUnknownVariableType]
             self._update_auth_headers(current_token or "")
+            logger.debug("Выполняется async HTTP запрос с обновленным токеном")
 
             try:
                 return await original_request(*args, **kwargs)
             except Exception as e:
                 if self._is_auth_error(e):
+                    logger.debug(
+                        f"Обнаружена ошибка авторизации: {e.__class__.__name__}"
+                    )
                     logger.warning(
                         "Ошибка авторизации, принудительно обновляем токен"
                     )
                     self.token_manager.invalidate_token()
-                    new_token = self.token_manager.get_valid_token()
+                    new_token = (
+                        await self.token_manager.get_valid_token_async()
+                    )
                     self.api_key = new_token or ""  # type: ignore[reportUnknownMemberType,reportUnknownVariableType]
                     self._update_auth_headers(new_token or "")
                     return await original_request(*args, **kwargs)
@@ -421,6 +490,9 @@ async def patched_request(*args: Any, **kwargs: Any) -> Any:  # type: ignore[rep
 
         # Устанавливаем патченый метод
         setattr(self._client, method_name, patched_request)  # type: ignore[reportUnknownMemberType,reportUnknownArgumentType]
+        logger.debug(
+            "Async HTTP клиент успешно пропатчен для автообновления токена"
+        )
 
     def _update_auth_headers(self, token: str) -> None:
         """Обновляет заголовки авторизации"""
@@ -430,32 +502,43 @@ def _update_auth_headers(self, token: str) -> None:
         # Пытаемся обновить заголовки различными способами
         if hasattr(self._client, "_auth_headers"):
             self._client._auth_headers["Authorization"] = auth_header  # type: ignore[reportAttributeAccessIssue]
-            # Добавляем project_id заголовок если он установлен
-            if self.project_id:
-                self._client._auth_headers["x-project-id"] = self.project_id  # type: ignore[reportAttributeAccessIssue]
             headers_updated = True
 
         if hasattr(self._client, "default_headers"):
             self._client.default_headers["Authorization"] = auth_header  # type: ignore[reportAttributeAccessIssue]
-            # Добавляем project_id заголовок если он установлен
-            if self.project_id:
-                self._client.default_headers["x-project-id"] = self.project_id  # type: ignore[reportAttributeAccessIssue]
             headers_updated = True
 
         # Пытаемся обновить заголовки через _default_headers (для новых версий OpenAI SDK)
         if hasattr(self._client, "_default_headers"):
             self._client._default_headers["Authorization"] = auth_header  # type: ignore[reportAttributeAccessIssue]
-            if self.project_id:
-                self._client._default_headers["x-project-id"] = self.project_id  # type: ignore[reportAttributeAccessIssue]
             headers_updated = True
 
         # Обновляем заголовки на уровне самого клиента
         if hasattr(self, "default_headers") and self.default_headers:
             self.default_headers["Authorization"] = auth_header  # type: ignore[reportAttributeAccessIssue]
-            if self.project_id:
-                self.default_headers["x-project-id"] = self.project_id  # type: ignore[reportAttributeAccessIssue]
             headers_updated = True
 
+        # Синхронизируем api_key/авторизацию в нижележащем клиенте
+        try:
+            if hasattr(self._client, "api_key"):
+                self._client.api_key = token
+            if hasattr(self._client, "_api_key"):
+                self._client._api_key = token
+            auth_obj = getattr(self._client, "auth", None)
+            if auth_obj is not None:
+                for attr in ("api_key", "token", "_token"):
+                    if hasattr(auth_obj, attr):
+                        setattr(auth_obj, attr, token)
+            auth_obj = getattr(self._client, "_auth", None)
+            if auth_obj is not None:
+                for attr in ("api_key", "token", "_token"):
+                    if hasattr(auth_obj, attr):
+                        setattr(auth_obj, attr, token)
+        except Exception as e:
+            logger.debug(
+                f"Не удалось обновить api_key/_auth у async HTTP клиента: {e}"
+            )
+
         if not headers_updated:
             logger.warning(
                 "Не удалось обновить заголовки - структура HTTP клиента не распознана"
@@ -463,6 +546,19 @@ def _update_auth_headers(self, token: str) -> None:
 
     def _is_auth_error(self, error: Exception) -> bool:
         """Проверяет, является ли ошибка связанной с авторизацией"""
+        status_code = getattr(error, "status_code", None)
+        if isinstance(status_code, int) and status_code in (401, 403):
+            return True
+        response = getattr(error, "response", None)
+        if response is not None:
+            resp_code = getattr(response, "status_code", None)
+            if isinstance(resp_code, int) and resp_code in (401, 403):
+                return True
+        name = error.__class__.__name__.lower()
+        if any(
+            k in name for k in ("auth", "unauthor", "forbidden", "permission")
+        ):
+            return True
         error_str = str(error).lower()
         return any(
             keyword in error_str
@@ -472,18 +568,25 @@ def _is_auth_error(self, error: Exception) -> bool:
                 "authentication",
                 "forbidden",
                 "403",
+                # Расширенные признаки истекшего токена/JWT от backend
+                "jwt is expired",
+                "jwt expired",
+                "token is expired",
+                "token expired",
+                "expired jwt",
+                "expired token",
             ]
         )
 
     @property
     def current_token(self) -> Optional[str]:
         """Возвращает текущий действующий токен"""
-        return self.token_manager.get_valid_token()
+        return self.token_manager.access_token
 
-    def refresh_token(self) -> Optional[str]:
-        """Принудительно обновляет токен"""
+    async def refresh_token(self) -> Optional[str]:
+        """Принудительно обновляет токен (асинхронно)"""
         self.token_manager.invalidate_token()
-        return self.token_manager.get_valid_token()
+        return await self.token_manager.get_valid_token_async()
 
     def get_token_info(self) -> Dict[str, Any]:
         """Возвращает информацию о токене"""
@@ -523,7 +626,6 @@ def with_options(self, **kwargs: Any) -> "EvolutionAsyncOpenAI":  # type: ignore
             "secret": self.secret,
             "base_url": self.base_url,
             "organization": self.organization,
-            "project_id": self.project_id,
             "timeout": self.timeout,
             "max_retries": self.max_retries,
             "default_headers": self.default_headers,
@@ -533,6 +635,47 @@ def with_options(self, **kwargs: Any) -> "EvolutionAsyncOpenAI":  # type: ignore
         options.update(kwargs)
         return EvolutionAsyncOpenAI(**options)
 
+    async def _auto_refresh_token_loop(self) -> None:
+        """Фоновый цикл автообновления токена до истечения срока."""
+        try:
+            while True:
+                # Обеспечиваем наличие валидного токена и обновляем заголовки
+                token = await self.token_manager.get_valid_token_async()
+                self.api_key = token or ""  # type: ignore[reportUnknownMemberType]
+                self._update_auth_headers(token or "")
+
+                # Планируем следующее обновление заранее, до буфера
+                expires_at = self.token_manager.token_expires_at
+                buffer_seconds = self.token_manager.buffer_seconds
+                now = datetime.now()
+
+                if expires_at is None:
+                    # Если срок неизвестен — проверяем чаще
+                    await asyncio.sleep(5)
+                    continue
+
+                # Обновляем немного раньше буфера, чтобы избежать гонок
+                target_time = expires_at - timedelta(
+                    seconds=buffer_seconds + 5
+                )
+                sleep_seconds = (target_time - now).total_seconds()
+                if sleep_seconds < 1:
+                    sleep_seconds = 1
+                await asyncio.sleep(sleep_seconds)
+        except asyncio.CancelledError:
+            return
+
+    @override
+    async def close(self) -> None:  # type: ignore[misc]
+        """Закрывает клиент и останавливает фоновую задачу автообновления."""
+        task = getattr(self, "_token_refresh_task", None)
+        if task is not None:
+            task.cancel()
+            with contextlib.suppress(asyncio.CancelledError):
+                await task
+        if hasattr(super(), "close"):
+            await super().close()  # type: ignore[reportUnknownMemberType]
+
     async def __aenter__(self) -> "EvolutionAsyncOpenAI":  # type: ignore[reportUnknownReturnType,reportUnknownMemberType]
         """Асинхронный контекстный менеджер - вход"""
         # Вызываем родительский асинхронный контекстный менеджер если он есть
diff --git a/src/evolution_openai/token_manager.py b/src/evolution_openai/token_manager.py
index 2bc9b61..00bc991 100644
--- a/src/evolution_openai/token_manager.py
+++ b/src/evolution_openai/token_manager.py
@@ -2,6 +2,8 @@
 Менеджер токенов для Cloud.ru API
 """
 
+import os
+import asyncio
 import logging
 import threading
 from typing import Any, Dict, Optional
@@ -9,6 +11,14 @@
 
 import requests
 
+try:
+    import httpx
+
+    _httpx_available = True
+except ImportError:
+    httpx = None  # type: ignore[assignment]
+    _httpx_available = False
+
 from evolution_openai.exceptions import (
     EvolutionAuthError,
     EvolutionTokenError,
@@ -17,6 +27,22 @@
 
 logger = logging.getLogger(__name__)
 
+# Configure logger level from environment (default INFO)
+_level_name = os.getenv("LOG_LEVEL", "INFO").strip().upper()
+if _level_name == "WARN":
+    _level_name = "WARNING"
+logger.setLevel(getattr(logging, _level_name, logging.INFO))
+_root_logger = logging.getLogger()
+if not logger.handlers and not _root_logger.handlers:
+    _handler = logging.StreamHandler()
+    _handler.setLevel(getattr(logging, _level_name, logging.INFO))
+    _formatter = logging.Formatter(
+        "%(asctime)s - %(levelname)s - %(name)s - %(message)s"
+    )
+    _handler.setFormatter(_formatter)
+    logger.addHandler(_handler)
+logger.debug(f"LOG_LEVEL from env: {_level_name}")
+
 
 class EvolutionTokenManager:
     """Менеджер токенов с автоматическим обновлением"""
@@ -40,6 +66,7 @@ def __init__(
         self.access_token: Optional[str] = None
         self.token_expires_at: Optional[datetime] = None
         self._lock = threading.Lock()
+        self._async_lock: Optional[asyncio.Lock] = None
 
     def _request_token(self) -> Dict[str, Any]:
         """Запрашивает новый access token"""
@@ -91,6 +118,7 @@ def get_valid_token(self) -> Optional[str]:
             )
 
             if should_refresh:
+                had_token_before = self.access_token is not None
                 logger.info("Обновление access token...")
                 try:
                     token_data = self._request_token()
@@ -103,6 +131,10 @@ def get_valid_token(self) -> Optional[str]:
                         f"Токен обновлен, действителен до: "
                         f"{self.token_expires_at}"
                     )
+                    if had_token_before and self.access_token:
+                        logger.debug(
+                            f"[token] refreshed prefix={self.access_token[:16]}..."
+                        )
                 except KeyError as e:
                     raise EvolutionTokenError(
                         f"Неожиданный формат ответа от сервера токенов: {e}"
                     ) from None
@@ -137,3 +169,94 @@ def get_token_info(self) -> Dict[str, Any]:
             "is_valid": self.is_token_valid(),
             "buffer_seconds": self.buffer_seconds,
         }
+
+    async def _request_token_async(self) -> Dict[str, Any]:
+        """Асинхронно запрашивает новый access token"""
+        payload = {"keyId": self.key_id, "secret": self.secret}
+        headers = {"Content-Type": "application/json"}
+
+        if _httpx_available and httpx is not None:
+            # Используем httpx для асинхронных запросов
+            try:
+                async with httpx.AsyncClient(timeout=30.0) as client:
+                    response = await client.post(
+                        self.token_url, json=payload, headers=headers
+                    )
+                    response.raise_for_status()
+                    return response.json()  # type: ignore[no-any-return]
+
+            except httpx.HTTPStatusError as e:
+                if e.response.status_code == 401:
+                    raise EvolutionAuthError(
+                        f"Неверные учетные данные: {e}", status_code=401
+                    ) from None
+                elif e.response.status_code == 403:
+                    raise EvolutionAuthError(
+                        f"Доступ запрещен: {e}", status_code=403
+                    ) from None
+                else:
+                    raise EvolutionNetworkError(
+                        f"HTTP ошибка при получении токена: {e}",
+                        original_error=e,
+                    ) from None
+            except httpx.RequestError as e:
+                raise EvolutionNetworkError(
+                    f"Сетевая ошибка при получении токена: {e}",
+                    original_error=e,
+                ) from None
+            except Exception as e:
+                raise EvolutionTokenError(
+                    f"Неожиданная ошибка при получении токена: {e}"
+                ) from None
+        else:
+            # Fallback: используем синхронный requests в отдельном потоке
+            logger.warning(
+                "httpx не установлен, используется синхронный fallback. "
+                "Установите httpx для лучшей производительности: pip install httpx"
+            )
+            loop = asyncio.get_event_loop()
+            return await loop.run_in_executor(None, self._request_token)
+
+    async def get_valid_token_async(self) -> Optional[str]:
+        """Асинхронно возвращает валидный токен, обновляя при необходимости"""
+        # Ленивая инициализация async lock
+        if self._async_lock is None:
+            self._async_lock = asyncio.Lock()
+
+        async with self._async_lock:
+            now = datetime.now()
+
+            should_refresh = (
+                self.access_token is None
+                or self.token_expires_at is None
+                or now
+                >= (
+                    self.token_expires_at
+                    - timedelta(seconds=self.buffer_seconds)
+                )
+            )
+
+            if should_refresh:
+                had_token_before = self.access_token is not None
+                logger.info("Обновление access token (async)...")
+                try:
+                    token_data = await self._request_token_async()
+
+                    self.access_token = token_data["access_token"]
+                    expires_in = token_data.get("expires_in", 3600)
+                    self.token_expires_at = now + timedelta(seconds=expires_in)
+
+                    logger.info(
+                        f"Токен обновлен (async), действителен до: "
+                        f"{self.token_expires_at}"
+                    )
+                    if had_token_before and self.access_token:
+                        logger.debug(
+                            f"[token] refreshed (async) prefix={self.access_token[:16]}..."
+                        )
+                except KeyError as e:
+                    raise EvolutionTokenError(
+                        f"Неожиданный формат ответа от сервера токенов: {e}"
+                    ) from None
+
+        return self.access_token
diff --git a/tests/conftest.py b/tests/conftest.py
index e255e0a..1b801e6 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -13,8 +13,8 @@
 from pytest_asyncio import is_async_test
 
 from evolution_openai import (
-    OpenAI as EvolutionOpenAI,
-    AsyncOpenAI as CloudAsyncOpenAI,
+    EvolutionOpenAI,
+    EvolutionAsyncOpenAI,
 )
 
 if TYPE_CHECKING:
@@ -51,23 +51,6 @@ def pytest_collection_modifyitems(items: list[pytest.Function]) -> None:
         if "integration" in item.keywords:
             item.add_marker(skip_integration)
 
-    # Skip foundation models tests if not enabled
-    foundation_models_enabled = (
-        os.getenv("ENABLE_FOUNDATION_MODELS_TESTS", "false").lower() == "true"
-        or os.getenv("ENABLE_INTEGRATION_TESTS", "false").lower() == "true"
-    )
-    if not foundation_models_enabled:
-        skip_foundation_models = pytest.mark.skip(
-            reason="Foundation Models tests disabled. "
-            "Set ENABLE_FOUNDATION_MODELS_TESTS=true or ENABLE_INTEGRATION_TESTS=true to enable."
-        )
-        for item in items:
-            if (
-                "foundation_models" in item.keywords
-                or "test_foundation_models" in item.name
-            ):
-                item.add_marker(skip_foundation_models)
-
 
 def pytest_configure(config: Config) -> None:
     """Register custom markers"""
@@ -76,9 +59,6 @@ def pytest_configure(config: Config) -> None:
     )
     config.addinivalue_line("markers", "unit: mark test as unit test")
     config.addinivalue_line("markers", "slow: mark test as slow running")
-    config.addinivalue_line(
-        "markers", "foundation_models: mark test as foundation models test"
-    )
 
 # Load environment variables from .env file if exists
 try:
@@ -100,7 +80,6 @@ def test_credentials() -> Dict[str, Optional[str]]:
         "key_id": os.getenv("EVOLUTION_KEY_ID"),
         "secret": os.getenv("EVOLUTION_SECRET"),
         "base_url": os.getenv("EVOLUTION_BASE_URL"),
-        "project_id": os.getenv("EVOLUTION_PROJECT_ID"),
         "token_url": os.getenv(
             "EVOLUTION_TOKEN_URL", "https://iam.api.cloud.ru/api/v1/auth/token"
         ),
@@ -120,23 +99,10 @@ def mock_credentials() -> Dict[str, str]:
         "key_id": "test_key_id",
         "secret": "test_secret",
         "base_url": "https://test.example.com/v1",
-        "project_id": "test_project_id",
         "token_url": "https://iam.api.cloud.ru/api/v1/auth/token",
     }
 
 
-@pytest.fixture(scope="session")
-def project_id() -> Optional[str]:
-    """Fixture providing project_id from environment variables"""
-    return os.getenv("EVOLUTION_PROJECT_ID")
-
-
-@pytest.fixture
-def mock_project_id() -> str:
-    """Fixture providing mock project_id for unit tests"""
-    return "test_project_id"
-
-
 @pytest.fixture(scope="session")
 def client(
     request: FixtureRequest, test_credentials: Dict[str, Optional[str]]
@@ -166,7 +132,7 @@ def client(
 @pytest.fixture(scope="session")
 async def async_client(
     request: FixtureRequest, test_credentials: Dict[str, Optional[str]]
-) -> AsyncIterator[CloudAsyncOpenAI]:
+) -> AsyncIterator[EvolutionAsyncOpenAI]:
     """Session-scoped async client fixture with proper cleanup"""
     if not test_credentials["key_id"] or not test_credentials["secret"]:
         pytest.skip("Real credentials not provided for async client fixture")
@@ -178,7 +144,7 @@ async def async_client(
         )
 
     try:
-        async with CloudAsyncOpenAI(
+        async with EvolutionAsyncOpenAI(
             key_id=test_credentials["key_id"] or "",
             secret=test_credentials["secret"] or "",
             base_url=test_credentials["base_url"] or "",
@@ -187,104 +153,3 @@ async def async_client(
             yield client
     except Exception as e:
         pytest.skip(f"Failed to create async client: {e}")
-
-
-@pytest.fixture(scope="session")
-def foundation_models_credentials() -> Dict[str, Optional[str]]:
-    """Fixture providing Foundation Models specific credentials from environment variables"""
-    return {
-        "key_id": os.getenv("EVOLUTION_KEY_ID"),
-        "secret": os.getenv("EVOLUTION_SECRET"),
-        "base_url": os.getenv(
-            "EVOLUTION_FOUNDATION_MODELS_URL",
-            "https://foundation-models.api.cloud.ru/api/gigacube/openai/v1",
-        ),
-        "project_id": os.getenv("EVOLUTION_PROJECT_ID"),
-        "token_url": os.getenv(
-            "EVOLUTION_TOKEN_URL", "https://iam.api.cloud.ru/api/v1/auth/token"
-        ),
-    }
-
-
-@pytest.fixture(scope="session")
-def foundation_models_enabled() -> bool:
-    """Fixture checking if foundation models integration tests are enabled"""
-    return (
-        os.getenv("ENABLE_FOUNDATION_MODELS_TESTS", "false").lower() == "true"
-        or os.getenv("ENABLE_INTEGRATION_TESTS", "false").lower() == "true"
-    )
-
-
-@pytest.fixture(scope="session")
-def foundation_models_client(
-    request: FixtureRequest,
-    foundation_models_credentials: Dict[str, Optional[str]],
-) -> Iterator[EvolutionOpenAI]:
-    """Session-scoped Foundation Models sync client fixture with proper cleanup"""
-    if (
-        not foundation_models_credentials["key_id"]
-        or not foundation_models_credentials["secret"]
-    ):
-        pytest.skip(
-            "Real Foundation Models credentials not provided for client fixture"
-        )
-
-    strict = getattr(request, "param", True)
-    if not isinstance(strict, bool):
-        raise TypeError(
-            f"Unexpected fixture parameter type {type(strict)}, expected {bool}"
-        )
-
-    try:
-        with EvolutionOpenAI(
-            key_id=foundation_models_credentials["key_id"] or "",
-            secret=foundation_models_credentials["secret"] or "",
-            base_url=foundation_models_credentials["base_url"] or "",
-            project_id=foundation_models_credentials["project_id"],
-            timeout=60.0,
-        ) as client:
-            yield client
-    except Exception as e:
-        pytest.skip(f"Failed to create Foundation Models client: {e}")
-
-
-@pytest.fixture(scope="session")
-async def foundation_models_async_client(
-    request: FixtureRequest,
-    foundation_models_credentials: Dict[str, Optional[str]],
-) -> AsyncIterator[CloudAsyncOpenAI]:
-    """Session-scoped Foundation Models async client fixture with proper cleanup"""
-    if (
-        not foundation_models_credentials["key_id"]
-        or not foundation_models_credentials["secret"]
-    ):
-        pytest.skip(
-            "Real Foundation Models credentials not provided for async client fixture"
-        )
-
-    strict = getattr(request, "param", True)
-    if not isinstance(strict, bool):
-        raise TypeError(
-            f"Unexpected fixture parameter type {type(strict)}, expected {bool}"
-        )
-
-    try:
-        async with CloudAsyncOpenAI(
-            key_id=foundation_models_credentials["key_id"] or "",
-            secret=foundation_models_credentials["secret"] or "",
-            base_url=foundation_models_credentials["base_url"] or "",
-            project_id=foundation_models_credentials["project_id"],
-            timeout=60.0,
-        ) as client:
-            yield client
-    except Exception as e:
-        pytest.skip(f"Failed to create Foundation Models async client: {e}")
-
-
-@pytest.fixture(scope="session")
-def foundation_models_default_model() -> str:
-    """Fixture providing default Foundation Models model name"""
-    return os.getenv(
-        "EVOLUTION_FOUNDATION_MODELS_DEFAULT_MODEL",
-        "RefalMachine/RuadaptQwen2.5-7B-Lite-Beta",
-    )
diff --git a/tests/test_client.py b/tests/test_client.py
index ff8ed2d..30c3002 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -7,8 +7,8 @@
 import pytest
 
 from evolution_openai import (
-    OpenAI,
-    AsyncOpenAI,
+    EvolutionOpenAI,
+    EvolutionAsyncOpenAI,
     create_client,
     create_async_client,
 )
@@ -27,7 +27,7 @@ def test_client_initialization(self, mock_token_manager, mock_credentials):
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -48,7 +48,7 @@ def test_client_properties(self, mock_token_manager, mock_credentials):
         }
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -84,7 +84,7 @@ def test_async_client_initialization(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = AsyncOpenAI(
+        client = EvolutionAsyncOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -150,7 +150,7 @@ def test_missing_openai_dependency(self, mock_credentials):
         """Test behavior when OpenAI SDK is not installed"""
         with patch("evolution_openai.client.OPENAI_AVAILABLE", False):
             with pytest.raises(ImportError) as exc_info:
-                OpenAI(
+                EvolutionOpenAI(
                     key_id=mock_credentials["key_id"],
                     secret=mock_credentials["secret"],
                     base_url=mock_credentials["base_url"],
@@ -168,7 +168,7 @@ def test_auth_error_handling(self, mock_token_manager, mock_credentials):
         mock_token_manager.return_value = mock_manager
 
         with pytest.raises(EvolutionAuthError):
-            OpenAI(
+            EvolutionOpenAI(
                 key_id=mock_credentials["key_id"],
                 secret=mock_credentials["secret"],
                 base_url=mock_credentials["base_url"],
diff --git a/tests/test_client_advanced.py b/tests/test_client_advanced.py
index 18f948d..f1f5a92 100644
--- a/tests/test_client_advanced.py
+++ b/tests/test_client_advanced.py
@@ -6,7 +6,7 @@
 
 import pytest
 
-from evolution_openai import OpenAI, AsyncOpenAI
+from evolution_openai import EvolutionOpenAI, EvolutionAsyncOpenAI
 
 
 @pytest.mark.unit
@@ -14,40 +14,38 @@ class TestHeaderManagement:
     """Test header management functionality"""
 
     @patch("evolution_openai.client.EvolutionTokenManager")
-    def test_prepare_default_headers_with_project_id(
+    def test_prepare_default_headers_custom_merge(
         self, mock_token_manager, mock_credentials
     ):
-        """Test _prepare_default_headers with project_id"""
+        """No extra project header is added to headers"""
         mock_manager = MagicMock()
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
-            project_id="test_project_123",
             default_headers={"Custom-Header": "custom_value"},
         )
 
-        assert client.project_id == "test_project_123"
 
         # Check that headers are properly prepared
         headers = client._prepare_default_headers(
             {"User-Header": "user_value"}
         )
         assert headers["User-Header"] == "user_value"
-        assert headers["x-project-id"] == "test_project_123"
+        assert "x-project-id" not in headers
 
     @patch("evolution_openai.client.EvolutionTokenManager")
-    def test_prepare_default_headers_without_project_id(
+    def test_prepare_default_headers_without_extra_header(
         self, mock_token_manager, mock_credentials
     ):
-        """Test _prepare_default_headers without project_id"""
+        """Test _prepare_default_headers without extra project header"""
         mock_manager = MagicMock()
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -68,11 +66,10 @@ def test_update_auth_headers_multiple_sources(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
-            project_id="test_project",
         )
 
         # Mock various header sources
@@ -90,21 +87,17 @@ def test_update_auth_headers_multiple_sources(
             mock_http_client._auth_headers["Authorization"]
             == "Bearer new_token"
         )
-        assert mock_http_client._auth_headers["x-project-id"] == "test_project"
+        assert "x-project-id" not in mock_http_client._auth_headers
         assert (
             mock_http_client.default_headers["Authorization"]
             == "Bearer new_token"
         )
-        assert (
-            mock_http_client.default_headers["x-project-id"] == "test_project"
-        )
+        assert "x-project-id" not in mock_http_client.default_headers
         assert (
             mock_http_client._default_headers["Authorization"]
             == "Bearer new_token"
         )
-        assert (
-            mock_http_client._default_headers["x-project-id"] == "test_project"
-        )
+        assert "x-project-id" not in mock_http_client._default_headers
 
     @patch("evolution_openai.client.EvolutionTokenManager")
     def test_update_auth_headers_no_header_sources(
@@ -115,7 +108,7 @@ def test_update_auth_headers_no_header_sources(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -139,7 +132,7 @@ def test_get_request_headers(self, mock_token_manager, mock_credentials):
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -176,7 +169,7 @@ def test_is_auth_error_detection(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -189,6 +182,11 @@ def test_is_auth_error_detection(
             Exception("403 Forbidden"),
             Exception("UNAUTHORIZED access"),
             Exception("Authentication error occurred"),
+            # New: expired token/JWT messages
+            Exception("Jwt is expired"),
+            Exception("JWT expired"),
+            Exception("token is expired"),
+            Exception("Expired token"),
         ]
 
         for error in auth_errors:
@@ -219,7 +217,7 @@ def test_patch_client_with_request_method(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -248,7 +246,7 @@ def test_patch_client_without_request_method(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -271,7 +269,7 @@ def test_patched_request_success(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -309,7 +307,7 @@ def test_patched_request_auth_error_retry(
         ]
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -337,6 +335,43 @@ def test_patched_request_auth_error_retry(
 
         assert original_request.call_count == 2
         mock_manager.invalidate_token.assert_called_once()
 
+    @patch("evolution_openai.client.EvolutionTokenManager")
+    def test_patched_request_expired_jwt_retry(
+        self, mock_token_manager, mock_credentials
+    ):
+        """Retry should also trigger on 'Jwt is expired' errors"""
+        mock_manager = MagicMock()
+        mock_manager.get_valid_token.side_effect = [
+            "test_token",
+            "test_token",
+            "new_token",
+            "new_token",
+        ]
+        mock_token_manager.return_value = mock_manager
+
+        client = EvolutionOpenAI(
+            key_id=mock_credentials["key_id"],
+            secret=mock_credentials["secret"],
+            base_url=mock_credentials["base_url"],
+        )
+
+        mock_http_client = MagicMock()
+        original_request = MagicMock()
+        original_request.side_effect = [
+            Exception("Jwt is expired"),
+            "success",
+        ]
+        mock_http_client.request = original_request
+        client._client = mock_http_client
+
+        client._patch_client()
+
+        result = mock_http_client.request("arg1", kwarg1="value1")
+
+        assert result == "success"
+        assert original_request.call_count == 2
+        mock_manager.invalidate_token.assert_called_once()
+
     @patch("evolution_openai.client.EvolutionTokenManager")
     def test_patched_request_non_auth_error(
         self, mock_token_manager, mock_credentials
@@ -346,7 +381,7 @@ def test_patched_request_non_auth_error(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -384,7 +419,7 @@ async def test_patch_async_client_with_request_method(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = AsyncOpenAI(
+        client = EvolutionAsyncOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -411,9 +446,12 @@ async def test_patched_async_request_success(
         """Test patched async request method successful execution"""
         mock_manager = MagicMock()
         mock_manager.get_valid_token.return_value = "test_token"
+        mock_manager.get_valid_token_async = AsyncMock(
+            return_value="test_token"
+        )
         mock_token_manager.return_value = mock_manager
 
-        client = AsyncOpenAI(
+        client = EvolutionAsyncOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -449,9 +487,15 @@ async def test_patched_async_request_auth_error_retry(
             "new_token",
             "new_token",
         ]
+        mock_manager.get_valid_token_async = AsyncMock(
+            side_effect=[
+                "test_token",
+                "new_token",
+            ]
+        )
         mock_token_manager.return_value = mock_manager
 
-        client = AsyncOpenAI(
+        client = EvolutionAsyncOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -479,6 +523,49 @@ async def test_patched_async_request_auth_error_retry(
 
         assert original_request.call_count == 2
         mock_manager.invalidate_token.assert_called_once()
 
+    @patch("evolution_openai.client.EvolutionTokenManager")
+    async def test_patched_async_request_expired_jwt_retry(
+        self, mock_token_manager, mock_credentials
+    ):
+        """Async retry should trigger on 'Jwt is expired' errors"""
+        mock_manager = MagicMock()
+        mock_manager.get_valid_token.side_effect = [
+            "test_token",
+            "test_token",
+            "new_token",
+            "new_token",
+        ]
+        mock_manager.get_valid_token_async = AsyncMock(
+            side_effect=[
+                "test_token",
+                "new_token",
+            ]
+        )
+        mock_token_manager.return_value = mock_manager
+
+        client = EvolutionAsyncOpenAI(
+            key_id=mock_credentials["key_id"],
+            secret=mock_credentials["secret"],
+            base_url=mock_credentials["base_url"],
+        )
+
+        mock_http_client = MagicMock()
+        original_request = AsyncMock()
+        original_request.side_effect = [
+            Exception("Jwt is expired"),
+            "success",
+        ]
+        mock_http_client.request = original_request
+        client._client = mock_http_client
+
+        client._patch_async_client()
+
+        result = await mock_http_client.request("arg1", kwarg1="value1")
+
+        assert result == "success"
+        assert original_request.call_count == 2
+        mock_manager.invalidate_token.assert_called_once()
+
 
 @pytest.mark.unit
 class TestContextManagers:
@@ -499,7 +586,7 @@ def test_sync_context_manager_with_parent(
         mock_enter.return_value = MagicMock()
         mock_exit.return_value = None
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -522,7 +609,7 @@ def test_sync_context_manager_without_parent(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -547,7 +634,7 @@ def test_sync_context_manager_exit_error(
         mock_enter.return_value = MagicMock()
         mock_exit.side_effect = Exception("Parent exit error")
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -572,7 +659,7 @@ async def test_async_context_manager_with_parent(
         mock_aenter.return_value = AsyncMock()
         mock_aexit.return_value = AsyncMock()
 
-        client = AsyncOpenAI(
+        client = EvolutionAsyncOpenAI(
            key_id=mock_credentials["key_id"],
            secret=mock_credentials["secret"],
            base_url=mock_credentials["base_url"],
@@ -601,7 +688,7 @@ async def test_async_context_manager_exit_error(
         mock_aenter.return_value = AsyncMock()
         mock_aexit.side_effect = Exception("Parent async exit error")
 
-        client = AsyncOpenAI(
+        client = EvolutionAsyncOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -625,7 +712,7 @@ def test_with_options_creates_new_client(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -655,11 +742,10 @@ def test_with_options_preserves_credentials(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
-            project_id="test_project",
         )
 
         # Create new client with different options
@@ -668,7 +754,6 @@ def test_with_options_preserves_credentials(
         # Verify credentials are preserved
         assert new_client.key_id == mock_credentials["key_id"]
         assert new_client.secret == mock_credentials["secret"]
-        assert new_client.project_id == "test_project"
 
     @patch("evolution_openai.client.EvolutionTokenManager")
     def test_async_with_options_creates_new_client(
@@ -679,7 +764,7 @@ def test_async_with_options_creates_new_client(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = AsyncOpenAI(
+        client = EvolutionAsyncOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -714,7 +799,7 @@ def test_initialize_headers_with_valid_token(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -744,7 +829,7 @@ def test_initialize_headers_with_no_token(
         mock_manager.get_valid_token.side_effect = ["test_token", None, None]
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -767,16 +852,14 @@ def test_client_with_none_values(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
-            project_id=None,
             default_headers=None,
             timeout=None,
         )
 
-        assert client.project_id is None
         assert client.timeout is None
 
     @patch("evolution_openai.client.EvolutionTokenManager")
@@ -789,7 +872,7 @@ def test_token_manager_get_valid_token_returns_none(
         mock_manager.get_valid_token.side_effect = ["test_token", None, None]
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -807,7 +890,7 @@ def test_refresh_token_returns_none(
         mock_manager.get_valid_token.side_effect = ["test_token", None, None]
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -826,7 +909,7 @@ def test_client_properties_with_empty_strings(
         mock_manager.get_valid_token.return_value = ""
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -843,7 +926,7 @@ def test_get_request_headers_with_none_values(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
diff --git a/tests/test_client_api_forwarding.py b/tests/test_client_api_forwarding.py
index e37d348..3f14d7e 100644
--- a/tests/test_client_api_forwarding.py
+++ b/tests/test_client_api_forwarding.py
@@ -6,7 +6,7 @@
 
 import pytest
 
-from evolution_openai import OpenAI, AsyncOpenAI
+from evolution_openai import EvolutionOpenAI, EvolutionAsyncOpenAI
 
 
 @pytest.mark.unit
@@ -22,7 +22,7 @@ def test_chat_completions_create_forwarding(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -59,7 +59,7 @@ def test_models_list_forwarding(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -88,7 +88,7 @@ def test_models_retrieve_forwarding(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -117,7 +117,7 @@ def test_completions_create_forwarding(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -154,7 +154,7 @@ async def test_async_chat_completions_create_forwarding(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = AsyncOpenAI(
+        client = EvolutionAsyncOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -191,7 +191,7 @@ async def test_async_models_list_forwarding(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = AsyncOpenAI(
+        client = EvolutionAsyncOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -225,7 +225,7 @@ def test_streaming_chat_completions(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -266,7 +266,7 @@ async def test_async_streaming_chat_completions(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = AsyncOpenAI(
+        client = EvolutionAsyncOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -319,7 +319,7 @@ def test_extra_headers_handling(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -350,7 +350,7 @@ def test_extra_query_handling(self, mock_token_manager, mock_credentials):
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -381,7 +381,7 @@ def test_extra_body_handling(self, mock_token_manager, mock_credentials):
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -414,7 +414,7 @@ def test_timeout_parameter_handling(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -452,7 +452,7 @@ def test_with_raw_response_support(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -495,7 +495,7 @@ def test_with_streaming_response_support(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -539,7 +539,7 @@ def test_client_with_empty_base_url(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url="",
@@ -556,7 +556,7 @@ def test_client_with_none_credentials(self, mock_token_manager):
         mock_token_manager.return_value = mock_manager
 
         # This should work as the client doesn't validate credentials
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id="",
             secret="",
             base_url="https://api.example.com",
@@ -574,7 +574,7 @@ def test_client_method_attribute_access(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -598,7 +598,7 @@ def test_async_client_method_attribute_access(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = AsyncOpenAI(
+        client = EvolutionAsyncOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -622,7 +622,7 @@ def test_client_with_unusual_parameters(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -642,14 +642,14 @@ def test_client_inheritance_chain(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
         )
 
         # Should be instance of both Evolution and OpenAI classes
-        assert isinstance(client, OpenAI)
+        assert isinstance(client, EvolutionOpenAI)
         # Check that it has the expected methods
         assert hasattr(client, "current_token")
         assert hasattr(client, "refresh_token")
@@ -664,14 +664,14 @@ def test_async_client_inheritance_chain(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = AsyncOpenAI(
+        client = EvolutionAsyncOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
         )
 
         # Should be instance of both Evolution and AsyncOpenAI classes
-        assert isinstance(client, AsyncOpenAI)
+        assert isinstance(client, EvolutionAsyncOpenAI)
         # Check that it has the expected methods
         assert hasattr(client, "current_token")
         assert hasattr(client, "refresh_token")
diff --git a/tests/test_client_integration_scenarios.py b/tests/test_client_integration_scenarios.py
index fd1a731..2887e3f 100644
--- a/tests/test_client_integration_scenarios.py
+++ b/tests/test_client_integration_scenarios.py
@@ -6,7 +6,7 @@
 
 import pytest
 
-from evolution_openai import OpenAI, AsyncOpenAI
+from evolution_openai import EvolutionOpenAI, EvolutionAsyncOpenAI
 from evolution_openai.exceptions import EvolutionAuthError
 
 
 @pytest.mark.unit
@@ -27,7 +27,7 @@ def test_openai_version_check_success(self, mock_credentials):
             mock_token_manager.return_value = mock_manager
 
             # Should not raise an error
-            client = OpenAI(
+            client = EvolutionOpenAI(
                 key_id=mock_credentials["key_id"],
                 secret=mock_credentials["secret"],
                 base_url=mock_credentials["base_url"],
@@ -47,7 +47,7 @@ def test_openai_version_check_malformed_version(self, mock_credentials):
             mock_token_manager.return_value = mock_manager
 
             # Should still work as we check if len(version_parts) > 1
-            client = OpenAI(
+            client = EvolutionOpenAI(
                 key_id=mock_credentials["key_id"],
                 secret=mock_credentials["secret"],
                 base_url=mock_credentials["base_url"],
@@ -58,7 +58,7 @@ def test_openai_not_available_fallback(self, mock_credentials):
         """Test behavior when OpenAI is not available"""
         with patch("evolution_openai.client.OPENAI_AVAILABLE", False):
             with pytest.raises(ImportError) as exc_info:
-                OpenAI(
+                EvolutionOpenAI(
                     key_id=mock_credentials["key_id"],
                     secret=mock_credentials["secret"],
                     base_url=mock_credentials["base_url"],
@@ -66,24 +66,21 @@ def test_openai_not_available_fallback(self, mock_credentials):
 
         assert "OpenAI SDK required" in str(exc_info.value)
 
-    def test_supports_project_flag(self, mock_credentials):
-        """Test SUPPORTS_PROJECT flag behavior"""
-        with patch("evolution_openai.client.SUPPORTS_PROJECT", True):
-            with patch(
-                "evolution_openai.client.EvolutionTokenManager"
-            ) as mock_token_manager:
-                mock_manager = MagicMock()
-                mock_manager.get_valid_token.return_value = "test_token"
-                mock_token_manager.return_value = mock_manager
+    def test_no_project_flag(self, mock_credentials):
+        """Project flag is removed and not used"""
+        with patch(
+            "evolution_openai.client.EvolutionTokenManager"
+        ) as mock_token_manager:
+            mock_manager = MagicMock()
+            mock_manager.get_valid_token.return_value = "test_token"
+            mock_token_manager.return_value = mock_manager
 
-                client = OpenAI(
-                    key_id=mock_credentials["key_id"],
-                    secret=mock_credentials["secret"],
-                    base_url=mock_credentials["base_url"],
-                    project_id="test_project",
-                )
-
-                assert client.project_id == "test_project"
+            client = EvolutionOpenAI(
+                key_id=mock_credentials["key_id"],
+                secret=mock_credentials["secret"],
+                base_url=mock_credentials["base_url"],
+            )
+            assert client is not None
 
 
 @pytest.mark.unit
@@ -99,7 +96,7 @@ def test_client_ignores_api_key_parameter(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -118,13 +115,12 @@ def test_client_with_all_openai_parameters(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager
 
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
             api_key="ignored",
             organization="test_org",
-            project_id="test_project",
             timeout=45.0,
             max_retries=3,
             default_headers={"X-Custom": "test"},
@@ -133,7 +129,6 @@ def test_client_with_all_openai_parameters(

         assert client.key_id == mock_credentials["key_id"]
         assert client.secret == mock_credentials["secret"]
-        assert client.project_id == "test_project"
         assert client.timeout == 45.0
         assert client.max_retries == 3
@@ -146,13 +141,12 @@ def test_async_client_with_all_parameters(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager

-        client = AsyncOpenAI(
+        client = EvolutionAsyncOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
             api_key="ignored",
             organization="test_org",
-            project_id="test_project",
             timeout=45.0,
             max_retries=3,
             default_headers={"X-Custom": "test"},
@@ -161,7 +155,6 @@ def test_async_client_with_all_parameters(

         assert client.key_id == mock_credentials["key_id"]
         assert client.secret == mock_credentials["secret"]
-        assert client.project_id == "test_project"
         assert client.timeout == 45.0
         assert client.max_retries == 3
@@ -179,7 +172,7 @@ def test_token_refresh_on_client_creation(
         mock_manager.get_valid_token.return_value = "initial_token"
         mock_token_manager.return_value = mock_manager

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -205,7 +198,7 @@ def test_multiple_token_refresh_calls(
         ]
         mock_token_manager.return_value = mock_manager

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -234,7 +227,7 @@ def test_token_info_retrieval(self, mock_token_manager, mock_credentials):
         }
         mock_token_manager.return_value = mock_manager

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -257,7 +250,7 @@ def test_token_manager_exception_handling(
         mock_token_manager.return_value = mock_manager

         with pytest.raises(EvolutionAuthError):
-            OpenAI(
+            EvolutionOpenAI(
                 key_id=mock_credentials["key_id"],
                 secret=mock_credentials["secret"],
                 base_url=mock_credentials["base_url"],
@@ -269,19 +262,18 @@ class TestHeaderInjectionScenarios:
     """Test header injection scenarios"""

     @patch("evolution_openai.client.EvolutionTokenManager")
-    def test_project_id_header_injection_sync(
+    def test_no_project_header_injection_sync(
         self, mock_token_manager, mock_credentials
     ):
-        """Test project_id header injection in sync client"""
+        """Project header injection is not supported"""
         mock_manager = MagicMock()
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
-            project_id="test_project_123",
         )

         # Mock HTTP client to test header injection
@@ -294,34 +286,24 @@ def test_project_id_header_injection_sync(
         # Update headers
         client._update_auth_headers("new_token")

-        # Check that project_id header was added to all sources
-        assert (
-            mock_http_client._auth_headers.get("x-project-id")
-            == "test_project_123"
-        )
-        assert (
-            mock_http_client.default_headers.get("x-project-id")
-            == "test_project_123"
-        )
-        assert (
-            mock_http_client._default_headers.get("x-project-id")
-            == "test_project_123"
-        )
+        # Project header should not be present
+        assert "x-project-id" not in mock_http_client._auth_headers
+        assert "x-project-id" not in mock_http_client.default_headers
+        assert "x-project-id" not in mock_http_client._default_headers

     @patch("evolution_openai.client.EvolutionTokenManager")
-    def test_project_id_header_injection_async(
+    def test_no_project_header_injection_async(
         self, mock_token_manager, mock_credentials
     ):
-        """Test project_id header injection in async client"""
+        """Project header injection is not supported"""
         mock_manager = MagicMock()
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager

-        client = AsyncOpenAI(
+        client = EvolutionAsyncOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
-            project_id="test_project_async",
         )

         # Mock HTTP client to test header injection
@@ -334,19 +316,10 @@ def test_project_id_header_injection_async(
         # Update headers
         client._update_auth_headers("new_token")

-        # Check that project_id header was added to all sources
-        assert (
-            mock_http_client._auth_headers.get("x-project-id")
-            == "test_project_async"
-        )
-        assert (
-            mock_http_client.default_headers.get("x-project-id")
-            == "test_project_async"
-        )
-        assert (
-            mock_http_client._default_headers.get("x-project-id")
-            == "test_project_async"
-        )
+        # Project header should not be present
+        assert "x-project-id" not in mock_http_client._auth_headers
+        assert "x-project-id" not in mock_http_client.default_headers
+        assert "x-project-id" not in mock_http_client._default_headers

     @patch("evolution_openai.client.EvolutionTokenManager")
     def test_custom_headers_preservation(
@@ -362,7 +335,7 @@ def test_custom_headers_preservation(
             "User-Agent": "custom-agent",
         }

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -374,19 +347,18 @@ def test_custom_headers_preservation(
         assert client.default_headers["User-Agent"] == "custom-agent"

     @patch("evolution_openai.client.EvolutionTokenManager")
-    def test_headers_with_none_project_id(
+    def test_headers_without_project(
         self, mock_token_manager, mock_credentials
     ):
-        """Test header handling when project_id is None"""
+        """No project header is added regardless"""
         mock_manager = MagicMock()
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
-            project_id=None,
         )

         # Mock HTTP client
@@ -397,7 +369,7 @@ def test_headers_without_project(
         # Update headers
         client._update_auth_headers("new_token")

-        # Should not add project_id header when it's None
+        # Should not add project header
         assert "x-project-id" not in mock_http_client._auth_headers


@@ -420,7 +392,7 @@ def test_request_interception_token_update(
         ]
         mock_token_manager.return_value = mock_manager

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -461,7 +433,7 @@ def test_request_interception_auth_error_recovery(
         ]
         mock_token_manager.return_value = mock_manager

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -502,9 +474,10 @@ async def test_async_request_interception_token_update(
             "token3",
             "token4",
         ]
+        mock_manager.get_valid_token_async = AsyncMock(return_value="token3")
         mock_token_manager.return_value = mock_manager

-        client = AsyncOpenAI(
+        client = EvolutionAsyncOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -543,7 +516,7 @@ def test_client_credentials_immutability(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -572,7 +545,7 @@ def test_non_auth_error_propagation(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -606,7 +579,7 @@ def test_token_manager_error_propagation(
         mock_token_manager.return_value = mock_manager

         with pytest.raises(Exception) as exc_info:
-            OpenAI(
+            EvolutionOpenAI(
                 key_id=mock_credentials["key_id"],
                 secret=mock_credentials["secret"],
                 base_url=mock_credentials["base_url"],
@@ -621,9 +594,12 @@ async def test_async_error_propagation(
         """Test that async errors are propagated correctly"""
         mock_manager = MagicMock()
         mock_manager.get_valid_token.return_value = "test_token"
+        mock_manager.get_valid_token_async = AsyncMock(
+            return_value="test_token"
+        )
         mock_token_manager.return_value = mock_manager

-        client = AsyncOpenAI(
+        client = EvolutionAsyncOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
diff --git a/tests/test_compatibility.py b/tests/test_compatibility.py
index 2ac21de..a716124 100644
--- a/tests/test_compatibility.py
+++ b/tests/test_compatibility.py
@@ -10,7 +10,7 @@

 import pytest

-from evolution_openai import OpenAI, AsyncOpenAI
+from evolution_openai import EvolutionOpenAI, EvolutionAsyncOpenAI

 try:
     from openai import OpenAI as OriginalOpenAI
@@ -35,7 +35,7 @@ def test_client_has_chat_completions_create(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -56,7 +56,7 @@ def test_client_has_models_methods(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -78,7 +78,7 @@ def test_client_has_completions_create(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -98,7 +98,7 @@ def test_advanced_features_compatibility(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -209,7 +209,7 @@ def test_async_client_has_chat_completions(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager

-        client = AsyncOpenAI(
+        client = EvolutionAsyncOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -229,7 +229,7 @@ def test_async_client_has_models_methods(
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager

-        client = AsyncOpenAI(
+        client = EvolutionAsyncOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -247,7 +247,7 @@ def test_async_context_manager(self, mock_token_manager, mock_credentials):
         mock_manager.get_valid_token.return_value = "test_token"
         mock_token_manager.return_value = mock_manager

-        client = AsyncOpenAI(
+        client = EvolutionAsyncOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -288,7 +288,7 @@ def test_chat_create_accepts_all_parameters(
         mock_openai_instance._client = MagicMock()
         mock_openai_instance.api_key = "test_token"

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -344,7 +344,7 @@ def test_models_list_accepts_parameters(
         mock_openai_instance._client = MagicMock()
         mock_openai_instance.api_key = "test_token"

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -398,7 +398,7 @@ def test_streaming_response_compatibility(
         mock_openai_instance._client = MagicMock()
         mock_openai_instance.api_key = "test_token"

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -470,7 +470,7 @@ def test_openai_errors_passthrough(
         mock_openai_instance._client = MagicMock()
         mock_openai_instance.api_key = "test_token"

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -532,7 +532,7 @@ def test_response_object_compatibility(
         mock_openai_instance._client = MagicMock()
         mock_openai_instance.api_key = "test_token"

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
diff --git a/tests/test_foundation_models_integration.py b/tests/test_foundation_models_integration.py
deleted file mode 100644
index 3ede59f..0000000
--- a/tests/test_foundation_models_integration.py
+++ /dev/null
@@ -1,719 +0,0 @@
-"""
-Integration tests for Evolution Foundation Models
-
-These tests require real Evolution Foundation Models credentials and are only run when
-ENABLE_FOUNDATION_MODELS_TESTS=true or ENABLE_INTEGRATION_TESTS=true is set in environment or .env file.
-
-Based on examples/foundation_models_example.py
-"""
-
-import time
-import asyncio
-
-import pytest
-
-
-@pytest.mark.integration
-@pytest.mark.foundation_models
-class TestFoundationModelsIntegration:
-    """Integration tests with real Evolution Foundation Models API"""
-
-    def test_foundation_models_token_acquisition(
-        self, foundation_models_client, foundation_models_credentials
-    ):
-        """Test acquiring real token from Foundation Models API"""
-        # Test token acquisition
-        token = foundation_models_client.current_token
-        assert token is not None
-        assert len(token) > 0
-
-        # Test token info
-        token_info = foundation_models_client.get_token_info()
-        assert token_info["has_token"] is True
-        assert token_info["is_valid"] is True
-
-        # Verify project_id is configured
-        assert (
-            foundation_models_client.project_id
-            == foundation_models_credentials["project_id"]
-        )
-
-        print(
-            f"✅ Foundation Models token acquired successfully: {token[:20]}..."
-        )
-        print(f"🏷️ Project ID: {foundation_models_client.project_id}")
-
-    def test_foundation_models_basic_chat_completion(
-        self, foundation_models_client, foundation_models_default_model
-    ):
-        """Test basic chat completion with Foundation Models API"""
-        print(f"🔧 Using model: {foundation_models_default_model}")
-
-        response = foundation_models_client.chat.completions.create(
-            model=foundation_models_default_model,
-            messages=[
-                {
-                    "role": "system",
-                    "content": "Ты полезный помощник, использующий Evolution Foundation Models.",
-                },
-                {
-                    "role": "user",
-                    "content": "Расскажи кратко о возможностях искусственного интеллекта",
-                },
-            ],
-            max_tokens=10,
-            temperature=0.7,
-        )
-
-        # Verify response structure
-        assert response.choices is not None
-        assert len(response.choices) > 0
-        assert response.choices[0].message is not None
-        assert response.choices[0].message.content is not None
-        assert len(response.choices[0].message.content) > 0
-
-        # Verify response metadata
-        assert response.model is not None
-        assert response.usage is not None
-        assert response.usage.total_tokens > 0
-
-        print(f"✅ Response: {response.choices[0].message.content}")
-        print(f"📊 Model: {response.model}")
-        print(f"🔢 Total tokens: {response.usage.total_tokens}")
-
-    def test_foundation_models_streaming(
-        self, foundation_models_client, foundation_models_default_model
-    ):
-        """Test streaming with Foundation Models API"""
-        print(
-            f"🔧 Using model for streaming: {foundation_models_default_model}"
-        )
-
-        stream = foundation_models_client.chat.completions.create(
-            model=foundation_models_default_model,
-            messages=[
-                {
-                    "role": "user",
-                    "content": "Напиши короткое стихотворение про технологии",
-                }
-            ],
-            stream=True,
-            max_tokens=15,
-            temperature=0.8,
-        )
-
-        content_parts = []
-        chunk_count = 0
-        for chunk in stream:
-            chunk_count += 1
-            if (
-                chunk.choices
-                and len(chunk.choices) > 0
-                and chunk.choices[0].delta
-                and chunk.choices[0].delta.content
-            ):
-                content = chunk.choices[0].delta.content
-                content_parts.append(content)
-
-        full_content = "".join(content_parts)
-        assert len(full_content) > 0
-        assert len(content_parts) > 0
-        assert chunk_count > 0
-
-        print(f"✅ Streaming response: {full_content}")
-        print(
-            f"📊 Received {len(content_parts)} content chunks in {chunk_count} total chunks"
-        )
-
-    def test_foundation_models_with_options(
-        self, foundation_models_client, foundation_models_default_model
-    ):
-        """Test Foundation Models with additional options"""
-        print(
-            f"🔧 Using model with options: {foundation_models_default_model}"
-        )
-
-        # Test with_options for configuration
-        client_with_options = foundation_models_client.with_options(
-            timeout=60.0, max_retries=3
-        )
-
-        response = client_with_options.chat.completions.create(
-            model=foundation_models_default_model,
-            messages=[
-                {
-                    "role": "user",
-                    "content": "Создай план изучения Python для начинающих",
-                }
-            ],
-            max_tokens=15,
-            temperature=0.3,
-        )
-
-        assert response.choices is not None
-        assert len(response.choices) > 0
-        assert response.choices[0].message is not None
-        assert response.choices[0].message.content is not None
-        assert len(response.choices[0].message.content) > 0
-
-        # Test token info
-        token_info = client_with_options.get_token_info()
-        assert token_info["has_token"] is True
-        assert token_info["is_valid"] is True
-
-        print(
-            f"✅ Response with options: {response.choices[0].message.content}"
-        )
-        print(f"📊 Model: {response.model}")
-        print(f"🔢 Total tokens: {response.usage.total_tokens}")
-        print(f"🔑 Token status: {token_info}")
-
-    def test_foundation_models_token_refresh(self, foundation_models_client):
-        """Test token refresh with Foundation Models API"""
-        # Get initial token
-        token1 = foundation_models_client.current_token
-        assert token1 is not None
-        assert len(token1) > 0
-
-        # Force refresh
-        token2 = foundation_models_client.refresh_token()
-        assert token2 is not None
-        assert len(token2) > 0
-
-        # Tokens should be different (new token)
-        assert token1 != token2
-
-        print(f"✅ Token refresh: {token1[:15]}... -> {token2[:15]}...")
-
-    async def test_foundation_models_async_basic(
-        self, foundation_models_async_client, foundation_models_default_model
-    ):
-        """Test basic async Foundation Models functionality"""
-        print(f"🔧 Using model for async: {foundation_models_default_model}")
-
-        response = await foundation_models_async_client.chat.completions.create(
-            model=foundation_models_default_model,
-            messages=[
-                {
-                    "role": "user",
-                    "content": "Объясни простыми словами, что такое машинное обучение",
-                }
-            ],
-            max_tokens=12,
-            temperature=0.5,
-        )
-
-        assert response.choices is not None
-        assert len(response.choices) > 0
-        assert response.choices[0].message is not None
-        assert response.choices[0].message.content is not None
-        assert len(response.choices[0].message.content) > 0
-
-        print(f"✅ Async response: {response.choices[0].message.content}")
-        print(f"📊 Model: {response.model}")
-        print(f"🔢 Total tokens: {response.usage.total_tokens}")
-
-    async def test_foundation_models_async_streaming(
-        self, foundation_models_async_client, foundation_models_default_model
-    ):
-        """Test async streaming with Foundation Models"""
-        print(
-            f"🔧 Using model for async streaming: {foundation_models_default_model}"
-        )
-
-        stream = await foundation_models_async_client.chat.completions.create(
-            model=foundation_models_default_model,
-            messages=[
-                {
-                    "role": "user",
-                    "content": "Напиши короткое стихотворение про космос",
-                }
-            ],
-            stream=True,
-            max_tokens=12,
-            temperature=0.8,
-        )
-
-        content_parts = []
-        chunk_count = 0
-        async for chunk in stream:
-            chunk_count += 1
-            if (
-                chunk.choices
-                and len(chunk.choices) > 0
-                and chunk.choices[0].delta
-                and chunk.choices[0].delta.content
-            ):
-                content = chunk.choices[0].delta.content
-                content_parts.append(content)
-
-        full_content = "".join(content_parts)
-        assert len(full_content) > 0
-        assert len(content_parts) > 0
-        assert chunk_count > 0
-
-        print(f"✅ Async streaming response: {full_content}")
-        print(
-            f"📊 Received {len(content_parts)} content chunks in {chunk_count} total chunks"
-        )
-
-    async def test_foundation_models_parallel_requests(
-        self, foundation_models_async_client, foundation_models_default_model
-    ):
-        """Test parallel requests to Foundation Models"""
-        print(
-            f"🔧 Using model for parallel requests: {foundation_models_default_model}"
-        )
-
-        # List of questions for parallel processing
-        questions = [
-            "Что такое искусственный интеллект?",
-            "Как работает машинное обучение?",
-            "Что такое нейронные сети?",
-        ]
-
-        # Create tasks for parallel execution
-        tasks = []
-        for question in questions:
-            task = foundation_models_async_client.chat.completions.create(
-                model=foundation_models_default_model,
-                messages=[
-                    {
-                        "role": "system",
-                        "content": "Дай краткий ответ в 1-2 предложения.",
-                    },
-                    {"role": "user", "content": question},
-                ],
-                max_tokens=10,
-                temperature=0.5,
-            )
-            tasks.append(task)
-
-        # Execute all requests in parallel
-        start_time = time.time()
-        responses = await asyncio.gather(*tasks)
-        end_time = time.time()
-
-        elapsed = end_time - start_time
-        print(
-            f"⚡ Processed {len(questions)} requests in {elapsed:.2f} seconds"
-        )
-
-        # Verify all responses
-        for i, (question, response) in enumerate(zip(questions, responses)):
-            assert response.choices is not None
-            assert len(response.choices) > 0
-            assert response.choices[0].message is not None
-            assert response.choices[0].message.content is not None
-            assert len(response.choices[0].message.content) > 0
-
-            print(f"❓ Question {i + 1}: {question}")
-            print(f"✅ Answer: {response.choices[0].message.content}")
-            print(f"🔢 Tokens: {response.usage.total_tokens}")
-            print("-" * 50)
-
-        assert len(responses) == len(questions)
-
-
-@pytest.mark.integration
-@pytest.mark.foundation_models
-@pytest.mark.slow
-class TestFoundationModelsPerformance:
-    """Performance and load tests for Foundation Models API"""
-
-    def test_foundation_models_multiple_sequential_requests(
-        self, foundation_models_client, foundation_models_default_model
-    ):
-        """Test multiple sequential requests to Foundation Models"""
-        print(
-            f"🔧 Testing sequential requests with model: {foundation_models_default_model}"
-        )
-
-        request_count = 3
-        responses = []
-        start_time = time.time()
-
-        for i in range(request_count):
-            response = foundation_models_client.chat.completions.create(
-                model=foundation_models_default_model,
-                messages=[
-                    {
-                        "role": "user",
-                        "content": f"Вопрос {i + 1}: Что такое программирование?",
-                    }
-                ],
-                max_tokens=8,
-                temperature=0.3,
-            )
-            responses.append(response)
-
-        end_time = time.time()
-        elapsed = end_time - start_time
-
-        # Verify all responses
-        for i, response in enumerate(responses):
-            assert response.choices is not None
-            assert len(response.choices) > 0
-            assert response.choices[0].message is not None
-            assert response.choices[0].message.content is not None
-            print(f"✅ Request {i + 1}: {response.choices[0].message.content}")
-
-        print(
-            f"⏱️ {request_count} sequential requests completed in {elapsed:.2f} seconds"
-        )
-        print(
-            f"📊 Average time per request: {elapsed / request_count:.2f} seconds"
-        )
-
-    def test_foundation_models_streaming_performance(
-        self, foundation_models_client, foundation_models_default_model
-    ):
-        """Test streaming performance with Foundation Models"""
-        print(
-            f"🔧 Testing streaming performance with model: {foundation_models_default_model}"
-        )
-
-        start_time = time.time()
-        stream = foundation_models_client.chat.completions.create(
-            model=foundation_models_default_model,
-            messages=[
-                {
-                    "role": "user",
-                    "content": "Напиши подробный рассказ о технологиях будущего",
-                }
-            ],
-            stream=True,
-            max_tokens=20,
-            temperature=0.7,
-        )
-
-        content_parts = []
-        chunk_timestamps = []
-
-        for chunk in stream:
-            chunk_timestamps.append(time.time())
-            if (
-                chunk.choices
-                and len(chunk.choices) > 0
-                and chunk.choices[0].delta
-                and chunk.choices[0].delta.content
-            ):
-                content_parts.append(chunk.choices[0].delta.content)
-
-        end_time = time.time()
-        total_elapsed = end_time - start_time
-
-        full_content = "".join(content_parts)
-        assert len(full_content) > 0
-
-        # Calculate streaming statistics
-        first_chunk_time = (
-            chunk_timestamps[0] - start_time if chunk_timestamps else 0
-        )
-        avg_chunk_interval = 0
-        if len(chunk_timestamps) > 1:
-            intervals = [
-                chunk_timestamps[i] - chunk_timestamps[i - 1]
-                for i in range(1, len(chunk_timestamps))
-            ]
-            avg_chunk_interval = sum(intervals) / len(intervals)
-
-        print("✅ Streaming completed")
-        print(f"📊 Total content length: {len(full_content)} characters")
-        print(f"⏱️ Total time: {total_elapsed:.2f} seconds")
-        print(f"🚀 Time to first chunk: {first_chunk_time:.2f} seconds")
-        print(f"📈 Average chunk interval: {avg_chunk_interval:.3f} seconds")
-        print(f"🔢 Total chunks: {len(chunk_timestamps)}")
-        print(f"📝 Content chunks: {len(content_parts)}")
-
-    async def test_foundation_models_concurrent_load(
-        self, foundation_models_async_client, foundation_models_default_model
-    ):
-        """Test concurrent load on Foundation Models API"""
-        print(
-            f"🔧 Testing concurrent load with model: {foundation_models_default_model}"
-        )
-
-        # Test with multiple concurrent requests
-        concurrent_count = 5
-        tasks = []
-
-        for i in range(concurrent_count):
-            task = foundation_models_async_client.chat.completions.create(
-                model=foundation_models_default_model,
-                messages=[
-                    {
-                        "role": "user",
-                        "content": f"Concurrent request {i + 1}: Explain artificial intelligence briefly",
-                    }
-                ],
-                max_tokens=8,
-                temperature=0.4,
-            )
-            tasks.append(task)
-
-        start_time = time.time()
-        responses = await asyncio.gather(*tasks, return_exceptions=True)
-        end_time = time.time()
-
-        elapsed = end_time - start_time
-
-        # Verify responses
-        successful_responses = 0
-        failed_responses = 0
-
-        for i, response in enumerate(responses):
-            if isinstance(response, Exception):
-                print(f"❌ Request {i + 1} failed: {response}")
-                failed_responses += 1
-            else:
-                assert response.choices is not None
-                assert len(response.choices) > 0
-                assert response.choices[0].message is not None
-                assert response.choices[0].message.content is not None
-                print(
-                    f"✅ Request {i + 1}: {response.choices[0].message.content}"
-                )
-                successful_responses += 1
-
-        print(
-            f"⚡ {concurrent_count} concurrent requests completed in {elapsed:.2f} seconds"
-        )
-        print(
-            f"📊 Success rate: {successful_responses}/{concurrent_count} ({successful_responses / concurrent_count * 100:.1f}%)"
-        )
-        print(
-            f"📈 Average time per request: {elapsed / concurrent_count:.2f} seconds"
-        )
-
-        # At least 80% should succeed
-        assert successful_responses / concurrent_count >= 0.8
-
-
-@pytest.mark.integration
-@pytest.mark.foundation_models
-class TestFoundationModelsErrorHandling:
-    """Error handling tests for Foundation Models API"""
-
-    def test_foundation_models_invalid_model(self, foundation_models_client):
-        """Test error handling with invalid model name"""
-        with pytest.raises(Exception) as exc_info:
-            foundation_models_client.chat.completions.create(
-                model="invalid-model-name-12345",
-                messages=[
-                    {
-                        "role": "user",
-                        "content": "This should fail",
-                    }
-                ],
-                max_tokens=10,
-            )
-
-        print(f"✅ Expected error for invalid model: {exc_info.value}")
-
-    def test_foundation_models_invalid_parameters(
-        self, foundation_models_client, foundation_models_default_model
-    ):
-        """Test error handling with invalid parameters"""
-        # Test with extremely high max_tokens (might be clamped instead of raising error)
-        try:
-            response = foundation_models_client.chat.completions.create(
-                model=foundation_models_default_model,
-                messages=[
-                    {
-                        "role": "user",
-                        "content": "Test with very high max_tokens",
-                    }
-                ],
-                max_tokens=999999,  # Extremely high value
-            )
-            # If it succeeds, the API might clamp the value rather than error
-            print(
-                f"✅ API handled high max_tokens gracefully: {response.usage.total_tokens} tokens"
-            )
-        except Exception as e:
-            print(f"✅ Expected error for invalid max_tokens: {e}")
-
-        # Test with invalid temperature (outside normal range)
-        try:
-            response = foundation_models_client.chat.completions.create(
-                model=foundation_models_default_model,
-                messages=[
-                    {
-                        "role": "user",
-                        "content": "Test with invalid temperature",
-                    }
-                ],
-                max_tokens=8,
-                temperature=10.0,  # Invalid temperature value
-            )
-            # If it succeeds, the API might clamp the value rather than error
-            print("✅ API handled high temperature gracefully")
-        except Exception as e:
-            print(f"✅ Expected error for invalid temperature: {e}")
-
-        # Test with completely invalid parameter type (this should fail)
-        with pytest.raises(Exception) as exc_info:
-            foundation_models_client.chat.completions.create(
-                model=foundation_models_default_model,
-                messages="invalid_messages_type",  # Should be list, not string
-                max_tokens=8,
-            )
-
-        print(f"✅ Expected error for invalid message type: {exc_info.value}")
-
-    def test_foundation_models_empty_messages(
-        self, foundation_models_client, foundation_models_default_model
-    ):
-        """Test error handling with empty messages"""
-        with pytest.raises(Exception) as exc_info:
-            foundation_models_client.chat.completions.create(
-                model=foundation_models_default_model,
-                messages=[],
-                max_tokens=10,
-            )
-
-        print(f"✅ Expected error for empty messages: {exc_info.value}")
-
-    async def test_foundation_models_async_error_handling(
-        self, foundation_models_async_client, foundation_models_default_model
-    ):
-        """Test async error handling"""
-        with pytest.raises(Exception) as exc_info:
-            await foundation_models_async_client.chat.completions.create(
-                model="invalid-async-model",
-                messages=[
-                    {
-                        "role": "user",
-                        "content": "This should fail async",
-                    }
-                ],
-                max_tokens=10,
-            )
-
-        print(f"✅ Expected async error: {exc_info.value}")
-
-
-@pytest.mark.integration
-@pytest.mark.foundation_models
-class TestFoundationModelsCompatibility:
-    """Compatibility tests for Foundation Models API"""
-
-    def test_foundation_models_different_temperatures(
-        self, foundation_models_client, foundation_models_default_model
-    ):
-        """Test Foundation Models with different temperature settings"""
-        temperatures = [0.1, 0.5, 0.9]
-
-        for temp in temperatures:
-            response = foundation_models_client.chat.completions.create(
-                model=foundation_models_default_model,
-                messages=[
-                    {
-                        "role": "user",
-                        "content": f"Generate text with temperature {temp}",
-                    }
-                ],
-                max_tokens=8,
-                temperature=temp,
-            )
-
-            assert response.choices is not None
-            assert len(response.choices) > 0
-            assert response.choices[0].message is not None
-            assert response.choices[0].message.content is not None
-
-            print(
-                f"✅ Temperature {temp}: {response.choices[0].message.content}"
-            )
-
-    def test_foundation_models_different_max_tokens(
-        self, foundation_models_client, foundation_models_default_model
-    ):
-        """Test Foundation Models with different max_tokens settings"""
-        max_tokens_values = [5, 10, 15]
-
-        for max_tokens in max_tokens_values:
-            response = foundation_models_client.chat.completions.create(
-                model=foundation_models_default_model,
-                messages=[
-                    {
-                        "role": "user",
-                        "content": "Tell me about programming",
-                    }
-                ],
-                max_tokens=max_tokens,
-                temperature=0.5,
-            )
-
-            assert response.choices is not None
-            assert len(response.choices) > 0
-            assert response.choices[0].message is not None
-            assert response.choices[0].message.content is not None
-            assert response.usage.total_tokens > 0
-
-            print(
-                f"✅ Max tokens {max_tokens}: {len(response.choices[0].message.content)} chars, {response.usage.total_tokens} tokens"
-            )
-
-    def test_foundation_models_system_messages(
-        self, foundation_models_client, foundation_models_default_model
-    ):
-        """Test Foundation Models with system messages"""
-        response = foundation_models_client.chat.completions.create(
-            model=foundation_models_default_model,
-            messages=[
-                {
-                    "role": "system",
-                    "content": "Ты эксперт по программированию. Отвечай кратко и точно.",
-                },
-                {
-                    "role": "user",
-                    "content": "Что такое Python?",
-                },
-            ],
-            max_tokens=10,
-            temperature=0.3,
-        )
-
-        assert response.choices is not None
-        assert len(response.choices) > 0
-        assert response.choices[0].message is not None
-        assert response.choices[0].message.content is not None
-
-        print(
-            f"✅ System message response: {response.choices[0].message.content}"
-        )
-
-    def test_foundation_models_conversation_history(
-        self, foundation_models_client, foundation_models_default_model
-    ):
-        """Test Foundation Models with conversation history"""
-        response = foundation_models_client.chat.completions.create(
-            model=foundation_models_default_model,
-            messages=[
-                {
-                    "role": "user",
-                    "content": "Привет! Как дела?",
-                },
-                {
-                    "role": "assistant",
-                    "content": "Привет! Дела хорошо, спасибо за вопрос!",
-                },
-                {
-                    "role": "user",
-                    "content": "Можешь рассказать о машинном обучении?",
-                },
-            ],
-            max_tokens=12,
-            temperature=0.4,
-        )
-
-        assert response.choices is not None
-        assert len(response.choices) > 0
-        assert response.choices[0].message is not None
-        assert response.choices[0].message.content is not None
-
-        print(
-            f"✅ Conversation history response: {response.choices[0].message.content}"
-        )
diff --git a/tests/test_foundation_models_unit.py b/tests/test_foundation_models_unit.py
deleted file mode 100644
index af8af1b..0000000
--- a/tests/test_foundation_models_unit.py
+++ /dev/null
@@ -1,248 +0,0 @@
-#!/usr/bin/env python3
-"""
-Unit tests for Foundation Models functionality
-"""
-
-from unittest.mock import MagicMock, patch
-
-import pytest
-
-from evolution_openai import OpenAI, AsyncOpenAI
-from evolution_openai.exceptions import EvolutionAuthError
-
-
-@pytest.mark.unit
-class TestFoundationModelsUnit:
-    """Unit tests for Foundation Models functionality"""
-
-    FOUNDATION_MODELS_URL = (
-        "https://foundation-models.api.cloud.ru/api/gigacube/openai/v1"
-    )
-    DEFAULT_MODEL = "RefalMachine/RuadaptQwen2.5-7B-Lite-Beta"
-
-    @patch("evolution_openai.client.EvolutionTokenManager")
-    def test_foundation_models_client_initialization(
-        self, mock_token_manager, mock_credentials
-    ):
-        """Test Foundation Models client initialization with project_id"""
-        mock_manager = MagicMock()
-        mock_manager.get_valid_token.return_value = "test_token"
-        mock_token_manager.return_value = mock_manager
-
-        client = OpenAI(
-            key_id=mock_credentials["key_id"],
-            secret=mock_credentials["secret"],
-            base_url=self.FOUNDATION_MODELS_URL,
-            project_id="test_project_id",
-        )
-
-        assert client.key_id == mock_credentials["key_id"]
-        assert client.secret == mock_credentials["secret"]
-        assert str(client.base_url) == self.FOUNDATION_MODELS_URL + "/"
-        assert client.project_id == "test_project_id"
-        assert client.token_manager == mock_manager
-
-    @patch("evolution_openai.client.EvolutionTokenManager")
-    def test_foundation_models_async_client_initialization(
-        self, mock_token_manager, mock_credentials
-    ):
-        """Test Foundation Models async client initialization with project_id"""
-        mock_manager = MagicMock()
-        mock_manager.get_valid_token.return_value = "test_token"
-        mock_token_manager.return_value = mock_manager
-
-        client = AsyncOpenAI(
-            key_id=mock_credentials["key_id"],
-            secret=mock_credentials["secret"],
-            base_url=self.FOUNDATION_MODELS_URL,
-            project_id="test_project_id",
-        )
-
-        assert client.key_id == mock_credentials["key_id"]
-        assert client.secret == mock_credentials["secret"]
-        assert str(client.base_url) == self.FOUNDATION_MODELS_URL + "/"
-        assert client.project_id == "test_project_id"
-        assert client.token_manager == mock_manager
-
-    @patch("evolution_openai.client.EvolutionTokenManager")
-    def test_foundation_models_client_properties(
-        self, mock_token_manager, mock_credentials
-    ):
-        """Test Foundation Models client properties"""
-        mock_manager = MagicMock()
-        mock_manager.get_valid_token.return_value = "test_token"
-        mock_manager.get_token_info.return_value = {
-            "has_token": True,
-            "is_valid": True,
-        }
-        mock_token_manager.return_value = mock_manager
-
-        client = OpenAI(
-            key_id=mock_credentials["key_id"],
-            secret=mock_credentials["secret"],
-            base_url=self.FOUNDATION_MODELS_URL,
-            project_id="test_project_id",
-        )
-
-        # Test current_token property
-        assert client.current_token == "test_token"
-
-        # Test get_token_info method
-        info = client.get_token_info()
-        assert info["has_token"] is True
-        assert info["is_valid"] is True
-
-        # Test refresh_token method
-        mock_manager.invalidate_token = MagicMock()
-        mock_manager.get_valid_token.return_value = "new_token"
-
-        new_token = client.refresh_token()
-        assert new_token == "new_token"
-        mock_manager.invalidate_token.assert_called_once()
-
-    @patch("evolution_openai.client.EvolutionTokenManager")
-    def test_foundation_models_with_options(
-        self, mock_token_manager, mock_credentials
-    ):
-        """Test Foundation Models client with_options method"""
-        mock_manager = MagicMock()
-        mock_manager.get_valid_token.return_value = "test_token"
-        mock_token_manager.return_value = mock_manager
-
-        client = OpenAI(
-            key_id=mock_credentials["key_id"],
-            secret=mock_credentials["secret"],
-            base_url=self.FOUNDATION_MODELS_URL,
-            project_id="test_project_id",
-            timeout=30.0,
-        )
-
-        # Test with_options returns a new client instance
-        new_client = client.with_options(timeout=60.0, max_retries=3)
-
-        # Original client should be unchanged
-        assert client.timeout == 30.0
-
-        # New client should have updated options
-        assert new_client.timeout == 60.0
-        assert new_client.max_retries == 3
-
-    @patch("evolution_openai.client.EvolutionTokenManager")
-    def test_foundation_models_auth_error_handling(
-        self, mock_token_manager, mock_credentials
-    ):
-        """Test Foundation Models authentication error handling"""
-        mock_manager = MagicMock()
-        mock_manager.get_valid_token.side_effect = EvolutionAuthError(
-            "Authentication failed", status_code=401
-        )
-        mock_token_manager.return_value = mock_manager
-
-        with pytest.raises(EvolutionAuthError):
-            OpenAI(
-                key_id=mock_credentials["key_id"],
-                secret=mock_credentials["secret"],
-                base_url=self.FOUNDATION_MODELS_URL,
-                project_id="test_project_id",
-            )
-
-    def test_foundation_models_missing_project_id(self, mock_credentials):
-        """Test Foundation Models client without project_id"""
-        # This should work fine - project_id is optional
-        with patch(
-            "evolution_openai.client.EvolutionTokenManager"
-        ) as mock_token_manager:
-            mock_manager = MagicMock()
-            mock_manager.get_valid_token.return_value = "test_token"
-            mock_token_manager.return_value = mock_manager
-
-            client = OpenAI(
-                key_id=mock_credentials["key_id"],
-                secret=mock_credentials["secret"],
-                base_url=self.FOUNDATION_MODELS_URL,
-                # No project_id provided
-            )
-
-            assert client.project_id is None
-
-    @patch("evolution_openai.client.EvolutionTokenManager")
-    def test_foundation_models_context_manager(
-        self, mock_token_manager, mock_credentials
-    ):
-        """Test Foundation Models client as context manager"""
-        mock_manager = MagicMock()
-        mock_manager.get_valid_token.return_value = "test_token"
-        mock_token_manager.return_value = mock_manager
-
-        # Test sync client context manager
-        with OpenAI(
-            key_id=mock_credentials["key_id"],
-            secret=mock_credentials["secret"],
-            base_url=self.FOUNDATION_MODELS_URL,
-            project_id="test_project_id",
-        ) as client:
-            assert client.current_token == "test_token"
-
-    @patch("evolution_openai.client.EvolutionTokenManager")
-    async def test_foundation_models_async_context_manager(
-        self, mock_token_manager, mock_credentials
-    ):
-        """Test Foundation Models async client as context manager"""
-        mock_manager = MagicMock()
-        mock_manager.get_valid_token.return_value = "test_token"
-        mock_token_manager.return_value = mock_manager
-
-        # Test async client context manager
-        async with AsyncOpenAI(
-            key_id=mock_credentials["key_id"],
-            secret=mock_credentials["secret"],
-            base_url=self.FOUNDATION_MODELS_URL,
-            project_id="test_project_id",
-        ) as client:
-            assert client.current_token == "test_token"
-
-
-@pytest.mark.unit
-class TestFoundationModelsConfiguration:
-    """Unit tests for Foundation Models configuration"""
-
-    def test_foundation_models_url_validation(self):
-        """Test Foundation Models URL validation"""
-        foundation_models_url = (
-            "https://foundation-models.api.cloud.ru/api/gigacube/openai/v1"
-        )
-
-        # URL should be valid
-        assert foundation_models_url.startswith("https://")
-        assert "foundation-models.api.cloud.ru" in foundation_models_url
-        assert foundation_models_url.endswith("/v1")
-
-    def test_foundation_models_default_model(self):
-        """Test Foundation Models default model"""
-        default_model = "RefalMachine/RuadaptQwen2.5-7B-Lite-Beta"
-
-        # Model name should be valid
-        assert "/" in default_model
-        assert len(default_model) > 0
-        assert default_model.startswith("RefalMachine/")
-
-    def test_foundation_models_timeout_configuration(self):
-        """Test Foundation Models timeout configuration"""
-        # Foundation Models should have longer timeout than regular API
-        regular_timeout = 30.0
-        foundation_models_timeout = 60.0
-
-        assert foundation_models_timeout > regular_timeout
-        assert foundation_models_timeout >= 60.0  # At least 1 minute
-
-    def test_foundation_models_required_params(self):
-        """Test Foundation Models required parameters"""
-        required_params = ["key_id", "secret", "base_url"]
-        optional_params = ["project_id", "timeout", "max_retries"]
-
-        # Verify we have the right parameter lists
-        assert len(required_params) == 3
-        assert len(optional_params) == 3
-        assert (
-            "project_id" in optional_params
-        )  # project_id is optional but recommended
diff --git a/tests/test_method_compatibility.py b/tests/test_method_compatibility.py
index 76f1526..421f4d4 100644
--- a/tests/test_method_compatibility.py
+++ b/tests/test_method_compatibility.py
@@ -10,7 +10,7 @@

 import pytest

-from evolution_openai import OpenAI
+from evolution_openai import EvolutionOpenAI

 try:
     import openai
@@ -161,7 +161,7 @@ def test_chat_completions_all_parameters(
         mock_openai_instance._client = MagicMock()
         mock_openai_instance.api_key = "test_token"

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -233,7 +233,7 @@ def test_streaming_parameters(
         mock_openai_instance._client = MagicMock()
         mock_openai_instance.api_key = "test_token"

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -289,7 +289,7 @@ def test_models_list_call(
         mock_openai_instance._client = MagicMock()
         mock_openai_instance.api_key = "test_token"

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -332,7 +332,7 @@ def test_models_retrieve_call(
         mock_openai_instance._client = MagicMock()
         mock_openai_instance.api_key = "test_token"

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
diff --git a/tests/test_method_compatibility_minimal.py b/tests/test_method_compatibility_minimal.py
index 0ee9f8d..72f776c 100644
--- a/tests/test_method_compatibility_minimal.py
+++ b/tests/test_method_compatibility_minimal.py
@@ -34,9 +34,9 @@ def test_chat_completions_basic(
         # Мокаем HTTP клиент
         mock_openai_instance._client = MagicMock()

-        from evolution_openai import OpenAI
+        from evolution_openai import EvolutionOpenAI

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -75,9 +75,9 @@ def test_models_list_basic(
         # Мокаем HTTP клиент
         mock_openai_instance._client = MagicMock()

-        from evolution_openai import OpenAI
+        from evolution_openai import EvolutionOpenAI

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
diff --git a/tests/test_version_compatibility.py b/tests/test_version_compatibility.py
index 57d786a..d07cd6c 100644
--- a/tests/test_version_compatibility.py
+++ b/tests/test_version_compatibility.py
@@ -9,7 +9,7 @@

 import pytest

-from evolution_openai import OpenAI
+from evolution_openai import EvolutionOpenAI

 try:
     import openai
@@ -58,7 +58,7 @@ def test_v1_api_compatibility(
         mock_openai_instance.api_key = "test_token"

         # Создаем клиент
-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -119,7 +119,7 @@ def test_function_calling_compatibility(
         mock_openai_instance._client = MagicMock()
         mock_openai_instance.api_key = "test_token"

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -169,7 +169,7 @@ def test_tools_compatibility(
         mock_openai_instance._client = MagicMock()
         mock_openai_instance.api_key = "test_token"

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -225,7 +225,7 @@ def test_old_parameter_names(
         mock_openai_instance._client = MagicMock()
         mock_openai_instance.api_key = "test_token"

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -273,7 +273,7 @@ def test_legacy_completion_api(
         mock_openai_instance._client = MagicMock()
         mock_openai_instance.api_key = "test_token"

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -320,7 +320,7 @@ def test_new_parameters_passthrough(
         mock_http_client = MagicMock()
         mock_openai_instance._client = mock_http_client

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],
@@ -465,7 +465,7 @@ def test_optional_parameters_work(
         mock_openai_instance._client = MagicMock()
         mock_openai_instance.api_key = "test_token"

-        client = OpenAI(
+        client = EvolutionOpenAI(
             key_id=mock_credentials["key_id"],
             secret=mock_credentials["secret"],
             base_url=mock_credentials["base_url"],