From d296abdcdcf004355239cce013f9b9675f3c8410 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 26 Mar 2026 10:59:40 +0100 Subject: [PATCH 01/30] Python: fix OpenAI Azure routing and provider samples Prefer OpenAI when OPENAI_API_KEY is present unless Azure is explicitly requested. Clarify constructor docs, keep deprecated Azure wrappers compatible with stricter settings validation, and refresh the provider samples and tests to use the current client patterns. Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- python/README.md | 10 + .../_deprecated_azure_openai.py | 91 ++-- python/packages/openai/AGENTS.md | 4 + python/packages/openai/README.md | 4 + .../agent_framework_openai/_chat_client.py | 317 +++++++------ .../_chat_completion_client.py | 416 ++++++++++-------- .../openai/agent_framework_openai/_shared.py | 247 ++++++----- .../packages/openai/tests/openai/conftest.py | 4 + .../tests/openai/test_openai_chat_client.py | 5 +- .../openai/test_openai_chat_client_azure.py | 99 +++-- .../test_openai_chat_completion_client.py | 6 +- ...est_openai_chat_completion_client_azure.py | 102 +++-- .../02-agents/providers/azure/README.md | 48 +- ...=> openai_chat_completion_client_basic.py} | 33 +- ...mpletion_client_with_explicit_settings.py} | 21 +- ..._completion_client_with_function_tools.py} | 0 ...ai_chat_completion_client_with_session.py} | 0 .../providers/azure/openai_client_basic.py | 90 ++++ .../openai_client_with_function_tools.py | 137 ++++++ .../azure/openai_client_with_session.py | 152 +++++++ .../openai_client_with_structured_output.py | 93 ++++ .../02-agents/providers/custom/README.md | 6 +- .../02-agents/providers/openai/README.md | 102 +++-- .../openai/chat_completion_client_basic.py | 85 ++++ ...mpletion_client_with_explicit_settings.py} | 18 +- ..._completion_client_with_function_tools.py} | 14 +- ... 
chat_completion_client_with_local_mcp.py} | 12 +- ...letion_client_with_runtime_json_schema.py} | 10 +- ...=> chat_completion_client_with_session.py} | 16 +- ...chat_completion_client_with_web_search.py} | 8 +- ...i_chat_client_basic.py => client_basic.py} | 26 +- ...e_analysis.py => client_image_analysis.py} | 12 +- ...neration.py => client_image_generation.py} | 10 +- ...lient_reasoning.py => client_reasoning.py} | 8 +- ...y => client_streaming_image_generation.py} | 6 +- ...s_tool.py => client_with_agent_as_tool.py} | 8 +- ...ter.py => client_with_code_interpreter.py} | 12 +- ... => client_with_code_interpreter_files.py} | 10 +- ...gs.py => client_with_explicit_settings.py} | 0 ...e_search.py => client_with_file_search.py} | 12 +- ...tools.py => client_with_function_tools.py} | 0 ...osted_mcp.py => client_with_hosted_mcp.py} | 16 +- ..._local_mcp.py => client_with_local_mcp.py} | 12 +- ...al_shell.py => client_with_local_shell.py} | 6 +- ....py => client_with_runtime_json_schema.py} | 4 +- ...with_session.py => client_with_session.py} | 14 +- ...ent_with_shell.py => client_with_shell.py} | 12 +- ...ut.py => client_with_structured_output.py} | 16 +- ...eb_search.py => client_with_web_search.py} | 8 +- .../openai/openai_assistants_basic.py | 98 ----- .../openai_assistants_provider_methods.py | 158 ------- ...openai_assistants_with_code_interpreter.py | 81 ---- ...enai_assistants_with_existing_assistant.py | 118 ----- ...penai_assistants_with_explicit_settings.py | 61 --- .../openai_assistants_with_file_search.py | 78 ---- .../openai_assistants_with_function_tools.py | 159 ------- .../openai_assistants_with_response_format.py | 96 ---- .../openai/openai_assistants_with_session.py | 172 -------- .../openai/openai_responses_client_basic.py | 132 ------ python/samples/README.md | 10 + 60 files changed, 1581 insertions(+), 1924 deletions(-) rename python/samples/02-agents/providers/azure/{openai_chat_completion_client_azure_basic.py => 
openai_chat_completion_client_basic.py} (70%) rename python/samples/02-agents/providers/azure/{openai_chat_completion_client_azure_with_explicit_settings.py => openai_chat_completion_client_with_explicit_settings.py} (71%) rename python/samples/02-agents/providers/azure/{openai_chat_completion_client_azure_with_function_tools.py => openai_chat_completion_client_with_function_tools.py} (100%) rename python/samples/02-agents/providers/azure/{openai_chat_completion_client_azure_with_session.py => openai_chat_completion_client_with_session.py} (100%) create mode 100644 python/samples/02-agents/providers/azure/openai_client_basic.py create mode 100644 python/samples/02-agents/providers/azure/openai_client_with_function_tools.py create mode 100644 python/samples/02-agents/providers/azure/openai_client_with_session.py create mode 100644 python/samples/02-agents/providers/azure/openai_client_with_structured_output.py create mode 100644 python/samples/02-agents/providers/openai/chat_completion_client_basic.py rename python/samples/02-agents/providers/openai/{openai_responses_client_with_explicit_settings.py => chat_completion_client_with_explicit_settings.py} (74%) rename python/samples/02-agents/providers/openai/{openai_responses_client_with_function_tools.py => chat_completion_client_with_function_tools.py} (91%) rename python/samples/02-agents/providers/openai/{openai_chat_client_with_local_mcp.py => chat_completion_client_with_local_mcp.py} (88%) rename python/samples/02-agents/providers/openai/{openai_responses_client_with_runtime_json_schema.py => chat_completion_client_with_runtime_json_schema.py} (89%) rename python/samples/02-agents/providers/openai/{openai_chat_client_with_session.py => chat_completion_client_with_session.py} (91%) rename python/samples/02-agents/providers/openai/{openai_chat_client_with_web_search.py => chat_completion_client_with_web_search.py} (86%) rename python/samples/02-agents/providers/openai/{openai_chat_client_basic.py => 
client_basic.py} (73%) rename python/samples/02-agents/providers/openai/{openai_responses_client_image_analysis.py => client_image_analysis.py} (70%) rename python/samples/02-agents/providers/openai/{openai_responses_client_image_generation.py => client_image_generation.py} (90%) rename python/samples/02-agents/providers/openai/{openai_responses_client_reasoning.py => client_reasoning.py} (91%) rename python/samples/02-agents/providers/openai/{openai_responses_client_streaming_image_generation.py => client_streaming_image_generation.py} (96%) rename python/samples/02-agents/providers/openai/{openai_responses_client_with_agent_as_tool.py => client_with_agent_as_tool.py} (90%) rename python/samples/02-agents/providers/openai/{openai_responses_client_with_code_interpreter.py => client_with_code_interpreter.py} (85%) rename python/samples/02-agents/providers/openai/{openai_responses_client_with_code_interpreter_files.py => client_with_code_interpreter_files.py} (92%) rename python/samples/02-agents/providers/openai/{openai_chat_client_with_explicit_settings.py => client_with_explicit_settings.py} (100%) rename python/samples/02-agents/providers/openai/{openai_responses_client_with_file_search.py => client_with_file_search.py} (85%) rename python/samples/02-agents/providers/openai/{openai_chat_client_with_function_tools.py => client_with_function_tools.py} (100%) rename python/samples/02-agents/providers/openai/{openai_responses_client_with_hosted_mcp.py => client_with_hosted_mcp.py} (95%) rename python/samples/02-agents/providers/openai/{openai_responses_client_with_local_mcp.py => client_with_local_mcp.py} (90%) rename python/samples/02-agents/providers/openai/{openai_responses_client_with_local_shell.py => client_with_local_shell.py} (96%) rename python/samples/02-agents/providers/openai/{openai_chat_client_with_runtime_json_schema.py => client_with_runtime_json_schema.py} (95%) rename python/samples/02-agents/providers/openai/{openai_responses_client_with_session.py 
=> client_with_session.py} (93%) rename python/samples/02-agents/providers/openai/{openai_responses_client_with_shell.py => client_with_shell.py} (82%) rename python/samples/02-agents/providers/openai/{openai_responses_client_with_structured_output.py => client_with_structured_output.py} (87%) rename python/samples/02-agents/providers/openai/{openai_responses_client_with_web_search.py => client_with_web_search.py} (88%) delete mode 100644 python/samples/02-agents/providers/openai/openai_assistants_basic.py delete mode 100644 python/samples/02-agents/providers/openai/openai_assistants_provider_methods.py delete mode 100644 python/samples/02-agents/providers/openai/openai_assistants_with_code_interpreter.py delete mode 100644 python/samples/02-agents/providers/openai/openai_assistants_with_existing_assistant.py delete mode 100644 python/samples/02-agents/providers/openai/openai_assistants_with_explicit_settings.py delete mode 100644 python/samples/02-agents/providers/openai/openai_assistants_with_file_search.py delete mode 100644 python/samples/02-agents/providers/openai/openai_assistants_with_function_tools.py delete mode 100644 python/samples/02-agents/providers/openai/openai_assistants_with_response_format.py delete mode 100644 python/samples/02-agents/providers/openai/openai_assistants_with_session.py delete mode 100644 python/samples/02-agents/providers/openai/openai_responses_client_basic.py diff --git a/python/README.md b/python/README.md index f9350a08a4..32462ee0a2 100644 --- a/python/README.md +++ b/python/README.md @@ -57,6 +57,16 @@ FOUNDRY_PROJECT_ENDPOINT=... FOUNDRY_MODEL=... ``` +For the generic OpenAI clients (`OpenAIChatClient` and `OpenAIChatCompletionClient`), configuration +resolves in this order: + +1. Explicit Azure inputs such as `credential`, `azure_endpoint`, or `api_version` +2. `OPENAI_API_KEY` / explicit OpenAI API-key parameters +3. 
Azure environment fallback such as `AZURE_OPENAI_ENDPOINT` and `AZURE_OPENAI_API_KEY` + +This means mixed shells default to OpenAI when `OPENAI_API_KEY` is present. To force Azure routing, +pass an explicit Azure input such as `credential=AzureCliCredential()`. + You can also override environment variables by explicitly passing configuration parameters to the chat client constructor: ```python diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_deprecated_azure_openai.py b/python/packages/azure-ai/agent_framework_azure_ai/_deprecated_azure_openai.py index 8370412394..f222e4792c 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_deprecated_azure_openai.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_deprecated_azure_openai.py @@ -11,8 +11,10 @@ import json import logging +import os import sys from collections.abc import Mapping, Sequence +from contextlib import contextmanager from copy import copy from typing import TYPE_CHECKING, Any, ClassVar, Final, Generic, cast from urllib.parse import urljoin, urlparse @@ -109,6 +111,39 @@ def _apply_azure_defaults( settings["token_endpoint"] = default_token_endpoint +@contextmanager +def _prefer_single_azure_endpoint_env(*, endpoint: str | None, base_url: str | None) -> Any: + """Temporarily expose only the Azure endpoint setting that raw OpenAI clients accept. + + The deprecated Azure wrappers have historically tolerated both + ``AZURE_OPENAI_BASE_URL`` and ``AZURE_OPENAI_ENDPOINT`` being present and prefer + ``base_url`` when both are available. The raw OpenAI constructors now validate + that exactly one is set, so we temporarily hide the unused env var while + delegating to those constructors. 
+ """ + original_base_url = os.environ.get("AZURE_OPENAI_BASE_URL") + original_endpoint = os.environ.get("AZURE_OPENAI_ENDPOINT") + + try: + if base_url: + os.environ["AZURE_OPENAI_BASE_URL"] = str(base_url) + os.environ.pop("AZURE_OPENAI_ENDPOINT", None) + elif endpoint: + os.environ["AZURE_OPENAI_ENDPOINT"] = str(endpoint) + os.environ.pop("AZURE_OPENAI_BASE_URL", None) + yield + finally: + if original_base_url is None: + os.environ.pop("AZURE_OPENAI_BASE_URL", None) + else: + os.environ["AZURE_OPENAI_BASE_URL"] = original_base_url + + if original_endpoint is None: + os.environ.pop("AZURE_OPENAI_ENDPOINT", None) + else: + os.environ["AZURE_OPENAI_ENDPOINT"] = original_endpoint + + # endregion @@ -315,6 +350,8 @@ def __init__( "or 'AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME' environment variable." ) + endpoint_value = azure_openai_settings.get("endpoint") + client_base_url = azure_openai_settings.get("base_url") if not async_client: # Create the Azure OpenAI client directly merged_headers = dict(copy(default_headers)) if default_headers else {} @@ -332,9 +369,7 @@ def __init__( if not api_key_secret and not ad_token_provider: raise ValueError("Please provide either api_key, credential, or a client.") - client_endpoint = azure_openai_settings.get("endpoint") - client_base_url = azure_openai_settings.get("base_url") - if not client_endpoint and not client_base_url: + if not endpoint_value and not client_base_url: raise ValueError("Please provide an endpoint or a base_url") client_args: dict[str, Any] = {"default_headers": merged_headers} @@ -346,8 +381,8 @@ def __init__( client_args["api_key"] = api_key_secret.get_secret_value() if client_base_url: client_args["base_url"] = str(client_base_url) - if client_endpoint and not client_base_url: - client_args["azure_endpoint"] = str(client_endpoint) + if endpoint_value and not client_base_url: + client_args["azure_endpoint"] = str(endpoint_value) if responses_deployment_name: client_args["azure_deployment"] = 
responses_deployment_name if "websocket_base_url" in kwargs: @@ -360,16 +395,17 @@ def __init__( self.api_version = azure_openai_settings.get("api_version") or "" self.deployment_name = responses_deployment_name - super().__init__( - async_client=async_client, - model=responses_deployment_name, - api_version=azure_openai_settings.get("api_version"), - instruction_role=instruction_role, - default_headers=default_headers, - middleware=middleware, # type: ignore[arg-type] - function_invocation_configuration=function_invocation_configuration, - **kwargs, - ) + with _prefer_single_azure_endpoint_env(endpoint=endpoint_value, base_url=client_base_url): + super().__init__( + async_client=async_client, + model=responses_deployment_name, + api_version=azure_openai_settings.get("api_version"), + instruction_role=instruction_role, + default_headers=default_headers, + middleware=middleware, # type: ignore[arg-type] + function_invocation_configuration=function_invocation_configuration, + **kwargs, + ) @staticmethod def _create_client_from_project( @@ -530,6 +566,8 @@ def __init__( "or 'AZURE_OPENAI_CHAT_DEPLOYMENT_NAME' environment variable." 
) + endpoint_value = azure_openai_settings.get("endpoint") + base_url_value = azure_openai_settings.get("base_url") if not async_client: # Create the Azure OpenAI client directly merged_headers = dict(copy(default_headers)) if default_headers else {} @@ -547,8 +585,6 @@ def __init__( if not api_key_secret and not ad_token_provider: raise ValueError("Please provide either api_key, credential, or a client.") - endpoint_value = azure_openai_settings.get("endpoint") - base_url_value = azure_openai_settings.get("base_url") if not endpoint_value and not base_url_value: raise ValueError("Please provide an endpoint or a base_url") @@ -573,16 +609,17 @@ def __init__( self.api_version = azure_openai_settings.get("api_version") or "" self.deployment_name = chat_deployment_name - super().__init__( - async_client=async_client, - model=chat_deployment_name, - api_version=azure_openai_settings.get("api_version"), - instruction_role=instruction_role, - default_headers=default_headers, - additional_properties=additional_properties, - middleware=middleware, # type: ignore[arg-type] - function_invocation_configuration=function_invocation_configuration, - ) + with _prefer_single_azure_endpoint_env(endpoint=endpoint_value, base_url=base_url_value): + super().__init__( + async_client=async_client, + model=chat_deployment_name, + api_version=azure_openai_settings.get("api_version"), + instruction_role=instruction_role, + default_headers=default_headers, + additional_properties=additional_properties, + middleware=middleware, # type: ignore[arg-type] + function_invocation_configuration=function_invocation_configuration, + ) @override def _parse_text_from_openai(self, choice: Choice | ChunkChoice) -> Content | None: diff --git a/python/packages/openai/AGENTS.md b/python/packages/openai/AGENTS.md index d31506cf5d..2c92e1d18b 100644 --- a/python/packages/openai/AGENTS.md +++ b/python/packages/openai/AGENTS.md @@ -27,6 +27,10 @@ agent_framework_openai/ All clients follow the Raw + 
Full-Featured pattern (e.g., `RawOpenAIChatClient` + `OpenAIChatClient`). +The generic OpenAI chat clients support both OpenAI and Azure OpenAI routing. Precedence is: +explicit Azure inputs (`credential`, `azure_endpoint`, `api_version`) → OpenAI API key +(`OPENAI_API_KEY`) → Azure environment fallback (`AZURE_OPENAI_*`). + ## Dependencies - `agent-framework-core` — core abstractions diff --git a/python/packages/openai/README.md b/python/packages/openai/README.md index 6ed4d20c03..d9cf09dde9 100644 --- a/python/packages/openai/README.md +++ b/python/packages/openai/README.md @@ -15,3 +15,7 @@ from agent_framework.openai import OpenAIChatClient client = OpenAIChatClient(model_id="gpt-4o") ``` + +When both OpenAI and Azure environment variables are present, the generic OpenAI clients prefer +OpenAI whenever `OPENAI_API_KEY` is configured. To force Azure routing, pass an explicit Azure input +such as `credential`, `azure_endpoint`, or `api_version`. diff --git a/python/packages/openai/agent_framework_openai/_chat_client.py b/python/packages/openai/agent_framework_openai/_chat_client.py index 86af86895e..6e4f6eb579 100644 --- a/python/packages/openai/agent_framework_openai/_chat_client.py +++ b/python/packages/openai/agent_framework_openai/_chat_client.py @@ -14,7 +14,6 @@ MutableMapping, Sequence, ) -from copy import copy from datetime import datetime, timezone from itertools import chain from typing import ( @@ -28,12 +27,11 @@ cast, overload, ) -from urllib.parse import urljoin, urlparse from agent_framework._clients import BaseChatClient -from agent_framework._middleware import ChatMiddlewareLayer +from agent_framework._middleware import ChatAndFunctionMiddlewareTypes, ChatMiddlewareLayer from agent_framework._settings import SecretString -from agent_framework._telemetry import APP_INFO, USER_AGENT_KEY, prepend_agent_framework_to_user_agent +from agent_framework._telemetry import USER_AGENT_KEY from agent_framework._tools import ( SHELL_TOOL_KIND_VALUE, 
FunctionInvocationConfiguration, @@ -87,8 +85,7 @@ from ._exceptions import OpenAIContentFilterException from ._shared import ( - DEFAULT_AZURE_OPENAI_RESPONSES_API_VERSION, - get_api_key, + AzureTokenProvider, load_openai_service_settings, maybe_append_azure_endpoint_guidance, ) @@ -107,14 +104,15 @@ from typing_extensions import TypedDict # type: ignore # pragma: no cover if TYPE_CHECKING: - from agent_framework._middleware import ( - ChatMiddleware, - ChatMiddlewareCallable, - FunctionMiddleware, - FunctionMiddlewareCallable, - ) + from azure.core.credentials import TokenCredential + from azure.core.credentials_async import AsyncTokenCredential + + AzureCredentialTypes = TokenCredential | AsyncTokenCredential logger = logging.getLogger("agent_framework.openai") + +DEFAULT_AZURE_OPENAI_RESPONSES_API_VERSION = "preview" + OPENAI_SHELL_ENVIRONMENT_KEY = "openai.responses.shell.environment" OPENAI_SHELL_OUTPUT_TYPE_KEY = "openai.responses.shell.output_type" OPENAI_LOCAL_SHELL_CALL_ITEM_ID_KEY = "openai.responses.local_shell.call_item_id" @@ -272,8 +270,8 @@ class RawOpenAIChatClient( # type: ignore[misc] @overload def __init__( self, - *, model: str | None = None, + *, api_key: str | SecretString | Callable[[], str | Awaitable[str]] | None = None, org_id: str | None = None, base_url: str | None = None, @@ -282,31 +280,45 @@ def __init__( instruction_role: str | None = None, env_file_path: str | None = None, env_file_encoding: str | None = None, - ) -> None: ... + ) -> None: + """Initialize a raw OpenAI Responses client with OpenAI-only routing. + + This overload describes the OpenAI shape. Explicit keyword arguments are used first, + then ``OPENAI_*`` values from ``env_file_path`` or the process environment. + """ + ... 
@overload def __init__( self, - *, model: str | None = None, - api_key: str | SecretString | Callable[[], str | Awaitable[str]] | None = None, - org_id: str | None = None, - base_url: str | None = None, + *, azure_endpoint: str, + credential: AzureCredentialTypes | AzureTokenProvider | None = None, api_version: str | None = None, + api_key: str | SecretString | Callable[[], str | Awaitable[str]] | None = None, + base_url: str | None = None, default_headers: Mapping[str, str] | None = None, async_client: AsyncAzureOpenAI | AsyncOpenAI | None = None, instruction_role: str | None = None, env_file_path: str | None = None, env_file_encoding: str | None = None, - ) -> None: ... + ) -> None: + """Initialize a raw OpenAI Responses client with Azure routing. + + This overload describes the Azure shape. Explicit Azure inputs force Azure routing, + and missing Azure values fall back to ``AZURE_OPENAI_*`` values from ``env_file_path`` + or the process environment. + """ + ... def __init__( self, - *, model: str | None = None, + *, model_id: str | None = None, api_key: str | SecretString | Callable[[], str | Awaitable[str]] | None = None, + credential: AzureCredentialTypes | AzureTokenProvider | None = None, org_id: str | None = None, base_url: str | None = None, azure_endpoint: str | None = None, @@ -321,26 +333,47 @@ def __init__( """Initialize a raw OpenAI Responses client. Keyword Args: - model: OpenAI model name. + model: Model identifier to use for the request. When not provided, the constructor + reads ``OPENAI_MODEL`` for OpenAI routing or ``AZURE_OPENAI_DEPLOYMENT_NAME`` + for Azure routing. model_id: Deprecated alias for ``model``. - api_key: OpenAI API key, SecretString, or callable returning a key. - org_id: OpenAI organization ID. - base_url: Custom API base URL. - azure_endpoint: Azure OpenAI endpoint. When provided, the client uses - ``AsyncAzureOpenAI`` instead of ``AsyncOpenAI``. 
The value should be the - resource endpoint and should not end with ``/openai/v1``. For Azure OpenAI - key auth, either pass the resource endpoint without that suffix to - ``azure_endpoint`` or pass the full ``.../openai/v1`` URL to ``base_url``. - Can also be set via ``AZURE_OPENAI_ENDPOINT`` when no ``OPENAI_BASE_URL`` - is configured. - api_version: Azure OpenAI API version. Can also be set via - ``AZURE_OPENAI_API_VERSION``. + api_key: API key override. For OpenAI routing this maps to ``OPENAI_API_KEY``. + For Azure routing this can be used instead of ``AZURE_OPENAI_API_KEY`` for key + auth. A callable token provider is also accepted for backwards compatibility, + but ``credential`` is the preferred Azure auth surface. + credential: Azure credential or token provider for Azure OpenAI auth. Passing this + is an explicit Azure signal, even when ``OPENAI_API_KEY`` is also configured. + Credential objects require the optional ``azure-identity`` package. + org_id: OpenAI organization ID. Used only for OpenAI routing and resolved from + ``OPENAI_ORG_ID`` when not provided. + base_url: Base URL override. For OpenAI routing this maps to ``OPENAI_BASE_URL``. + For Azure routing this may be used instead of ``azure_endpoint`` when you want + to pass the full ``.../openai/v1`` base URL directly. + azure_endpoint: Azure resource endpoint. When not provided explicitly, Azure routing + falls back to ``AZURE_OPENAI_ENDPOINT``. + api_version: Azure API version. When not provided explicitly, Azure routing falls + back to ``AZURE_OPENAI_API_VERSION`` and then the Responses default. default_headers: Additional HTTP headers. - async_client: Pre-configured AsyncOpenAI client (skips client creation). - instruction_role: Role for instruction messages (e.g. ``"system"``). - env_file_path: Path to .env file for settings. - env_file_encoding: Encoding for .env file. + async_client: Pre-configured client. 
Passing ``AsyncAzureOpenAI`` keeps the client on + Azure; passing ``AsyncOpenAI`` keeps the client on OpenAI and bypasses env lookup. + instruction_role: Role for instruction messages (for example ``"system"``). + env_file_path: Optional ``.env`` file that is checked before process environment + variables. The same file is used for both ``OPENAI_*`` and ``AZURE_OPENAI_*`` + lookups. + env_file_encoding: Encoding for the ``.env`` file. kwargs: Additional keyword arguments forwarded to ``BaseChatClient``. + + Notes: + Environment resolution and routing precedence are: + + 1. Explicit Azure inputs (``azure_endpoint``, ``api_version``, or ``credential``) + 2. Explicit OpenAI API key or ``OPENAI_API_KEY`` + 3. Azure environment fallback + + OpenAI routing reads ``OPENAI_API_KEY``, ``OPENAI_MODEL``, ``OPENAI_ORG_ID``, and + ``OPENAI_BASE_URL``. Azure routing reads ``AZURE_OPENAI_ENDPOINT``, + ``AZURE_OPENAI_BASE_URL``, ``AZURE_OPENAI_API_KEY``, + ``AZURE_OPENAI_DEPLOYMENT_NAME``, and ``AZURE_OPENAI_API_VERSION``. """ if model_id is not None and model is None: import warnings @@ -348,98 +381,36 @@ def __init__( warnings.warn("model_id is deprecated, use model instead", DeprecationWarning, stacklevel=2) model = model_id - openai_settings: dict[str, Any] = {} - use_azure_client = isinstance(async_client, AsyncAzureOpenAI) - if not async_client: - resolved_settings, use_azure_client = load_openai_service_settings( - model=model, - api_key=api_key, - org_id=org_id, - base_url=base_url, - azure_endpoint=azure_endpoint, - api_version=api_version, - env_file_path=env_file_path, - env_file_encoding=env_file_encoding, - azure_model_env_vars=("AZURE_OPENAI_DEPLOYMENT_NAME",), - default_azure_api_version=DEFAULT_AZURE_OPENAI_RESPONSES_API_VERSION, - ) - openai_settings = dict(resolved_settings) - - api_key_value = openai_settings.get("api_key") - if not api_key_value: - raise ValueError( - "OpenAI API key is required. 
Set via the 'api_key' parameter or the " - "'OPENAI_API_KEY' or 'AZURE_OPENAI_API_KEY' environment variables." - ) - resolved_model = openai_settings.get("model") or model - if not resolved_model: - raise ValueError( - "OpenAI model is required. Set via the 'model' parameter or the " - "'OPENAI_MODEL' or 'AZURE_OPENAI_DEPLOYMENT_NAME' environment variables." - ) - model = resolved_model - - resolved_api_key = get_api_key(api_key_value) - - # Merge APP_INFO into the headers - merged_headers = dict(copy(default_headers)) if default_headers else {} - if APP_INFO: - merged_headers.update(APP_INFO) - merged_headers = prepend_agent_framework_to_user_agent(merged_headers) - - client_args: dict[str, Any] = {"api_key": resolved_api_key, "default_headers": merged_headers} - if use_azure_client: - endpoint_value = openai_settings.get("azure_endpoint") - if ( - not openai_settings.get("base_url") - and endpoint_value - and (hostname := urlparse(str(endpoint_value)).hostname) - and hostname.endswith(".openai.azure.com") - ): - openai_settings["base_url"] = urljoin(str(endpoint_value), "/openai/v1/") - - client_args.pop("api_key") - if resolved_api_version := openai_settings.get("api_version"): - client_args["api_version"] = resolved_api_version - if resolved_base_url := openai_settings.get("base_url"): - client_args["base_url"] = resolved_base_url - elif resolved_azure_endpoint := openai_settings.get("azure_endpoint"): - client_args["azure_endpoint"] = resolved_azure_endpoint - if callable(resolved_api_key): - client_args["azure_ad_token_provider"] = resolved_api_key - else: - client_args["api_key"] = resolved_api_key - client_args["azure_deployment"] = resolved_model - async_client = AsyncAzureOpenAI(**client_args) - else: - if resolved_org_id := openai_settings.get("org_id"): - client_args["organization"] = resolved_org_id - if resolved_base_url := openai_settings.get("base_url"): - client_args["base_url"] = resolved_base_url - - async_client = AsyncOpenAI(**client_args) + 
settings, client, use_azure_client = load_openai_service_settings( + model=model, + api_key=api_key, + credential=credential, + org_id=org_id, + base_url=base_url, + endpoint=azure_endpoint, + api_version=api_version, + default_azure_api_version=DEFAULT_AZURE_OPENAI_RESPONSES_API_VERSION, + default_headers=default_headers, + client=async_client, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) - self.client = async_client - self.model: str | None = model.strip() if model else None + self.client = client + self.model: str = settings.get("model") or settings.get("deployment_name") or "" # Store configuration for serialization - resolved_base_url = openai_settings.get("base_url") or base_url - resolved_azure_endpoint = openai_settings.get("azure_endpoint") or azure_endpoint - resolved_api_version = openai_settings.get("api_version") or api_version - self.org_id = openai_settings.get("org_id") or org_id - self.base_url = str(resolved_base_url) if resolved_base_url else None - self.azure_endpoint = str(resolved_azure_endpoint) if resolved_azure_endpoint else None - self.api_version = str(resolved_api_version) if use_azure_client and resolved_api_version else None + self.org_id = settings.get("org_id") + self.base_url = settings.get("base_url") + self.azure_endpoint = settings.get("endpoint") + self.api_version = settings.get("api_version") if default_headers: self.default_headers: dict[str, Any] | None = { k: v for k, v in default_headers.items() if k != USER_AGENT_KEY } else: self.default_headers = None - - if instruction_role is not None: - self.instruction_role = instruction_role - + self.instruction_role = instruction_role if use_azure_client: self.OTEL_PROVIDER_NAME = "azure.ai.openai" # type: ignore[misc] @@ -2452,8 +2423,8 @@ class OpenAIChatClient( # type: ignore[misc] @overload def __init__( self, - *, model: str | None = None, + *, api_key: str | Callable[[], str | Awaitable[str]] | None = None, org_id: str | None = None, base_url: str 
| None = None, @@ -2462,38 +2433,48 @@ def __init__( instruction_role: str | None = None, env_file_path: str | None = None, env_file_encoding: str | None = None, - middleware: ( - Sequence[ChatMiddleware | ChatMiddlewareCallable | FunctionMiddleware | FunctionMiddlewareCallable] | None - ) = None, + middleware: Sequence[ChatAndFunctionMiddlewareTypes] | None = None, function_invocation_configuration: FunctionInvocationConfiguration | None = None, - ) -> None: ... + ) -> None: + """Initialize an OpenAI Responses client with OpenAI-only routing. + + This overload describes the OpenAI shape. Explicit keyword arguments are used first, + then ``OPENAI_*`` values from ``env_file_path`` or the process environment. + """ + ... @overload def __init__( self, - *, model: str | None = None, + *, + azure_endpoint: str | None = None, + credential: AzureCredentialTypes | AzureTokenProvider | None = None, + api_version: str | None = None, api_key: str | Callable[[], str | Awaitable[str]] | None = None, - org_id: str | None = None, base_url: str | None = None, - azure_endpoint: str, - api_version: str | None = None, default_headers: Mapping[str, str] | None = None, async_client: AsyncAzureOpenAI | AsyncOpenAI | None = None, instruction_role: str | None = None, env_file_path: str | None = None, env_file_encoding: str | None = None, - middleware: ( - Sequence[ChatMiddleware | ChatMiddlewareCallable | FunctionMiddleware | FunctionMiddlewareCallable] | None - ) = None, + middleware: Sequence[ChatAndFunctionMiddlewareTypes] | None = None, function_invocation_configuration: FunctionInvocationConfiguration | None = None, - ) -> None: ... + ) -> None: + """Initialize an OpenAI Responses client with Azure routing. + + This overload describes the Azure shape. Explicit Azure inputs force Azure routing, + and missing Azure values fall back to ``AZURE_OPENAI_*`` values from ``env_file_path`` + or the process environment. + """ + ... 
def __init__( self, - *, model: str | None = None, + *, api_key: str | Callable[[], str | Awaitable[str]] | None = None, + credential: AzureCredentialTypes | AzureTokenProvider | None = None, org_id: str | None = None, base_url: str | None = None, azure_endpoint: str | None = None, @@ -2503,43 +2484,56 @@ def __init__( instruction_role: str | None = None, env_file_path: str | None = None, env_file_encoding: str | None = None, - middleware: ( - Sequence[ChatMiddleware | ChatMiddlewareCallable | FunctionMiddleware | FunctionMiddlewareCallable] | None - ) = None, + middleware: Sequence[ChatAndFunctionMiddlewareTypes] | None = None, function_invocation_configuration: FunctionInvocationConfiguration | None = None, **kwargs: Any, ) -> None: """Initialize an OpenAI Responses client. Keyword Args: - model: OpenAI model name, see https://platform.openai.com/docs/models. - Can also be set via environment variable OPENAI_MODEL. - api_key: The API key to use. If provided will override the env vars or .env file value. - Can also be set via environment variable OPENAI_API_KEY. - org_id: The org ID to use. If provided will override the env vars or .env file value. - Can also be set via environment variable OPENAI_ORG_ID. - base_url: The base URL to use. If provided will override the standard value. - Can also be set via environment variable OPENAI_BASE_URL. - azure_endpoint: Azure OpenAI endpoint. When provided, the client uses - ``AsyncAzureOpenAI``. The value should be the Azure resource endpoint and - should not end with ``/openai/v1``. For Azure OpenAI key auth, either pass - the resource endpoint without that suffix to ``azure_endpoint`` or pass the - full ``.../openai/v1`` URL to ``base_url`` instead. Can also be discovered - from ``AZURE_OPENAI_ENDPOINT`` when no OpenAI base URL is configured. - api_version: Azure OpenAI API version. Can also be set via - ``AZURE_OPENAI_API_VERSION``. 
- default_headers: The default headers mapping of string keys to - string values for HTTP requests. - async_client: An existing client to use. - instruction_role: The role to use for 'instruction' messages, for example, - "system" or "developer". If not provided, the default is "system". - env_file_path: Use the environment settings file as a fallback - to environment variables. - env_file_encoding: The encoding of the environment settings file. + model: Model identifier to use for the request. When not provided, the constructor + reads ``OPENAI_MODEL`` for OpenAI routing or ``AZURE_OPENAI_DEPLOYMENT_NAME`` + for Azure routing. + api_key: API key override. For OpenAI routing this maps to ``OPENAI_API_KEY``. + For Azure routing this can be used instead of ``AZURE_OPENAI_API_KEY`` for key + auth. A callable token provider is also accepted for backwards compatibility, + but ``credential`` is the preferred Azure auth surface. + credential: Azure credential or token provider for Azure OpenAI auth. Passing this + is an explicit Azure signal, even when ``OPENAI_API_KEY`` is also configured. + Credential objects require the optional ``azure-identity`` package. + org_id: OpenAI organization ID. Used only for OpenAI routing and resolved from + ``OPENAI_ORG_ID`` when not provided. + base_url: Base URL override. For OpenAI routing this maps to ``OPENAI_BASE_URL``. + For Azure routing this may be used instead of ``azure_endpoint`` when you want + to pass the full ``.../openai/v1`` base URL directly. + azure_endpoint: Azure resource endpoint. When not provided explicitly, Azure routing + falls back to ``AZURE_OPENAI_ENDPOINT``. + api_version: Azure API version. When not provided explicitly, Azure routing falls + back to ``AZURE_OPENAI_API_VERSION`` and then the Responses default. + default_headers: Default HTTP headers that are merged into each request. + async_client: Pre-configured client. 
Passing ``AsyncAzureOpenAI`` keeps the client on + Azure; passing ``AsyncOpenAI`` keeps the client on OpenAI and bypasses env lookup. + instruction_role: Role to use for instruction messages (for example ``"system"``). + env_file_path: Optional ``.env`` file that is checked before process environment + variables. The same file is used for both ``OPENAI_*`` and ``AZURE_OPENAI_*`` + lookups. + env_file_encoding: Encoding for the ``.env`` file. middleware: Optional middleware to apply to the client. function_invocation_configuration: Optional function invocation configuration override. kwargs: Other keyword parameters. + Notes: + Environment resolution and routing precedence are: + + 1. Explicit Azure inputs (``azure_endpoint``, ``api_version``, or ``credential``) + 2. Explicit OpenAI API key or ``OPENAI_API_KEY`` + 3. Azure environment fallback + + OpenAI routing reads ``OPENAI_API_KEY``, ``OPENAI_MODEL``, ``OPENAI_ORG_ID``, and + ``OPENAI_BASE_URL``. Azure routing reads ``AZURE_OPENAI_ENDPOINT``, + ``AZURE_OPENAI_BASE_URL``, ``AZURE_OPENAI_API_KEY``, + ``AZURE_OPENAI_DEPLOYMENT_NAME``, and ``AZURE_OPENAI_API_VERSION``. + Examples: .. 
code-block:: python @@ -2571,6 +2565,7 @@ class MyOptions(OpenAIChatOptions, total=False): super().__init__( model=model, api_key=api_key, + credential=credential, org_id=org_id, base_url=base_url, azure_endpoint=azure_endpoint, diff --git a/python/packages/openai/agent_framework_openai/_chat_completion_client.py b/python/packages/openai/agent_framework_openai/_chat_completion_client.py index aa78079dd2..85f0167c45 100644 --- a/python/packages/openai/agent_framework_openai/_chat_completion_client.py +++ b/python/packages/openai/agent_framework_openai/_chat_completion_client.py @@ -13,16 +13,15 @@ MutableMapping, Sequence, ) -from copy import copy from datetime import datetime, timezone from itertools import chain -from typing import Any, ClassVar, Generic, Literal, cast, overload +from typing import TYPE_CHECKING, Any, ClassVar, Generic, Literal, cast, overload from agent_framework._clients import BaseChatClient from agent_framework._docstrings import apply_layered_docstring from agent_framework._middleware import ChatAndFunctionMiddlewareTypes, ChatMiddlewareLayer from agent_framework._settings import SecretString -from agent_framework._telemetry import APP_INFO, USER_AGENT_KEY, prepend_agent_framework_to_user_agent +from agent_framework._telemetry import USER_AGENT_KEY from agent_framework._tools import ( FunctionInvocationConfiguration, FunctionInvocationLayer, @@ -59,8 +58,7 @@ from ._exceptions import OpenAIContentFilterException from ._shared import ( - DEFAULT_AZURE_OPENAI_CHAT_COMPLETION_API_VERSION, - get_api_key, + AzureTokenProvider, load_openai_service_settings, maybe_append_azure_endpoint_guidance, ) @@ -78,8 +76,16 @@ else: from typing_extensions import TypedDict # type: ignore # pragma: no cover +if TYPE_CHECKING: + from azure.core.credentials import TokenCredential + from azure.core.credentials_async import AsyncTokenCredential + + AzureCredentialTypes = TokenCredential | AsyncTokenCredential + logger = logging.getLogger("agent_framework.openai") 
+DEFAULT_AZURE_OPENAI_CHAT_COMPLETION_API_VERSION = "2024-10-21" + ResponseModelBoundT = TypeVar("ResponseModelBoundT", bound=BaseModel) ResponseModelT = TypeVar("ResponseModelT", bound=BaseModel | None, default=None) @@ -179,8 +185,8 @@ class RawOpenAIChatCompletionClient( # type: ignore[misc] @overload def __init__( self, - *, model: str | None = None, + *, api_key: str | SecretString | Callable[[], str | Awaitable[str]] | None = None, org_id: str | None = None, base_url: str | None = None, @@ -189,31 +195,45 @@ def __init__( instruction_role: str | None = None, env_file_path: str | None = None, env_file_encoding: str | None = None, - ) -> None: ... + ) -> None: + """Initialize a raw OpenAI Chat Completions client with OpenAI-only routing. + + This overload describes the OpenAI shape. Explicit keyword arguments are used first, + then ``OPENAI_*`` values from ``env_file_path`` or the process environment. + """ + ... @overload def __init__( self, - *, model: str | None = None, + *, + azure_endpoint: str | None = None, + credential: AzureCredentialTypes | AzureTokenProvider | None = None, + api_version: str | None = None, api_key: str | SecretString | Callable[[], str | Awaitable[str]] | None = None, - org_id: str | None = None, base_url: str | None = None, - azure_endpoint: str, - api_version: str | None = None, default_headers: Mapping[str, str] | None = None, async_client: AsyncAzureOpenAI | AsyncOpenAI | None = None, instruction_role: str | None = None, env_file_path: str | None = None, env_file_encoding: str | None = None, - ) -> None: ... + ) -> None: + """Initialize a raw OpenAI Chat Completions client with Azure routing. + + This overload describes the Azure shape. Explicit Azure inputs force Azure routing, + and missing Azure values fall back to ``AZURE_OPENAI_*`` values from ``env_file_path`` + or the process environment. + """ + ... 
def __init__( self, - *, model: str | None = None, + *, model_id: str | None = None, api_key: str | SecretString | Callable[[], str | Awaitable[str]] | None = None, + credential: AzureCredentialTypes | AzureTokenProvider | None = None, org_id: str | None = None, base_url: str | None = None, azure_endpoint: str | None = None, @@ -228,26 +248,47 @@ def __init__( """Initialize a raw OpenAI Chat completion client. Keyword Args: - model: OpenAI model name. + model: Model identifier to use for the request. When not provided, the constructor + reads ``OPENAI_MODEL`` for OpenAI routing or ``AZURE_OPENAI_DEPLOYMENT_NAME`` + for Azure routing. model_id: Deprecated alias for ``model``. - api_key: OpenAI API key, SecretString, or callable returning a key. - org_id: OpenAI organization ID. - base_url: Custom API base URL. - azure_endpoint: Azure OpenAI endpoint. When provided, the client uses - ``AsyncAzureOpenAI`` instead of ``AsyncOpenAI``. The value should be the - resource endpoint and should not end with ``/openai/v1``. For Azure OpenAI - key auth, either pass the resource endpoint without that suffix to - ``azure_endpoint`` or pass the full ``.../openai/v1`` URL to ``base_url``. - Can also be set via ``AZURE_OPENAI_ENDPOINT`` when no ``OPENAI_BASE_URL`` - is configured. - api_version: Azure OpenAI API version. Can also be set via - ``AZURE_OPENAI_API_VERSION``. + api_key: API key override. For OpenAI routing this maps to ``OPENAI_API_KEY``. + For Azure routing this can be used instead of ``AZURE_OPENAI_API_KEY`` for key + auth. A callable token provider is also accepted for backwards compatibility, + but ``credential`` is the preferred Azure auth surface. + credential: Azure credential or token provider for Azure OpenAI auth. Passing this + is an explicit Azure signal, even when ``OPENAI_API_KEY`` is also configured. + Credential objects require the optional ``azure-identity`` package. + org_id: OpenAI organization ID. 
Used only for OpenAI routing and resolved from + ``OPENAI_ORG_ID`` when not provided. + base_url: Base URL override. For OpenAI routing this maps to ``OPENAI_BASE_URL``. + For Azure routing this may be used instead of ``azure_endpoint`` when you want + to pass the full ``.../openai/v1`` base URL directly. + azure_endpoint: Azure resource endpoint. When not provided explicitly, Azure routing + falls back to ``AZURE_OPENAI_ENDPOINT``. + api_version: Azure API version. When not provided explicitly, Azure routing falls + back to ``AZURE_OPENAI_API_VERSION`` and then the Chat Completions default. default_headers: Additional HTTP headers. - async_client: Pre-configured AsyncOpenAI client (skips client creation). - instruction_role: Role for instruction messages (e.g. ``"system"``). - env_file_path: Path to .env file for settings. - env_file_encoding: Encoding for .env file. + async_client: Pre-configured client. Passing ``AsyncAzureOpenAI`` keeps the client on + Azure; passing ``AsyncOpenAI`` keeps the client on OpenAI and bypasses env lookup. + instruction_role: Role for instruction messages (for example ``"system"``). + env_file_path: Optional ``.env`` file that is checked before process environment + variables. The same file is used for both ``OPENAI_*`` and ``AZURE_OPENAI_*`` + lookups. + env_file_encoding: Encoding for the ``.env`` file. kwargs: Additional keyword arguments forwarded to ``BaseChatClient``. + + Notes: + Environment resolution and routing precedence are: + + 1. Explicit Azure inputs (``azure_endpoint``, ``api_version``, or ``credential``) + 2. Explicit OpenAI API key or ``OPENAI_API_KEY`` + 3. Azure environment fallback + + OpenAI routing reads ``OPENAI_API_KEY``, ``OPENAI_MODEL``, ``OPENAI_ORG_ID``, and + ``OPENAI_BASE_URL``. Azure routing reads ``AZURE_OPENAI_ENDPOINT``, + ``AZURE_OPENAI_BASE_URL``, ``AZURE_OPENAI_API_KEY``, + ``AZURE_OPENAI_DEPLOYMENT_NAME``, and ``AZURE_OPENAI_API_VERSION``. 
""" if model_id is not None and model is None: import warnings @@ -255,89 +296,36 @@ def __init__( warnings.warn("model_id is deprecated, use model instead", DeprecationWarning, stacklevel=2) model = model_id - openai_settings: dict[str, Any] = {} - use_azure_client = isinstance(async_client, AsyncAzureOpenAI) - if not async_client: - resolved_settings, use_azure_client = load_openai_service_settings( - model=model, - api_key=api_key, - org_id=org_id, - base_url=base_url, - azure_endpoint=azure_endpoint, - api_version=api_version, - env_file_path=env_file_path, - env_file_encoding=env_file_encoding, - azure_model_env_vars=("AZURE_OPENAI_DEPLOYMENT_NAME",), - default_azure_api_version=DEFAULT_AZURE_OPENAI_CHAT_COMPLETION_API_VERSION, - ) - openai_settings = dict(resolved_settings) - - api_key_value = openai_settings.get("api_key") - if not api_key_value: - raise ValueError( - "OpenAI API key is required. Set via the 'api_key' parameter or the " - "'OPENAI_API_KEY' or 'AZURE_OPENAI_API_KEY' environment variables." - ) - resolved_model = openai_settings.get("model") or model - if not resolved_model: - raise ValueError( - "OpenAI model is required. Set via the 'model' parameter or the " - "'OPENAI_MODEL' or 'AZURE_OPENAI_DEPLOYMENT_NAME' environment variables." 
- ) - model = resolved_model - - resolved_api_key = get_api_key(api_key_value) - - # Merge APP_INFO into the headers - merged_headers = dict(copy(default_headers)) if default_headers else {} - if APP_INFO: - merged_headers.update(APP_INFO) - merged_headers = prepend_agent_framework_to_user_agent(merged_headers) - - client_args: dict[str, Any] = {"api_key": resolved_api_key, "default_headers": merged_headers} - if use_azure_client: - client_args.pop("api_key") - if resolved_api_version := openai_settings.get("api_version"): - client_args["api_version"] = resolved_api_version - if resolved_base_url := openai_settings.get("base_url"): - client_args["base_url"] = resolved_base_url - elif resolved_azure_endpoint := openai_settings.get("azure_endpoint"): - client_args["azure_endpoint"] = resolved_azure_endpoint - if callable(resolved_api_key): - client_args["azure_ad_token_provider"] = resolved_api_key - else: - client_args["api_key"] = resolved_api_key - client_args["azure_deployment"] = resolved_model - async_client = AsyncAzureOpenAI(**client_args) - else: - if resolved_org_id := openai_settings.get("org_id"): - client_args["organization"] = resolved_org_id - if resolved_base_url := openai_settings.get("base_url"): - client_args["base_url"] = resolved_base_url - - async_client = AsyncOpenAI(**client_args) + settings, client, use_azure_client = load_openai_service_settings( + model=model, + api_key=api_key, + credential=credential, + org_id=org_id, + base_url=base_url, + endpoint=azure_endpoint, + api_version=api_version, + default_azure_api_version=DEFAULT_AZURE_OPENAI_CHAT_COMPLETION_API_VERSION, + default_headers=default_headers, + client=async_client, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) - self.client = async_client - self.model: str | None = model.strip() if model else None + self.client = client + self.model: str = settings.get("model") or settings.get("deployment_name") or "" # Store configuration for serialization - 
resolved_base_url = openai_settings.get("base_url") or base_url - resolved_azure_endpoint = openai_settings.get("azure_endpoint") or azure_endpoint - resolved_api_version = openai_settings.get("api_version") or api_version - self.org_id = openai_settings.get("org_id") or org_id - self.base_url = str(resolved_base_url) if resolved_base_url else None - self.azure_endpoint = str(resolved_azure_endpoint) if resolved_azure_endpoint else None - self.api_version = str(resolved_api_version) if use_azure_client and resolved_api_version else None + self.org_id = settings.get("org_id") + self.base_url = settings.get("base_url") + self.azure_endpoint = settings.get("endpoint") + self.api_version = settings.get("api_version") if default_headers: self.default_headers: dict[str, Any] | None = { k: v for k, v in default_headers.items() if k != USER_AGENT_KEY } else: self.default_headers = None - - if instruction_role is not None: - self.instruction_role = instruction_role - + self.instruction_role = instruction_role if use_azure_client: self.OTEL_PROVIDER_NAME = "azure.ai.openai" # type: ignore[misc] @@ -978,78 +966,47 @@ class OpenAIChatCompletionClient( # type: ignore[misc] OTEL_PROVIDER_NAME: ClassVar[str] = "openai" # type: ignore[reportIncompatibleVariableOverride, misc] @overload - def get_response( - self, - messages: Sequence[Message], - *, - stream: Literal[False] = ..., - options: ChatOptions[ResponseModelBoundT], - function_invocation_kwargs: Mapping[str, Any] | None = None, - client_kwargs: Mapping[str, Any] | None = None, - middleware: Sequence[ChatAndFunctionMiddlewareTypes] | None = None, - **kwargs: Any, - ) -> Awaitable[ChatResponse[ResponseModelBoundT]]: ... 
- - @overload - def get_response( + def __init__( self, - messages: Sequence[Message], + model: str | None = None, *, - stream: Literal[False] = ..., - options: OpenAIChatCompletionOptionsT | ChatOptions[None] | None = None, - function_invocation_kwargs: Mapping[str, Any] | None = None, - client_kwargs: Mapping[str, Any] | None = None, + api_key: str | Callable[[], str | Awaitable[str]] | None = None, + org_id: str | None = None, + base_url: str | None = None, + default_headers: Mapping[str, str] | None = None, + async_client: AsyncOpenAI | None = None, + instruction_role: str | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, middleware: Sequence[ChatAndFunctionMiddlewareTypes] | None = None, - **kwargs: Any, - ) -> Awaitable[ChatResponse[Any]]: ... + function_invocation_configuration: FunctionInvocationConfiguration | None = None, + ) -> None: ... @overload - def get_response( - self, - messages: Sequence[Message], - *, - stream: Literal[True], - options: OpenAIChatCompletionOptionsT | ChatOptions[Any] | None = None, - function_invocation_kwargs: Mapping[str, Any] | None = None, - client_kwargs: Mapping[str, Any] | None = None, - middleware: Sequence[ChatAndFunctionMiddlewareTypes] | None = None, - **kwargs: Any, - ) -> ResponseStream[ChatResponseUpdate, ChatResponse[Any]]: ... 
- - @override - def get_response( + def __init__( self, - messages: Sequence[Message], + model: str | None = None, *, - stream: bool = False, - options: OpenAIChatCompletionOptionsT | ChatOptions[Any] | None = None, - function_invocation_kwargs: Mapping[str, Any] | None = None, - client_kwargs: Mapping[str, Any] | None = None, + azure_endpoint: str | None = None, + credential: AzureCredentialTypes | AzureTokenProvider | None = None, + api_version: str | None = None, + api_key: str | Callable[[], str | Awaitable[str]] | None = None, + base_url: str | None = None, + default_headers: Mapping[str, str] | None = None, + async_client: AsyncAzureOpenAI | AsyncOpenAI | None = None, + instruction_role: str | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, middleware: Sequence[ChatAndFunctionMiddlewareTypes] | None = None, - **kwargs: Any, - ) -> Awaitable[ChatResponse[Any]] | ResponseStream[ChatResponseUpdate, ChatResponse[Any]]: - """Get a response from the OpenAI chat client with all standard layers enabled.""" - super_get_response = cast( - "Callable[..., Awaitable[ChatResponse[Any]] | ResponseStream[ChatResponseUpdate, ChatResponse[Any]]]", - super().get_response, # type: ignore[misc] - ) - effective_client_kwargs = dict(client_kwargs) if client_kwargs is not None else {} - if middleware is not None: - effective_client_kwargs["middleware"] = middleware - return super_get_response( # type: ignore[no-any-return] - messages=messages, - stream=stream, - options=options, - function_invocation_kwargs=function_invocation_kwargs, - client_kwargs=effective_client_kwargs, - **kwargs, - ) + function_invocation_configuration: FunctionInvocationConfiguration | None = None, + ) -> None: ... 
def __init__( self, - *, model: str | None = None, + *, api_key: str | Callable[[], str | Awaitable[str]] | None = None, + credential: AzureCredentialTypes | AzureTokenProvider | None = None, org_id: str | None = None, default_headers: Mapping[str, str] | None = None, async_client: AsyncOpenAI | None = None, @@ -1065,33 +1022,47 @@ def __init__( """Initialize an OpenAI Chat completion client. Keyword Args: - model: OpenAI model name, see https://platform.openai.com/docs/models. - Can also be set via environment variable OPENAI_MODEL. - api_key: The API key to use. If provided will override the env vars or .env file value. - Can also be set via environment variable OPENAI_API_KEY. - org_id: The org ID to use. If provided will override the env vars or .env file value. - Can also be set via environment variable OPENAI_ORG_ID. - default_headers: The default headers mapping of string keys to - string values for HTTP requests. - async_client: An existing client to use. - instruction_role: The role to use for 'instruction' messages, for example, - "system" or "developer". If not provided, the default is "system". - base_url: The base URL to use. If provided will override - the standard value for an OpenAI connector, the env vars or .env file value. - Can also be set via environment variable OPENAI_BASE_URL. - azure_endpoint: Azure OpenAI endpoint. When provided, the client uses - ``AsyncAzureOpenAI``. The value should be the Azure resource endpoint and - should not end with ``/openai/v1``. For Azure OpenAI key auth, either pass - the resource endpoint without that suffix to ``azure_endpoint`` or pass the - full ``.../openai/v1`` URL to ``base_url`` instead. Can also be discovered - from ``AZURE_OPENAI_ENDPOINT`` when no OpenAI base URL is configured. - api_version: Azure OpenAI API version. Can also be set via - ``AZURE_OPENAI_API_VERSION``. + model: Model identifier to use for the request. 
When not provided, the constructor + reads ``OPENAI_MODEL`` for OpenAI routing or ``AZURE_OPENAI_DEPLOYMENT_NAME`` + for Azure routing. + api_key: API key override. For OpenAI routing this maps to ``OPENAI_API_KEY``. + For Azure routing this can be used instead of ``AZURE_OPENAI_API_KEY`` for key + auth. A callable token provider is also accepted for backwards compatibility, + but ``credential`` is the preferred Azure auth surface. + credential: Azure credential or token provider for Azure OpenAI auth. Passing this + is an explicit Azure signal, even when ``OPENAI_API_KEY`` is also configured. + Credential objects require the optional ``azure-identity`` package. + org_id: OpenAI organization ID. Used only for OpenAI routing and resolved from + ``OPENAI_ORG_ID`` when not provided. + default_headers: Default HTTP headers that are merged into each request. + async_client: Pre-configured client. Passing ``AsyncAzureOpenAI`` keeps the client on + Azure; passing ``AsyncOpenAI`` keeps the client on OpenAI and bypasses env lookup. + instruction_role: Role to use for instruction messages (for example ``"system"``). + base_url: Base URL override. For OpenAI routing this maps to ``OPENAI_BASE_URL``. + For Azure routing this may be used instead of ``azure_endpoint`` when you want + to pass the full ``.../openai/v1`` base URL directly. + azure_endpoint: Azure resource endpoint. When not provided explicitly, Azure routing + falls back to ``AZURE_OPENAI_ENDPOINT``. + api_version: Azure API version. When not provided explicitly, Azure routing falls + back to ``AZURE_OPENAI_API_VERSION`` and then the Chat Completions default. middleware: Optional sequence of ChatAndFunctionMiddlewareTypes to apply to requests. function_invocation_configuration: Optional configuration for function invocation support. - env_file_path: Use the environment settings file as a fallback - to environment variables. - env_file_encoding: The encoding of the environment settings file. 
+ env_file_path: Optional ``.env`` file that is checked before process environment + variables. The same file is used for both ``OPENAI_*`` and ``AZURE_OPENAI_*`` + lookups. + env_file_encoding: Encoding for the ``.env`` file. + + Notes: + Environment resolution and routing precedence are: + + 1. Explicit Azure inputs (``azure_endpoint``, ``api_version``, or ``credential``) + 2. Explicit OpenAI API key or ``OPENAI_API_KEY`` + 3. Azure environment fallback + + OpenAI routing reads ``OPENAI_API_KEY``, ``OPENAI_MODEL``, ``OPENAI_ORG_ID``, and + ``OPENAI_BASE_URL``. Azure routing reads ``AZURE_OPENAI_ENDPOINT``, + ``AZURE_OPENAI_BASE_URL``, ``AZURE_OPENAI_API_KEY``, + ``AZURE_OPENAI_DEPLOYMENT_NAME``, and ``AZURE_OPENAI_API_VERSION``. Examples: .. code-block:: python @@ -1124,6 +1095,7 @@ class MyOptions(OpenAIChatCompletionOptions, total=False): super().__init__( model=model, api_key=api_key, + credential=credential, org_id=org_id, base_url=base_url, azure_endpoint=azure_endpoint, @@ -1137,6 +1109,74 @@ class MyOptions(OpenAIChatCompletionOptions, total=False): function_invocation_configuration=function_invocation_configuration, ) + @overload + def get_response( + self, + messages: Sequence[Message], + *, + stream: Literal[False] = ..., + options: ChatOptions[ResponseModelBoundT], + function_invocation_kwargs: Mapping[str, Any] | None = None, + client_kwargs: Mapping[str, Any] | None = None, + middleware: Sequence[ChatAndFunctionMiddlewareTypes] | None = None, + **kwargs: Any, + ) -> Awaitable[ChatResponse[ResponseModelBoundT]]: ... + + @overload + def get_response( + self, + messages: Sequence[Message], + *, + stream: Literal[False] = ..., + options: OpenAIChatCompletionOptionsT | ChatOptions[None] | None = None, + function_invocation_kwargs: Mapping[str, Any] | None = None, + client_kwargs: Mapping[str, Any] | None = None, + middleware: Sequence[ChatAndFunctionMiddlewareTypes] | None = None, + **kwargs: Any, + ) -> Awaitable[ChatResponse[Any]]: ... 
+ + @overload + def get_response( + self, + messages: Sequence[Message], + *, + stream: Literal[True], + options: OpenAIChatCompletionOptionsT | ChatOptions[Any] | None = None, + function_invocation_kwargs: Mapping[str, Any] | None = None, + client_kwargs: Mapping[str, Any] | None = None, + middleware: Sequence[ChatAndFunctionMiddlewareTypes] | None = None, + **kwargs: Any, + ) -> ResponseStream[ChatResponseUpdate, ChatResponse[Any]]: ... + + @override + def get_response( + self, + messages: Sequence[Message], + *, + stream: bool = False, + options: OpenAIChatCompletionOptionsT | ChatOptions[Any] | None = None, + function_invocation_kwargs: Mapping[str, Any] | None = None, + client_kwargs: Mapping[str, Any] | None = None, + middleware: Sequence[ChatAndFunctionMiddlewareTypes] | None = None, + **kwargs: Any, + ) -> Awaitable[ChatResponse[Any]] | ResponseStream[ChatResponseUpdate, ChatResponse[Any]]: + """Get a response from the OpenAI chat client with all standard layers enabled.""" + super_get_response = cast( + "Callable[..., Awaitable[ChatResponse[Any]] | ResponseStream[ChatResponseUpdate, ChatResponse[Any]]]", + super().get_response, # type: ignore[misc] + ) + effective_client_kwargs = dict(client_kwargs) if client_kwargs is not None else {} + if middleware is not None: + effective_client_kwargs["middleware"] = middleware + return super_get_response( # type: ignore[no-any-return] + messages=messages, + stream=stream, + options=options, + function_invocation_kwargs=function_invocation_kwargs, + client_kwargs=effective_client_kwargs, + **kwargs, + ) + def _apply_openai_chat_completion_client_docstrings() -> None: """Align OpenAI chat completion client docstrings with the raw implementation.""" diff --git a/python/packages/openai/agent_framework_openai/_shared.py b/python/packages/openai/agent_framework_openai/_shared.py index c3c280d950..8f1d64f503 100644 --- a/python/packages/openai/agent_framework_openai/_shared.py +++ 
b/python/packages/openai/agent_framework_openai/_shared.py @@ -7,15 +7,16 @@ import sys from collections.abc import Awaitable, Callable, Mapping, MutableMapping, Sequence from copy import copy -from typing import Any, ClassVar, Union, cast +from typing import TYPE_CHECKING, Any, ClassVar, Union, cast import openai from agent_framework._serialization import SerializationMixin from agent_framework._settings import SecretString, load_settings from agent_framework._telemetry import APP_INFO, USER_AGENT_KEY, prepend_agent_framework_to_user_agent from agent_framework._tools import FunctionTool -from dotenv import dotenv_values -from openai import AsyncOpenAI, AsyncStream, _legacy_response # type: ignore +from agent_framework.exceptions import SettingNotFoundError +from dotenv import get_key +from openai import AsyncAzureOpenAI, AsyncOpenAI, AsyncStream, _legacy_response # type: ignore from openai.types import Completion from openai.types.audio import Transcription from openai.types.chat import ChatCompletion, ChatCompletionChunk @@ -24,10 +25,21 @@ from openai.types.responses.response_stream_event import ResponseStreamEvent from packaging.version import parse +if sys.version_info >= (3, 11): + from typing import TypedDict # type: ignore # pragma: no cover +else: + from typing_extensions import TypedDict # type: ignore # pragma: no cover + +if TYPE_CHECKING: + from azure.core.credentials import TokenCredential + from azure.core.credentials_async import AsyncTokenCredential + + AzureCredentialTypes = TokenCredential | AsyncTokenCredential + + logger: logging.Logger = logging.getLogger("agent_framework.openai") -DEFAULT_AZURE_OPENAI_CHAT_COMPLETION_API_VERSION = "2024-10-21" -DEFAULT_AZURE_OPENAI_RESPONSES_API_VERSION = "preview" +AZURE_OPENAI_TOKEN_SCOPE = "https://cognitiveservices.azure.com/.default" # noqa: S105 RESPONSE_TYPE = Union[ @@ -43,12 +55,7 @@ _legacy_response.HttpxBinaryResponseContent, ] -OPTION_TYPE = dict[str, Any] - -if sys.version_info >= (3, 11): - from 
typing import TypedDict # type: ignore # pragma: no cover -else: - from typing_extensions import TypedDict # type: ignore # pragma: no cover +AzureTokenProvider = Callable[[], str | Awaitable[str]] def _check_openai_version_for_callable_api_key() -> None: @@ -110,123 +117,161 @@ class OpenAISettings(TypedDict, total=False): settings = load_settings(OpenAISettings, env_prefix="OPENAI_", env_file_path="path/to/.env") """ - api_key: SecretString | Callable[[], str | Awaitable[str]] | None + api_key: SecretString | None base_url: str | None org_id: str | None model: str | None embedding_model: str | None - azure_endpoint: str | None - api_version: str | None - -def _load_dotenv_values(*, env_file_path: str | None, env_file_encoding: str | None) -> dict[str, str]: - """Load dotenv values for non-standard environment variable aliases.""" - if env_file_path is None or not os.path.exists(env_file_path): - return {} - raw_dotenv_values = dotenv_values(dotenv_path=env_file_path, encoding=env_file_encoding or "utf-8") - return {key: value for key, value in raw_dotenv_values.items() if value is not None} +class AzureOpenAISettings(TypedDict, total=False): + """Azure OpenAI environment settings.""" - -def _get_setting_from_alias( - name: str, - *, - dotenv_values_by_name: Mapping[str, str], -) -> str | None: - """Resolve a setting from an explicit env-var alias.""" - if dotenv_value := dotenv_values_by_name.get(name): - return dotenv_value - return os.getenv(name) + endpoint: str | None + base_url: str | None + api_key: SecretString | None + deployment_name: str | None + api_version: str | None def load_openai_service_settings( *, model: str | None, api_key: str | SecretString | Callable[[], str | Awaitable[str]] | None, + credential: AzureCredentialTypes | AzureTokenProvider | None, org_id: str | None, base_url: str | None, - azure_endpoint: str | None, + endpoint: str | None, api_version: str | None, + default_azure_api_version: str, + default_headers: Mapping[str, str] | 
None = None, + client: AsyncOpenAI | None = None, env_file_path: str | None, env_file_encoding: str | None, - azure_model_env_vars: Sequence[str], - default_azure_api_version: str, -) -> tuple[OpenAISettings, bool]: +) -> tuple[dict[str, Any], AsyncOpenAI, bool]: """Load OpenAI settings, including Azure OpenAI aliases. - The generic OpenAI clients primarily read from ``OPENAI_*`` variables. When an - ``AZURE_OPENAI_ENDPOINT`` (or ``AZURE_OPENAI_BASE_URL``) is available and no - explicit OpenAI base URL is configured, this helper switches to Azure-specific - environment variables for endpoint, API key, model deployment, and API version. + The generic OpenAI clients primarily read from ``OPENAI_*`` variables. Azure-specific + environment variables are used only when an explicit Azure signal is present + (``endpoint``, ``api_version``, ``credential``, or an ``AsyncAzureOpenAI`` client) + or when no OpenAI API key can be resolved explicitly or from the environment. """ - openai_settings = load_settings( - OpenAISettings, - env_prefix="OPENAI_", - api_key=api_key, - org_id=org_id, + # Merge APP_INFO into the headers + merged_headers = dict(copy(default_headers)) if default_headers else {} + if APP_INFO: + merged_headers.update(APP_INFO) + merged_headers = prepend_agent_framework_to_user_agent(merged_headers) + + api_key_callable = api_key if callable(api_key) else None + api_key_str = api_key if not callable(api_key) else None + + azure_client = isinstance(client, AsyncAzureOpenAI) + use_azure = azure_client or endpoint is not None or api_version is not None or credential is not None + if not use_azure: + openai_settings = load_settings( + OpenAISettings, + env_prefix="OPENAI_", + api_key=api_key_str, + org_id=org_id, + base_url=base_url, + model=model, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + if client: + return openai_settings, client, False # type: ignore[return-value] + if openai_settings.get("api_key") is not None: + if not (model := openai_settings.get("model")): + raise
SettingNotFoundError( + "Model must be specified via the 'model' parameter or the 'OPENAI_MODEL' environment variable." + ) + + client_args: dict[str, Any] = { + "api_key": openai_settings["api_key"].get_secret_value() # type: ignore[reportOptionalMemberAccess, union-attr] + if "api_key" in openai_settings + else api_key_callable, + "organization": openai_settings.get("org_id"), + "default_headers": merged_headers, + } + if base_url := openai_settings.get("base_url"): + client_args["base_url"] = base_url + return openai_settings, AsyncOpenAI(**client_args), False # type: ignore[return-value] + + azure_settings = load_settings( + AzureOpenAISettings, + env_prefix="AZURE_OPENAI_", + required_fields=[("base_url", "endpoint")], + api_key=api_key_str, + endpoint=endpoint, base_url=base_url, - model=model, - azure_endpoint=azure_endpoint, - api_version=api_version, + deployment_name=model, + api_version=api_version or default_azure_api_version, env_file_path=env_file_path, env_file_encoding=env_file_encoding, ) - - dotenv_values_by_name = _load_dotenv_values( - env_file_path=env_file_path, - env_file_encoding=env_file_encoding, + client_args = {} + if ("deployment_name" not in azure_settings or not azure_settings["deployment_name"]) and ( + openai_model := ( + get_key(env_file_path, "OPENAI_MODEL", encoding=env_file_encoding) or os.getenv("OPENAI_MODEL") # type: ignore[reportArgumentType, arg-type] + ) + ): + # load `OPENAI_MODEL` from the environment as a fallback + azure_settings["deployment_name"] = openai_model + if model := azure_settings.get("deployment_name"): + client_args["azure_deployment"] = model + else: + raise ValueError( + "Azure OpenAI client requires a deployment name, which can be provided via the 'model' parameter, " + "the 'AZURE_OPENAI_DEPLOYMENT_NAME' environment variable, or the 'OPENAI_MODEL' environment variable." 
+ ) + if client: + return azure_settings, client, True # type: ignore[return-value] + client_args["default_headers"] = merged_headers + if endpoint := azure_settings.get("endpoint"): + client_args["azure_endpoint"] = endpoint + if base_url := azure_settings.get("base_url"): + client_args["base_url"] = base_url + if api_key := azure_settings.get("api_key"): + client_args["api_key"] = api_key.get_secret_value() + if api_key_callable: + client_args["api_key"] = api_key_callable + if api_version := azure_settings.get("api_version"): + client_args["api_version"] = api_version + if credential: + client_args["azure_ad_token_provider"] = _resolve_azure_credential_to_token_provider(credential) + if "api_key" not in client_args and "azure_ad_token_provider" not in client_args: + raise ValueError( + "Azure OpenAI client requires either an API key or an Azure AD token provider." + " This can be provided either as a callable api_key or via the credential parameter." + ) + return azure_settings, AsyncAzureOpenAI(**client_args), True # type: ignore[return-value] + + +def _resolve_azure_credential_to_token_provider( + credential: AzureCredentialTypes | AzureTokenProvider, +) -> AzureTokenProvider: + """Resolve an Azure credential or token provider for Azure OpenAI auth.""" + try: + from azure.core.credentials import TokenCredential + from azure.core.credentials_async import AsyncTokenCredential + from azure.identity import get_bearer_token_provider + from azure.identity.aio import get_bearer_token_provider as get_async_bearer_token_provider + except ModuleNotFoundError as exc: + raise ModuleNotFoundError( + "Azure credential auth requires the 'azure-identity' package. 
Install it with: pip install azure-identity" + ) from exc + + if isinstance(credential, AsyncTokenCredential): + return get_async_bearer_token_provider(credential, AZURE_OPENAI_TOKEN_SCOPE) + if isinstance(credential, TokenCredential): + return get_bearer_token_provider(credential, AZURE_OPENAI_TOKEN_SCOPE) # type: ignore[arg-type] + if callable(credential): + return credential + raise ValueError( + "The 'credential' parameter must be an Azure TokenCredential, AsyncTokenCredential, or a " + "callable token provider." ) - resolved_azure_endpoint = azure_endpoint - resolved_azure_base_url: str | None = None - if not openai_settings.get("base_url"): - if resolved_azure_endpoint is None: - resolved_azure_endpoint = _get_setting_from_alias( - "AZURE_OPENAI_ENDPOINT", - dotenv_values_by_name=dotenv_values_by_name, - ) - if resolved_azure_endpoint is None: - resolved_azure_base_url = _get_setting_from_alias( - "AZURE_OPENAI_BASE_URL", - dotenv_values_by_name=dotenv_values_by_name, - ) - if resolved_azure_base_url is not None: - openai_settings["base_url"] = resolved_azure_base_url - - use_azure_client = resolved_azure_endpoint is not None or resolved_azure_base_url is not None - if resolved_azure_endpoint is not None: - openai_settings["azure_endpoint"] = resolved_azure_endpoint - - if use_azure_client: - if api_key is None: - resolved_azure_api_key = _get_setting_from_alias( - "AZURE_OPENAI_API_KEY", - dotenv_values_by_name=dotenv_values_by_name, - ) - if resolved_azure_api_key is not None: - openai_settings["api_key"] = SecretString(resolved_azure_api_key) - - if model is None: - for env_var_name in azure_model_env_vars: - resolved_model = _get_setting_from_alias( - env_var_name, - dotenv_values_by_name=dotenv_values_by_name, - ) - if resolved_model is not None: - openai_settings["model"] = resolved_model - break - - if api_version is not None: - openai_settings["api_version"] = api_version - else: - resolved_api_version = _get_setting_from_alias( - 
"AZURE_OPENAI_API_VERSION", - dotenv_values_by_name=dotenv_values_by_name, - ) - openai_settings["api_version"] = resolved_api_version or default_azure_api_version - - return openai_settings, use_azure_client - def maybe_append_azure_endpoint_guidance(message: str, *, azure_endpoint: str | None) -> str: """Append Azure endpoint guidance only when the configured endpoint shape looks suspicious.""" diff --git a/python/packages/openai/tests/openai/conftest.py b/python/packages/openai/tests/openai/conftest.py index 1ef52baf81..07f6012209 100644 --- a/python/packages/openai/tests/openai/conftest.py +++ b/python/packages/openai/tests/openai/conftest.py @@ -53,6 +53,8 @@ def openai_unit_test_env(monkeypatch, exclude_list, override_env_param_dict): # "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_BASE_URL", "AZURE_OPENAI_API_KEY", + "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME", + "AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME", "AZURE_OPENAI_DEPLOYMENT_NAME", "AZURE_OPENAI_API_VERSION", ], @@ -114,6 +116,8 @@ def azure_openai_unit_test_env(monkeypatch, exclude_list, override_env_param_dic env_vars = { "AZURE_OPENAI_ENDPOINT": "https://test-endpoint.openai.azure.com", + "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME": "test_chat_deployment", + "AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME": "test_responses_deployment", "AZURE_OPENAI_DEPLOYMENT_NAME": "test_deployment", "AZURE_OPENAI_API_KEY": "test_api_key", "AZURE_OPENAI_API_VERSION": "2024-12-01-preview", diff --git a/python/packages/openai/tests/openai/test_openai_chat_client.py b/python/packages/openai/tests/openai/test_openai_chat_client.py index 897fe5f913..15c64d7e89 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_client.py +++ b/python/packages/openai/tests/openai/test_openai_chat_client.py @@ -28,6 +28,7 @@ from agent_framework.exceptions import ( ChatClientException, ChatClientInvalidRequestException, + SettingNotFoundError, ) from openai import BadRequestError from openai.types.responses.response_reasoning_item import Summary @@ -143,7 
+144,7 @@ def test_init_with_default_header(openai_unit_test_env: dict[str, str]) -> None: @pytest.mark.parametrize("exclude_list", [["OPENAI_MODEL"]], indirect=True) def test_init_with_empty_model_id(openai_unit_test_env: dict[str, str]) -> None: - with pytest.raises(ValueError): + with pytest.raises(SettingNotFoundError): OpenAIChatClient() @@ -151,7 +152,7 @@ def test_init_with_empty_model_id(openai_unit_test_env: dict[str, str]) -> None: def test_init_with_empty_api_key(openai_unit_test_env: dict[str, str]) -> None: model_id = "test_model_id" - with pytest.raises(ValueError): + with pytest.raises(SettingNotFoundError): OpenAIChatClient( model=model_id, ) diff --git a/python/packages/openai/tests/openai/test_openai_chat_client_azure.py b/python/packages/openai/tests/openai/test_openai_chat_client_azure.py index 8646f2d959..c364690cab 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_chat_client_azure.py @@ -6,10 +6,12 @@ import os from pathlib import Path from typing import Any +from unittest.mock import AsyncMock, MagicMock, patch import pytest from agent_framework import Agent, AgentResponse, ChatResponse, Content, Message, SupportsChatGetResponse, tool -from azure.identity.aio import AzureCliCredential, get_bearer_token_provider +from azure.core.credentials_async import AsyncTokenCredential +from azure.identity.aio import AzureCliCredential from openai import AsyncAzureOpenAI from pydantic import BaseModel from pytest import param @@ -20,11 +22,18 @@ skip_if_azure_openai_integration_tests_disabled = pytest.mark.skipif( os.getenv("AZURE_OPENAI_ENDPOINT", "") in ("", "https://test-endpoint.openai.azure.com") - or os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME", "") == "", + or ( + os.getenv("AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME", "") == "" + and os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME", "") == "" + ), reason="No real Azure OpenAI endpoint or responses deployment provided; skipping 
integration tests.", ) +def _get_azure_responses_deployment_name() -> str: + return os.getenv("AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME") or os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"] + + class OutputStruct(BaseModel): """A structured output for testing purposes.""" @@ -35,12 +44,17 @@ class OutputStruct(BaseModel): def _create_azure_openai_chat_client( *, api_key: Any = None, + credential: AsyncTokenCredential | None = None, ) -> OpenAIChatClient: + resolved_api_key = ( + api_key if api_key is not None else None if credential is not None else os.environ["AZURE_OPENAI_API_KEY"] + ) return OpenAIChatClient( - model=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"], - api_key=api_key or os.environ["AZURE_OPENAI_API_KEY"], + model=_get_azure_responses_deployment_name(), + api_key=resolved_api_key, azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], api_version=os.getenv("AZURE_OPENAI_API_VERSION"), + credential=credential, ) @@ -81,7 +95,7 @@ async def get_weather(location: str) -> str: def test_init_with_azure_endpoint(azure_openai_unit_test_env: dict[str, str]) -> None: client = _create_azure_openai_chat_client() - assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_DEPLOYMENT_NAME"] + assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME"] assert isinstance(client, SupportsChatGetResponse) assert isinstance(client.client, AsyncAzureOpenAI) assert client.OTEL_PROVIDER_NAME == "azure.ai.openai" @@ -97,11 +111,48 @@ def test_init_auto_detects_azure_env(azure_openai_unit_test_env: dict[str, str]) assert client.azure_endpoint == azure_openai_unit_test_env["AZURE_OPENAI_ENDPOINT"] +def test_openai_api_key_wins_over_azure_env(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: + monkeypatch.setenv("OPENAI_API_KEY", "test-dummy-key") + monkeypatch.setenv("OPENAI_MODEL", "gpt-5") + + client = OpenAIChatClient() + + assert client.model == "gpt-5" + assert not isinstance(client.client, AsyncAzureOpenAI) + assert 
client.azure_endpoint is None + + +def test_explicit_credential_wins_over_openai_api_key(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: + monkeypatch.setenv("OPENAI_API_KEY", "test-dummy-key") + monkeypatch.setenv("OPENAI_MODEL", "gpt-5") + + client = OpenAIChatClient(credential=lambda: "token") + + assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_DEPLOYMENT_NAME"] + assert isinstance(client.client, AsyncAzureOpenAI) + assert client.azure_endpoint == azure_openai_unit_test_env["AZURE_OPENAI_ENDPOINT"] + + +def test_init_with_credential_wraps_async_token_credential( + monkeypatch, azure_openai_unit_test_env: dict[str, str] +) -> None: + monkeypatch.setenv("OPENAI_API_KEY", "test-dummy-key") + monkeypatch.setenv("OPENAI_MODEL", "gpt-5") + credential = AsyncMock(spec=AsyncTokenCredential) + token_provider = MagicMock() + + with patch("azure.identity.aio.get_bearer_token_provider", return_value=token_provider) as mock_provider: + client = OpenAIChatClient(credential=credential) + + assert isinstance(client.client, AsyncAzureOpenAI) + mock_provider.assert_called_once_with(credential, "https://cognitiveservices.azure.com/.default") + + @pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_API_VERSION"]], indirect=True) def test_init_uses_default_azure_api_version(azure_openai_unit_test_env: dict[str, str]) -> None: client = _create_azure_openai_chat_client() - assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_DEPLOYMENT_NAME"] + assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME"] assert client.api_version == "preview" @@ -180,9 +231,7 @@ async def test_integration_options( needs_validation: bool, ) -> None: async with AzureCliCredential() as credential: - client = _create_azure_openai_chat_client( - api_key=get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") - ) + client = _create_azure_openai_chat_client(credential=credential) 
client.function_invocation_configuration["max_iterations"] = 2 for streaming in [False, True]: @@ -235,9 +284,7 @@ async def test_integration_options( @skip_if_azure_openai_integration_tests_disabled async def test_integration_web_search() -> None: async with AzureCliCredential() as credential: - client = _create_azure_openai_chat_client( - api_key=get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") - ) + client = _create_azure_openai_chat_client(credential=credential) for streaming in [False, True]: content = { @@ -288,9 +335,7 @@ async def test_integration_web_search() -> None: @skip_if_azure_openai_integration_tests_disabled async def test_integration_client_file_search() -> None: async with AzureCliCredential() as credential: - client = _create_azure_openai_chat_client( - api_key=get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") - ) + client = _create_azure_openai_chat_client(credential=credential) file_id, vector_store = await create_vector_store(client) try: response = await client.get_response( @@ -312,9 +357,7 @@ async def test_integration_client_file_search() -> None: @skip_if_azure_openai_integration_tests_disabled async def test_integration_client_file_search_streaming() -> None: async with AzureCliCredential() as credential: - client = _create_azure_openai_chat_client( - api_key=get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") - ) + client = _create_azure_openai_chat_client(credential=credential) file_id, vector_store = await create_vector_store(client) try: response_stream = client.get_response( @@ -338,9 +381,7 @@ async def test_integration_client_file_search_streaming() -> None: @skip_if_azure_openai_integration_tests_disabled async def test_integration_client_agent_hosted_mcp_tool() -> None: async with AzureCliCredential() as credential: - client = _create_azure_openai_chat_client( - api_key=get_bearer_token_provider(credential, 
"https://cognitiveservices.azure.com/.default") - ) + client = _create_azure_openai_chat_client(credential=credential) response = await client.get_response( messages=[Message(role="user", text="How to create an Azure storage account using az cli?")], options={ @@ -363,9 +404,7 @@ async def test_integration_client_agent_hosted_mcp_tool() -> None: @skip_if_azure_openai_integration_tests_disabled async def test_integration_client_agent_hosted_code_interpreter_tool() -> None: async with AzureCliCredential() as credential: - client = _create_azure_openai_chat_client( - api_key=get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") - ) + client = _create_azure_openai_chat_client(credential=credential) response = await client.get_response( messages=[Message(role="user", text="Calculate the sum of numbers from 1 to 10 using Python code.")], @@ -386,9 +425,7 @@ async def test_integration_client_agent_existing_session() -> None: preserved_session = None async with Agent( - client=_create_azure_openai_chat_client( - api_key=get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") - ), + client=_create_azure_openai_chat_client(credential=credential), instructions="You are a helpful assistant with good memory.", ) as first_agent: session = first_agent.create_session() @@ -403,9 +440,7 @@ async def test_integration_client_agent_existing_session() -> None: if preserved_session: async with Agent( - client=_create_azure_openai_chat_client( - api_key=get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") - ), + client=_create_azure_openai_chat_client(credential=credential), instructions="You are a helpful assistant with good memory.", ) as second_agent: second_response = await second_agent.run("What is my hobby?", session=preserved_session) @@ -428,9 +463,7 @@ def get_test_image() -> Content: return Content.from_data(data=image_bytes, media_type="image/jpeg") async with AzureCliCredential() as 
credential: - client = _create_azure_openai_chat_client( - api_key=get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") - ) + client = _create_azure_openai_chat_client(credential=credential) client.function_invocation_configuration["max_iterations"] = 2 for streaming in [False, True]: diff --git a/python/packages/openai/tests/openai/test_openai_chat_completion_client.py b/python/packages/openai/tests/openai/test_openai_chat_completion_client.py index 391432958f..d48ab7d476 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_completion_client.py +++ b/python/packages/openai/tests/openai/test_openai_chat_completion_client.py @@ -13,7 +13,7 @@ SupportsChatGetResponse, tool, ) -from agent_framework.exceptions import ChatClientException +from agent_framework.exceptions import ChatClientException, SettingNotFoundError from openai import BadRequestError from openai.types.chat.chat_completion import ChatCompletion, Choice from openai.types.chat.chat_completion_message import ChatCompletionMessage @@ -93,7 +93,7 @@ def test_init_base_url_from_settings_env() -> None: @pytest.mark.parametrize("exclude_list", [["OPENAI_MODEL"]], indirect=True) def test_init_with_empty_model_id(openai_unit_test_env: dict[str, str]) -> None: - with pytest.raises(ValueError): + with pytest.raises(SettingNotFoundError): OpenAIChatCompletionClient() @@ -101,7 +101,7 @@ def test_init_with_empty_model_id(openai_unit_test_env: dict[str, str]) -> None: def test_init_with_empty_api_key(openai_unit_test_env: dict[str, str]) -> None: model_id = "test_model_id" - with pytest.raises(ValueError): + with pytest.raises(SettingNotFoundError): OpenAIChatCompletionClient( model=model_id, ) diff --git a/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py b/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py index 148fcb68b9..0a57a4b860 100644 --- 
a/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py @@ -4,6 +4,7 @@ import os from collections.abc import Awaitable, Callable +from unittest.mock import MagicMock, patch import pytest from agent_framework import ( @@ -16,7 +17,8 @@ SupportsChatGetResponse, tool, ) -from azure.identity.aio import AzureCliCredential, get_bearer_token_provider +from azure.core.credentials_async import AsyncTokenCredential +from azure.identity.aio import AzureCliCredential from openai import AsyncAzureOpenAI from agent_framework_openai import OpenAIChatCompletionClient @@ -25,20 +27,31 @@ skip_if_azure_openai_integration_tests_disabled = pytest.mark.skipif( os.getenv("AZURE_OPENAI_ENDPOINT", "") in ("", "https://test-endpoint.openai.azure.com") - or os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME", "") == "", + or ( + os.getenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME", "") == "" and os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME", "") == "" + ), reason="No real Azure OpenAI endpoint or chat deployment provided; skipping integration tests.", ) +def _get_azure_chat_deployment_name() -> str: + return os.getenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME") or os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"] + + def _create_azure_chat_completion_client( *, api_key: str | Callable[[], str | Awaitable[str]] | None = None, + credential: AsyncTokenCredential | None = None, ) -> OpenAIChatCompletionClient: + resolved_api_key = ( + api_key if api_key is not None else None if credential is not None else os.environ["AZURE_OPENAI_API_KEY"] + ) return OpenAIChatCompletionClient( - model=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"], - api_key=api_key or os.environ["AZURE_OPENAI_API_KEY"], + model=_get_azure_chat_deployment_name(), + api_key=resolved_api_key, azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], api_version=os.getenv("AZURE_OPENAI_API_VERSION"), + credential=credential, ) @@ -62,7 +75,7 @@ async def 
get_weather(location: str) -> str: def test_init_with_azure_endpoint(azure_openai_unit_test_env: dict[str, str]) -> None: client = _create_azure_chat_completion_client() - assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_DEPLOYMENT_NAME"] + assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"] assert isinstance(client, SupportsChatGetResponse) assert isinstance(client.client, AsyncAzureOpenAI) assert client.OTEL_PROVIDER_NAME == "azure.ai.openai" @@ -78,12 +91,49 @@ def test_init_auto_detects_azure_env(azure_openai_unit_test_env: dict[str, str]) assert client.azure_endpoint == azure_openai_unit_test_env["AZURE_OPENAI_ENDPOINT"] +def test_openai_api_key_wins_over_azure_env(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: + monkeypatch.setenv("OPENAI_API_KEY", "test-dummy-key") + monkeypatch.setenv("OPENAI_MODEL", "gpt-5") + + client = OpenAIChatCompletionClient() + + assert client.model == "gpt-5" + assert not isinstance(client.client, AsyncAzureOpenAI) + assert client.azure_endpoint is None + + +def test_explicit_credential_wins_over_openai_api_key(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: + monkeypatch.setenv("OPENAI_API_KEY", "test-dummy-key") + monkeypatch.setenv("OPENAI_MODEL", "gpt-5") + + client = OpenAIChatCompletionClient(credential=lambda: "token") + + assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_DEPLOYMENT_NAME"] + assert isinstance(client.client, AsyncAzureOpenAI) + assert client.azure_endpoint == azure_openai_unit_test_env["AZURE_OPENAI_ENDPOINT"] + + +def test_init_with_credential_wraps_async_token_credential( + monkeypatch, azure_openai_unit_test_env: dict[str, str] +) -> None: + monkeypatch.setenv("OPENAI_API_KEY", "test-dummy-key") + monkeypatch.setenv("OPENAI_MODEL", "gpt-5") + credential = MagicMock(spec=AsyncTokenCredential) + token_provider = MagicMock() + + with patch("azure.identity.aio.get_bearer_token_provider", 
return_value=token_provider) as mock_provider: + client = OpenAIChatCompletionClient(credential=credential) + + assert isinstance(client.client, AsyncAzureOpenAI) + mock_provider.assert_called_once_with(credential, "https://cognitiveservices.azure.com/.default") + + @pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_API_VERSION"]], indirect=True) def test_init_uses_default_azure_api_version(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: monkeypatch.setenv("OPENAI_API_VERSION", "preview") client = _create_azure_chat_completion_client() - assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_DEPLOYMENT_NAME"] + assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"] assert client.api_version == "2024-10-21" @@ -104,9 +154,7 @@ def test_openai_base_url_wins_over_azure_aliases(monkeypatch, azure_openai_unit_ @skip_if_azure_openai_integration_tests_disabled async def test_azure_openai_chat_completion_client_response() -> None: async with AzureCliCredential() as credential: - client = _create_azure_chat_completion_client( - api_key=get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") - ) + client = _create_azure_chat_completion_client(credential=credential) assert isinstance(client, SupportsChatGetResponse) messages = [ @@ -136,9 +184,7 @@ async def test_azure_openai_chat_completion_client_response() -> None: @skip_if_azure_openai_integration_tests_disabled async def test_azure_openai_chat_completion_client_response_tools() -> None: async with AzureCliCredential() as credential: - client = _create_azure_chat_completion_client( - api_key=get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") - ) + client = _create_azure_chat_completion_client(credential=credential) response = await client.get_response( messages=[Message(role="user", text="who are Emily and David?")], @@ -155,9 +201,7 @@ async def 
test_azure_openai_chat_completion_client_response_tools() -> None: @skip_if_azure_openai_integration_tests_disabled async def test_azure_openai_chat_completion_client_streaming() -> None: async with AzureCliCredential() as credential: - client = _create_azure_chat_completion_client( - api_key=get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") - ) + client = _create_azure_chat_completion_client(credential=credential) response = client.get_response( messages=[ @@ -192,9 +236,7 @@ async def test_azure_openai_chat_completion_client_streaming() -> None: @skip_if_azure_openai_integration_tests_disabled async def test_azure_openai_chat_completion_client_streaming_tools() -> None: async with AzureCliCredential() as credential: - client = _create_azure_chat_completion_client( - api_key=get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") - ) + client = _create_azure_chat_completion_client(credential=credential) response = client.get_response( messages=[Message(role="user", text="who are Emily and David?")], @@ -219,9 +261,7 @@ async def test_azure_openai_chat_completion_client_agent_basic_run() -> None: async with ( AzureCliCredential() as credential, Agent( - client=_create_azure_chat_completion_client( - api_key=get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") - ), + client=_create_azure_chat_completion_client(credential=credential), ) as agent, ): response = await agent.run("Please respond with exactly: 'This is a response test.'") @@ -238,9 +278,7 @@ async def test_azure_openai_chat_completion_client_agent_basic_run_streaming() - async with ( AzureCliCredential() as credential, Agent( - client=_create_azure_chat_completion_client( - api_key=get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") - ), + client=_create_azure_chat_completion_client(credential=credential), ) as agent, ): full_text = "" @@ -262,9 +300,7 @@ async def 
test_azure_openai_chat_completion_client_agent_session_persistence() - async with ( AzureCliCredential() as credential, Agent( - client=_create_azure_chat_completion_client( - api_key=get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") - ), + client=_create_azure_chat_completion_client(credential=credential), instructions="You are a helpful assistant with good memory.", ) as agent, ): @@ -286,9 +322,7 @@ async def test_azure_openai_chat_completion_client_agent_existing_session() -> N preserved_session = None async with Agent( - client=_create_azure_chat_completion_client( - api_key=get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") - ), + client=_create_azure_chat_completion_client(credential=credential), instructions="You are a helpful assistant with good memory.", ) as first_agent: session = first_agent.create_session() @@ -299,9 +333,7 @@ async def test_azure_openai_chat_completion_client_agent_existing_session() -> N if preserved_session: async with Agent( - client=_create_azure_chat_completion_client( - api_key=get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") - ), + client=_create_azure_chat_completion_client(credential=credential), instructions="You are a helpful assistant with good memory.", ) as second_agent: second_response = await second_agent.run("What is my name?", session=preserved_session) @@ -318,9 +350,7 @@ async def test_azure_chat_completion_client_agent_level_tool_persistence() -> No async with ( AzureCliCredential() as credential, Agent( - client=_create_azure_chat_completion_client( - api_key=get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") - ), + client=_create_azure_chat_completion_client(credential=credential), instructions="You are a helpful assistant that uses available tools.", tools=[get_weather], ) as agent, diff --git a/python/samples/02-agents/providers/azure/README.md 
b/python/samples/02-agents/providers/azure/README.md index 1e06bda482..cd34fe2717 100644 --- a/python/samples/02-agents/providers/azure/README.md +++ b/python/samples/02-agents/providers/azure/README.md @@ -1,12 +1,48 @@ # Azure Provider Samples -This folder contains Azure OpenAI chat completion samples for Agent Framework. +This folder contains Azure-backed samples for the generic OpenAI clients in +`agent_framework.openai`. -## Azure OpenAI ChatCompletionClient Samples +## Chat Completions API samples (`OpenAIChatCompletionClient`) | File | Description | |------|-------------| -| [`openai_chat_completion_client_azure_basic.py`](openai_chat_completion_client_azure_basic.py) | Azure OpenAI Chat Client Basic Example | -| [`openai_chat_completion_client_azure_with_explicit_settings.py`](openai_chat_completion_client_azure_with_explicit_settings.py) | Azure OpenAI Chat Client with Explicit Settings Example | -| [`openai_chat_completion_client_azure_with_function_tools.py`](openai_chat_completion_client_azure_with_function_tools.py) | Azure OpenAI Chat Client with Function Tools Example | -| [`openai_chat_completion_client_azure_with_session.py`](openai_chat_completion_client_azure_with_session.py) | Azure OpenAI Chat Client with Session Management Example | +| [`openai_chat_completion_client_basic.py`](openai_chat_completion_client_basic.py) | Basic Azure chat completions sample using explicit Azure settings and `credential=AzureCliCredential()`. | +| [`openai_chat_completion_client_with_explicit_settings.py`](openai_chat_completion_client_with_explicit_settings.py) | Azure chat completions sample with explicit settings. | +| [`openai_chat_completion_client_with_function_tools.py`](openai_chat_completion_client_with_function_tools.py) | Azure chat completions sample with function tools. | +| [`openai_chat_completion_client_with_session.py`](openai_chat_completion_client_with_session.py) | Azure chat completions sample with session management. 
| + +## Responses API samples (`OpenAIChatClient`) + +| File | Description | +|------|-------------| +| [`openai_client_basic.py`](openai_client_basic.py) | Basic Azure responses sample using explicit settings and `credential=AzureCliCredential()`. | +| [`openai_client_with_function_tools.py`](openai_client_with_function_tools.py) | Azure responses sample with function tools. | +| [`openai_client_with_session.py`](openai_client_with_session.py) | Azure responses sample with session management. | +| [`openai_client_with_structured_output.py`](openai_client_with_structured_output.py) | Azure responses sample with structured output. | + +## Environment Variables + +Set these before running the Azure provider samples: + +- `AZURE_OPENAI_ENDPOINT` +- `AZURE_OPENAI_DEPLOYMENT_NAME` + +Optionally, you can also set: + +- `AZURE_OPENAI_API_KEY` +- `AZURE_OPENAI_API_VERSION` +- `AZURE_OPENAI_BASE_URL` + +These Azure samples are written around explicit Azure inputs such as +`credential=AzureCliCredential()`, so they stay on Azure even if `OPENAI_API_KEY` is also present. + +## Optional Dependencies + +Credential-based samples require `azure-identity`: + +```bash +pip install azure-identity +``` + +Run `az login` before executing the credential-based samples. diff --git a/python/samples/02-agents/providers/azure/openai_chat_completion_client_azure_basic.py b/python/samples/02-agents/providers/azure/openai_chat_completion_client_basic.py similarity index 70% rename from python/samples/02-agents/providers/azure/openai_chat_completion_client_azure_basic.py rename to python/samples/02-agents/providers/azure/openai_chat_completion_client_basic.py index db5740cfc3..030828da89 100644 --- a/python/samples/02-agents/providers/azure/openai_chat_completion_client_azure_basic.py +++ b/python/samples/02-agents/providers/azure/openai_chat_completion_client_basic.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. 
import asyncio +import os from random import randint from typing import Annotated @@ -16,14 +17,12 @@ """ Azure OpenAI Chat Client Basic Example -This sample demonstrates basic usage of OpenAIChatCompletionClient for direct chat-based -interactions, showing both streaming and non-streaming responses. +This sample demonstrates basic usage of OpenAIChatCompletionClient with explicit Azure +settings and a credential, showing both streaming and non-streaming responses. """ -# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; -# see samples/02-agents/tools/function_tool_with_approval.py -# and samples/02-agents/tools/function_tool_with_approval_and_sessions.py. +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production. @tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], @@ -37,11 +36,14 @@ async def non_streaming_example() -> None: """Example of non-streaming response (get the complete result at once).""" print("=== Non-streaming Response Example ===") - # Create agent with Azure Chat Client - # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred - # authentication option. 
agent = Agent( - client=OpenAIChatCompletionClient(credential=AzureCliCredential()), + client=OpenAIChatCompletionClient( + model=os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"), + azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"), + api_version=os.getenv("AZURE_OPENAI_API_VERSION"), + credential=AzureCliCredential(), + ), + name="WeatherAgent", instructions="You are a helpful weather agent.", tools=get_weather, ) @@ -56,11 +58,14 @@ async def streaming_example() -> None: """Example of streaming response (get results as they are generated).""" print("=== Streaming Response Example ===") - # Create agent with Azure Chat Client - # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred - # authentication option. agent = Agent( - client=OpenAIChatCompletionClient(credential=AzureCliCredential()), + client=OpenAIChatCompletionClient( + model=os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"), + azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"), + api_version=os.getenv("AZURE_OPENAI_API_VERSION"), + credential=AzureCliCredential(), + ), + name="WeatherAgent", instructions="You are a helpful weather agent.", tools=get_weather, ) @@ -75,7 +80,7 @@ async def streaming_example() -> None: async def main() -> None: - print("=== Basic Azure Chat Client Agent Example ===") + print("=== Basic Azure Chat Completion Client Agent Example ===") await non_streaming_example() await streaming_example() diff --git a/python/samples/02-agents/providers/azure/openai_chat_completion_client_azure_with_explicit_settings.py b/python/samples/02-agents/providers/azure/openai_chat_completion_client_with_explicit_settings.py similarity index 71% rename from python/samples/02-agents/providers/azure/openai_chat_completion_client_azure_with_explicit_settings.py rename to python/samples/02-agents/providers/azure/openai_chat_completion_client_with_explicit_settings.py index 16ecc8a091..cf26f6ba38 100644 --- 
a/python/samples/02-agents/providers/azure/openai_chat_completion_client_azure_with_explicit_settings.py +++ b/python/samples/02-agents/providers/azure/openai_chat_completion_client_with_explicit_settings.py @@ -15,16 +15,16 @@ load_dotenv() """ -Azure OpenAI Chat Client with Explicit Settings Example +OpenAI Chat Completion Client with Explicit Settings Example -This sample demonstrates creating Azure OpenAI Chat Client with explicit configuration +This sample connects to Azure OpenAI. + +This sample demonstrates creating an OpenAI Chat Completion Client with explicit configuration settings rather than relying on environment variable defaults. """ -# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; -# see samples/02-agents/tools/function_tool_with_approval.py -# and samples/02-agents/tools/function_tool_with_approval_and_sessions.py. +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production. @tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], @@ -39,13 +39,12 @@ async def main() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option.
- _client = OpenAIChatCompletionClient( - model=os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"], - endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], - credential=AzureCliCredential(), - ) agent = Agent( - client=_client, + client=OpenAIChatCompletionClient( + model=os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"], + azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], + credential=AzureCliCredential(), + ), instructions="You are a helpful weather agent.", tools=[get_weather], ) diff --git a/python/samples/02-agents/providers/azure/openai_chat_completion_client_azure_with_function_tools.py b/python/samples/02-agents/providers/azure/openai_chat_completion_client_with_function_tools.py similarity index 100% rename from python/samples/02-agents/providers/azure/openai_chat_completion_client_azure_with_function_tools.py rename to python/samples/02-agents/providers/azure/openai_chat_completion_client_with_function_tools.py diff --git a/python/samples/02-agents/providers/azure/openai_chat_completion_client_azure_with_session.py b/python/samples/02-agents/providers/azure/openai_chat_completion_client_with_session.py similarity index 100% rename from python/samples/02-agents/providers/azure/openai_chat_completion_client_azure_with_session.py rename to python/samples/02-agents/providers/azure/openai_chat_completion_client_with_session.py diff --git a/python/samples/02-agents/providers/azure/openai_client_basic.py b/python/samples/02-agents/providers/azure/openai_client_basic.py new file mode 100644 index 0000000000..3029c03cda --- /dev/null +++ b/python/samples/02-agents/providers/azure/openai_client_basic.py @@ -0,0 +1,90 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import asyncio +import os +from random import randint +from typing import Annotated + +from agent_framework import Agent, tool +from agent_framework.openai import OpenAIChatClient +from azure.identity import AzureCliCredential +from dotenv import load_dotenv +from pydantic import Field + +# Load environment variables from .env file +load_dotenv() + +""" +Azure OpenAI Chat Client Basic Example + +This sample demonstrates basic usage of OpenAIChatClient with explicit Azure +settings and a credential, showing both streaming and non-streaming responses. +""" + + +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production. +@tool(approval_mode="never_require") +def get_weather( + location: Annotated[str, Field(description="The location to get the weather for.")], +) -> str: + """Get the weather for a given location.""" + conditions = ["sunny", "cloudy", "rainy", "stormy"] + return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." + + +async def non_streaming_example() -> None: + """Example of non-streaming response (get the complete result at once).""" + print("=== Non-streaming Response Example ===") + + agent = Agent( + client=OpenAIChatClient( + model=os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"), + azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"), + api_version=os.getenv("AZURE_OPENAI_API_VERSION"), + credential=AzureCliCredential(), + ), + name="WeatherAgent", + instructions="You are a helpful weather agent.", + tools=get_weather, + ) + + query = "What's the weather in Seattle?" 
+ print(f"User: {query}") + result = await agent.run(query) + print(f"Result: {result}\n") + + +async def streaming_example() -> None: + """Example of streaming response (get results as they are generated).""" + print("=== Streaming Response Example ===") + + agent = Agent( + client=OpenAIChatClient( + model=os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"), + azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"), + api_version=os.getenv("AZURE_OPENAI_API_VERSION"), + credential=AzureCliCredential(), + ), + name="WeatherAgent", + instructions="You are a helpful weather agent.", + tools=get_weather, + ) + + query = "What's the weather in Portland?" + print(f"User: {query}") + print("Agent: ", end="", flush=True) + async for chunk in agent.run(query, stream=True): + if chunk.text: + print(chunk.text, end="", flush=True) + print("\n") + + +async def main() -> None: + print("=== Basic Azure OpenAI Chat Client Agent Example ===") + + await non_streaming_example() + await streaming_example() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/02-agents/providers/azure/openai_client_with_function_tools.py b/python/samples/02-agents/providers/azure/openai_client_with_function_tools.py new file mode 100644 index 0000000000..8080b65418 --- /dev/null +++ b/python/samples/02-agents/providers/azure/openai_client_with_function_tools.py @@ -0,0 +1,137 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import asyncio +from datetime import datetime, timezone +from random import randint +from typing import Annotated + +from agent_framework import Agent, tool +from agent_framework.openai import OpenAIChatClient +from azure.identity import AzureCliCredential +from dotenv import load_dotenv +from pydantic import Field + +# Load environment variables from .env file +load_dotenv() + +""" +Azure OpenAI Chat Client with Function Tools Example + +This sample demonstrates function tool integration with Azure OpenAI Chat Client, +showing both agent-level and query-level tool configuration patterns. +""" + + +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; +# see samples/02-agents/tools/function_tool_with_approval.py +# and samples/02-agents/tools/function_tool_with_approval_and_sessions.py. +@tool(approval_mode="never_require") +def get_weather( + location: Annotated[str, Field(description="The location to get the weather for.")], +) -> str: + """Get the weather for a given location.""" + conditions = ["sunny", "cloudy", "rainy", "stormy"] + return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." + + +@tool(approval_mode="never_require") +def get_time() -> str: + """Get the current UTC time.""" + current_time = datetime.now(timezone.utc) + return f"The current UTC time is {current_time.strftime('%Y-%m-%d %H:%M:%S')}." 
+ + +async def tools_on_agent_level() -> None: + """Example showing tools defined when creating the agent.""" + print("=== Tools Defined on Agent Level ===") + + # Tools are provided when creating the agent + # The agent can use these tools for any query during its lifetime + agent = Agent( + client=OpenAIChatClient(credential=AzureCliCredential()), + instructions="You are a helpful assistant that can provide weather and time information.", + tools=[get_weather, get_time], # Tools defined at agent creation + ) + + # First query - agent can use weather tool + query1 = "What's the weather like in New York?" + print(f"User: {query1}") + result1 = await agent.run(query1) + print(f"Agent: {result1}\n") + + # Second query - agent can use time tool + query2 = "What's the current UTC time?" + print(f"User: {query2}") + result2 = await agent.run(query2) + print(f"Agent: {result2}\n") + + # Third query - agent can use both tools if needed + query3 = "What's the weather in London and what's the current UTC time?" + print(f"User: {query3}") + result3 = await agent.run(query3) + print(f"Agent: {result3}\n") + + +async def tools_on_run_level() -> None: + """Example showing tools passed to the run method.""" + print("=== Tools Passed to Run Method ===") + + # Agent created without tools + agent = Agent( + client=OpenAIChatClient(credential=AzureCliCredential()), + instructions="You are a helpful assistant.", + # No tools defined here + ) + + # First query with weather tool + query1 = "What's the weather like in Seattle?" + print(f"User: {query1}") + result1 = await agent.run(query1, tools=[get_weather]) # Tool passed to run method + print(f"Agent: {result1}\n") + + # Second query with time tool + query2 = "What's the current UTC time?" 
+ print(f"User: {query2}") + result2 = await agent.run(query2, tools=[get_time]) # Different tool for this query + print(f"Agent: {result2}\n") + + # Third query with multiple tools + query3 = "What's the weather in Chicago and what's the current UTC time?" + print(f"User: {query3}") + result3 = await agent.run(query3, tools=[get_weather, get_time]) # Multiple tools + print(f"Agent: {result3}\n") + + +async def mixed_tools_example() -> None: + """Example showing both agent-level tools and run-method tools.""" + print("=== Mixed Tools Example (Agent + Run Method) ===") + + # Agent created with some base tools + agent = Agent( + client=OpenAIChatClient(credential=AzureCliCredential()), + instructions="You are a comprehensive assistant that can help with various information requests.", + tools=[get_weather], # Base tool available for all queries + ) + + # Query using both agent tool and additional run-method tools + query = "What's the weather in Denver and what's the current UTC time?" + print(f"User: {query}") + + # Agent has access to get_weather (from creation) + additional tools from run method + result = await agent.run( + query, + tools=[get_time], # Additional tools for this specific query + ) + print(f"Agent: {result}\n") + + +async def main() -> None: + print("=== Azure OpenAI Chat Client Agent with Function Tools Examples ===\n") + + await tools_on_agent_level() + await tools_on_run_level() + await mixed_tools_example() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/02-agents/providers/azure/openai_client_with_session.py b/python/samples/02-agents/providers/azure/openai_client_with_session.py new file mode 100644 index 0000000000..ad1c87f2d2 --- /dev/null +++ b/python/samples/02-agents/providers/azure/openai_client_with_session.py @@ -0,0 +1,152 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import asyncio +from random import randint +from typing import Annotated + +from agent_framework import Agent, AgentSession, tool +from agent_framework.openai import OpenAIChatClient +from azure.identity import AzureCliCredential +from dotenv import load_dotenv +from pydantic import Field + +# Load environment variables from .env file +load_dotenv() + +""" +Azure OpenAI Chat Client with Session Management Example + +This sample demonstrates session management with Azure OpenAI Chat Client, showing +persistent conversation context and simplified response handling. +""" + + +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; +# see samples/02-agents/tools/function_tool_with_approval.py +# and samples/02-agents/tools/function_tool_with_approval_and_sessions.py. +@tool(approval_mode="never_require") +def get_weather( + location: Annotated[str, Field(description="The location to get the weather for.")], +) -> str: + """Get the weather for a given location.""" + conditions = ["sunny", "cloudy", "rainy", "stormy"] + return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." + + +async def example_with_automatic_session_creation() -> None: + """Example showing automatic session creation.""" + print("=== Automatic Session Creation Example ===") + + agent = Agent( + client=OpenAIChatClient(credential=AzureCliCredential()), + instructions="You are a helpful weather agent.", + tools=get_weather, + ) + + # First conversation - no session provided, will be created automatically + query1 = "What's the weather like in Seattle?" + print(f"User: {query1}") + result1 = await agent.run(query1) + print(f"Agent: {result1.text}") + + # Second conversation - still no session provided, will create another new session + query2 = "What was the last city I asked about?" 
+ print(f"\nUser: {query2}") + result2 = await agent.run(query2) + print(f"Agent: {result2.text}") + print("Note: Each call creates a separate session, so the agent doesn't remember previous context.\n") + + +async def example_with_session_persistence_in_memory() -> None: + """ + Example showing session persistence across multiple conversations. + In this example, messages are stored in-memory. + """ + print("=== Session Persistence Example (In-Memory) ===") + + agent = Agent( + client=OpenAIChatClient(credential=AzureCliCredential()), + instructions="You are a helpful weather agent.", + tools=get_weather, + ) + + # Create a new session that will be reused + session = agent.create_session() + + # First conversation + query1 = "What's the weather like in Tokyo?" + print(f"User: {query1}") + result1 = await agent.run(query1, session=session, store=False) + print(f"Agent: {result1.text}") + + # Second conversation using the same session - maintains context + query2 = "How about London?" + print(f"\nUser: {query2}") + result2 = await agent.run(query2, session=session, store=False) + print(f"Agent: {result2.text}") + + # Third conversation - agent should remember both previous cities + query3 = "Which of the cities I asked about has better weather?" + print(f"\nUser: {query3}") + result3 = await agent.run(query3, session=session, store=False) + print(f"Agent: {result3.text}") + print("Note: The agent remembers context from previous messages in the same session.\n") + + +async def example_with_existing_session_id() -> None: + """ + Example showing how to work with an existing session ID from the service. + In this example, messages are stored on the server using OpenAI conversation state. 
+ """ + print("=== Existing Session ID Example ===") + + # First, create a conversation and capture the session ID + existing_session_id = None + + agent = Agent( + client=OpenAIChatClient(credential=AzureCliCredential()), + instructions="You are a helpful weather agent.", + tools=get_weather, + ) + + # Start a conversation and get the session ID + session = agent.create_session() + + query1 = "What's the weather in Paris?" + print(f"User: {query1}") + result1 = await agent.run(query1, session=session) + print(f"Agent: {result1.text}") + + # The session ID is set after the first response + existing_session_id = session.service_session_id + print(f"Session ID: {existing_session_id}") + + if existing_session_id: + print("\n--- Continuing with the same session ID in a new agent instance ---") + + agent = Agent( + client=OpenAIChatClient(credential=AzureCliCredential()), + instructions="You are a helpful weather agent.", + tools=get_weather, + ) + + # Create a session with the existing ID + session = AgentSession(service_session_id=existing_session_id) + + query2 = "What was the last city I asked about?" 
+ print(f"User: {query2}") + result2 = await agent.run(query2, session=session) + print(f"Agent: {result2.text}") + print("Note: The agent continues the conversation from the previous session by using session ID.\n") + + +async def main() -> None: + print("=== Azure OpenAI Chat Client Session Management Examples ===\n") + + await example_with_automatic_session_creation() + await example_with_session_persistence_in_memory() + await example_with_existing_session_id() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/02-agents/providers/azure/openai_client_with_structured_output.py b/python/samples/02-agents/providers/azure/openai_client_with_structured_output.py new file mode 100644 index 0000000000..3c09efd6d4 --- /dev/null +++ b/python/samples/02-agents/providers/azure/openai_client_with_structured_output.py @@ -0,0 +1,93 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio + +from agent_framework import Agent, AgentResponse +from agent_framework.openai import OpenAIChatClient +from azure.identity import AzureCliCredential +from dotenv import load_dotenv +from pydantic import BaseModel + +# Load environment variables from .env file +load_dotenv() + +""" +Azure OpenAI Chat Client with Structured Output Example + +This sample demonstrates using structured output capabilities with Azure OpenAI Chat Client, +showing Pydantic model integration for type-safe response parsing and data extraction. 
+""" + + +class OutputStruct(BaseModel): + """A structured output for testing purposes.""" + + city: str + description: str + + +async def non_streaming_example() -> None: + print("=== Non-streaming example ===") + + # Create an Azure OpenAI Chat agent + agent = Agent( + client=OpenAIChatClient(credential=AzureCliCredential()), + name="CityAgent", + instructions="You are a helpful agent that describes cities in a structured format.", + ) + + # Ask the agent about a city + query = "Tell me about Paris, France" + print(f"User: {query}") + + # Get structured response from the agent using response_format parameter + result = await agent.run(query, options={"response_format": OutputStruct}) + + # Access the structured output using the parsed value + if structured_data := result.value: + print("Structured Output Agent:") + print(f"City: {structured_data.city}") + print(f"Description: {structured_data.description}") + else: + print(f"Failed to parse response: {result.text}") + + +async def streaming_example() -> None: + print("=== Streaming example ===") + + # Create an Azure OpenAI Chat agent + agent = Agent( + client=OpenAIChatClient(credential=AzureCliCredential()), + name="CityAgent", + instructions="You are a helpful agent that describes cities in a structured format.", + ) + + # Ask the agent about a city + query = "Tell me about Tokyo, Japan" + print(f"User: {query}") + + # Get structured response from streaming agent using AgentResponse.from_update_generator + # This method collects all streaming updates and combines them into a single AgentResponse + result = await AgentResponse.from_update_generator( + agent.run(query, stream=True, options={"response_format": OutputStruct}), + output_format_type=OutputStruct, + ) + + # Access the structured output using the parsed value + if structured_data := result.value: + print("Structured Output (from streaming with AgentResponse.from_update_generator):") + print(f"City: {structured_data.city}") + print(f"Description: 
{structured_data.description}") + else: + print(f"Failed to parse response: {result.text}") + + +async def main() -> None: + print("=== Azure OpenAI Chat Client Agent with Structured Output ===") + + await non_streaming_example() + await streaming_example() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/02-agents/providers/custom/README.md b/python/samples/02-agents/providers/custom/README.md index ac58a77e69..766e5e0269 100644 --- a/python/samples/02-agents/providers/custom/README.md +++ b/python/samples/02-agents/providers/custom/README.md @@ -27,7 +27,7 @@ Both approaches allow you to extend the framework for your specific use cases wh ## Understanding Raw Client Classes -The framework provides `Raw...Client` classes (e.g., `RawOpenAIChatClient`, `RawOpenAIResponsesClient`, `RawAzureAIClient`) that are intermediate implementations without middleware, telemetry, or function invocation support. +The framework provides `Raw...Client` classes (e.g., `RawOpenAIChatClient`, `RawOpenAIChatCompletionClient`, `RawAzureAIClient`) that are intermediate implementations without middleware, telemetry, or function invocation support. 
### Warning: Raw Clients Should Not Normally Be Used Directly @@ -60,8 +60,8 @@ class MyCustomClient( For most use cases, use the fully-featured public client classes which already have all layers correctly composed: -- `OpenAIChatClient` - OpenAI Chat completions with all layers -- `OpenAIResponsesClient` - OpenAI Responses API with all layers +- `OpenAIChatCompletionClient` - OpenAI Chat Completions API with all layers +- `OpenAIChatClient` - OpenAI Responses API with all layers - `AzureOpenAIChatClient` - Azure OpenAI Chat with all layers - `AzureOpenAIResponsesClient` - Azure OpenAI Responses with all layers - `AzureAIClient` - Azure AI Project with all layers diff --git a/python/samples/02-agents/providers/openai/README.md b/python/samples/02-agents/providers/openai/README.md index 20e757d421..db71abfa89 100644 --- a/python/samples/02-agents/providers/openai/README.md +++ b/python/samples/02-agents/providers/openai/README.md @@ -1,67 +1,63 @@ -# OpenAI Agent Framework Examples +# OpenAI Provider Samples -This folder contains examples demonstrating different ways to create and use agents with the OpenAI clients from the `agent_framework.openai` package. +This folder contains OpenAI provider samples for the generic clients in +`agent_framework.openai`. -## Examples +## Chat Completions API samples (`OpenAIChatCompletionClient`) | File | Description | |------|-------------| -| [`openai_assistants_basic.py`](openai_assistants_basic.py) | Basic usage of `OpenAIAssistantProvider` with streaming and non-streaming responses. | -| [`openai_assistants_provider_methods.py`](openai_assistants_provider_methods.py) | Demonstrates all `OpenAIAssistantProvider` methods: `create_agent()`, `get_agent()`, and `as_agent()`. | -| [`openai_assistants_with_code_interpreter.py`](openai_assistants_with_code_interpreter.py) | Using `OpenAIAssistantsClient.get_code_interpreter_tool()` with `OpenAIAssistantProvider` to execute Python code. 
| -| [`openai_assistants_with_existing_assistant.py`](openai_assistants_with_existing_assistant.py) | Working with pre-existing assistants using `get_agent()` and `as_agent()` methods. | -| [`openai_assistants_with_explicit_settings.py`](openai_assistants_with_explicit_settings.py) | Configuring `OpenAIAssistantProvider` with explicit settings including API key and model ID. | -| [`openai_assistants_with_file_search.py`](openai_assistants_with_file_search.py) | Using `OpenAIAssistantsClient.get_file_search_tool()` with `OpenAIAssistantProvider` for file search capabilities. | -| [`openai_assistants_with_function_tools.py`](openai_assistants_with_function_tools.py) | Function tools with `OpenAIAssistantProvider` at both agent-level and query-level. | -| [`openai_assistants_with_response_format.py`](openai_assistants_with_response_format.py) | Structured outputs with `OpenAIAssistantProvider` using Pydantic models. | -| [`openai_assistants_with_session.py`](openai_assistants_with_session.py) | Session management with `OpenAIAssistantProvider` for conversation context persistence. | -| [`openai_chat_client_basic.py`](openai_chat_client_basic.py) | The simplest way to create an agent using `Agent` with `OpenAIChatClient`. Shows both streaming and non-streaming responses for chat-based interactions with OpenAI models. | -| [`openai_chat_client_with_explicit_settings.py`](openai_chat_client_with_explicit_settings.py) | Shows how to initialize an agent with a specific chat client, configuring settings explicitly including API key and model ID. | -| [`openai_chat_client_with_function_tools.py`](openai_chat_client_with_function_tools.py) | Demonstrates how to use function tools with agents. Shows both agent-level tools (defined when creating the agent) and query-level tools (provided with specific queries). 
| -| [`openai_chat_client_with_local_mcp.py`](openai_chat_client_with_local_mcp.py) | Shows how to integrate OpenAI agents with local Model Context Protocol (MCP) servers for enhanced functionality and tool integration. | -| [`openai_chat_client_with_session.py`](openai_chat_client_with_session.py) | Demonstrates session management with OpenAI agents, including automatic session creation for stateless conversations and explicit session management for maintaining conversation context across multiple interactions. | -| [`openai_chat_client_with_web_search.py`](openai_chat_client_with_web_search.py) | Shows how to use `OpenAIChatClient.get_web_search_tool()` for web search capabilities with OpenAI agents. | -| [`openai_chat_client_with_runtime_json_schema.py`](openai_chat_client_with_runtime_json_schema.py) | Shows how to supply a runtime JSON Schema via `additional_chat_options` for structured output without defining a Pydantic model. | -| [`openai_responses_client_basic.py`](openai_responses_client_basic.py) | The simplest way to create an agent using `Agent` with `OpenAIResponsesClient`. Shows both streaming and non-streaming responses for structured response generation with OpenAI models. | -| [`openai_responses_client_image_analysis.py`](openai_responses_client_image_analysis.py) | Demonstrates how to use vision capabilities with agents to analyze images. | -| [`openai_responses_client_image_generation.py`](openai_responses_client_image_generation.py) | Demonstrates how to use `OpenAIResponsesClient.get_image_generation_tool()` to create images based on text descriptions. | -| [`openai_responses_client_reasoning.py`](openai_responses_client_reasoning.py) | Demonstrates how to use reasoning capabilities with OpenAI agents, showing how the agent can provide detailed reasoning for its responses. 
| -| [`openai_responses_client_streaming_image_generation.py`](openai_responses_client_streaming_image_generation.py) | Demonstrates streaming image generation with partial images for real-time image creation feedback and improved user experience. | -| [`openai_responses_client_with_agent_as_tool.py`](openai_responses_client_with_agent_as_tool.py) | Shows how to use the agent-as-tool pattern with OpenAI Responses Client, where one agent delegates work to specialized sub-agents wrapped as tools using `as_tool()`. Demonstrates hierarchical agent architectures. | -| [`openai_responses_client_with_code_interpreter.py`](openai_responses_client_with_code_interpreter.py) | Shows how to use `OpenAIResponsesClient.get_code_interpreter_tool()` to write and execute Python code. | -| [`openai_responses_client_with_code_interpreter_files.py`](openai_responses_client_with_code_interpreter_files.py) | Shows how to use code interpreter with uploaded files for data analysis. | -| [`openai_responses_client_with_explicit_settings.py`](openai_responses_client_with_explicit_settings.py) | Shows how to initialize an agent with a specific responses client, configuring settings explicitly including API key and model ID. | -| [`openai_responses_client_with_file_search.py`](openai_responses_client_with_file_search.py) | Demonstrates how to use `OpenAIResponsesClient.get_file_search_tool()` for searching through uploaded files. | -| [`openai_responses_client_with_function_tools.py`](openai_responses_client_with_function_tools.py) | Demonstrates how to use function tools with agents. Shows both agent-level tools (defined when creating the agent) and run-level tools (provided with specific queries). | -| [`openai_responses_client_with_hosted_mcp.py`](openai_responses_client_with_hosted_mcp.py) | Shows how to use `OpenAIResponsesClient.get_mcp_tool()` for hosted MCP servers, including approval workflows. 
| -| [`openai_responses_client_with_local_mcp.py`](openai_responses_client_with_local_mcp.py) | Shows how to integrate OpenAI agents with local Model Context Protocol (MCP) servers for enhanced functionality and tool integration. | -| [`openai_responses_client_with_runtime_json_schema.py`](openai_responses_client_with_runtime_json_schema.py) | Shows how to supply a runtime JSON Schema via `additional_chat_options` for structured output without defining a Pydantic model. | -| [`openai_responses_client_with_structured_output.py`](openai_responses_client_with_structured_output.py) | Demonstrates how to use structured outputs with OpenAI agents to get structured data responses in predefined formats. | -| [`openai_responses_client_with_session.py`](openai_responses_client_with_session.py) | Demonstrates session management with OpenAI agents, including automatic session creation for stateless conversations and explicit session management for maintaining conversation context across multiple interactions. | -| [`openai_responses_client_with_web_search.py`](openai_responses_client_with_web_search.py) | Shows how to use `OpenAIResponsesClient.get_web_search_tool()` for web search capabilities. | +| [`chat_completion_client_basic.py`](chat_completion_client_basic.py) | Basic non-streaming and streaming chat completion sample with an explicit `gpt-5.4-nano` model and API key. | +| [`chat_completion_client_with_explicit_settings.py`](chat_completion_client_with_explicit_settings.py) | Chat completion sample with explicit model and API key settings. | +| [`chat_completion_client_with_function_tools.py`](chat_completion_client_with_function_tools.py) | Function tools with agent-level and run-level patterns. | +| [`chat_completion_client_with_local_mcp.py`](chat_completion_client_with_local_mcp.py) | Local MCP integration with the chat completions client. 
| +| [`chat_completion_client_with_runtime_json_schema.py`](chat_completion_client_with_runtime_json_schema.py) | Runtime JSON schema output with the chat completions client. | +| [`chat_completion_client_with_session.py`](chat_completion_client_with_session.py) | Session management with the chat completions client. | +| [`chat_completion_client_with_web_search.py`](chat_completion_client_with_web_search.py) | Web search with the chat completions client. | + +## Responses API samples (`OpenAIChatClient`) + +| File | Description | +|------|-------------| +| [`client_basic.py`](client_basic.py) | Basic non-streaming and streaming responses sample with an explicit `gpt-5.4-nano` model and API key. | +| [`client_image_analysis.py`](client_image_analysis.py) | Analyze images with the responses client. | +| [`client_image_generation.py`](client_image_generation.py) | Generate images from text prompts. | +| [`client_reasoning.py`](client_reasoning.py) | Reasoning-focused sample for models such as `gpt-5`. | +| [`client_streaming_image_generation.py`](client_streaming_image_generation.py) | Streaming image generation sample. | +| [`client_with_agent_as_tool.py`](client_with_agent_as_tool.py) | Agent-as-tool orchestration pattern. | +| [`client_with_code_interpreter.py`](client_with_code_interpreter.py) | Code interpreter sample. | +| [`client_with_code_interpreter_files.py`](client_with_code_interpreter_files.py) | Code interpreter sample with uploaded files. | +| [`client_with_explicit_settings.py`](client_with_explicit_settings.py) | Responses client with explicit model and API key settings. | +| [`client_with_file_search.py`](client_with_file_search.py) | Hosted file search sample. | +| [`client_with_function_tools.py`](client_with_function_tools.py) | Function tools with agent-level and run-level patterns. | +| [`client_with_hosted_mcp.py`](client_with_hosted_mcp.py) | Hosted MCP tools and approval workflows. 
| +| [`client_with_local_mcp.py`](client_with_local_mcp.py) | Local MCP integration with the responses client. | +| [`client_with_local_shell.py`](client_with_local_shell.py) | Local shell tool sample. | +| [`client_with_runtime_json_schema.py`](client_with_runtime_json_schema.py) | Runtime JSON schema output with the responses client. | +| [`client_with_session.py`](client_with_session.py) | Session management with the responses client. | +| [`client_with_shell.py`](client_with_shell.py) | Hosted shell tool sample. | +| [`client_with_structured_output.py`](client_with_structured_output.py) | Structured output with Pydantic models. | +| [`client_with_web_search.py`](client_with_web_search.py) | Web search with the responses client. | ## Environment Variables -Make sure to set the following environment variables before running the examples: +Set these before running the OpenAI provider samples: -- `OPENAI_API_KEY`: Your OpenAI API key -- `OPENAI_CHAT_MODEL_ID`: The OpenAI model to use (e.g., `gpt-4o`, `gpt-4o-mini`, `gpt-3.5-turbo`) -- `OPENAI_RESPONSES_MODEL_ID`: The OpenAI model to use (e.g., `gpt-4o`, `gpt-4o-mini`, `gpt-3.5-turbo`) -- For image processing examples, use a vision-capable model like `gpt-4o` or `gpt-4o-mini` +- `OPENAI_API_KEY` +- `OPENAI_MODEL` -Optionally, you can set: -- `OPENAI_ORG_ID`: Your OpenAI organization ID (if applicable) -- `OPENAI_API_BASE_URL`: Your OpenAI base URL (if using a different base URL) +Optionally, you can also set: -## Optional Dependencies +- `OPENAI_ORG_ID` +- `OPENAI_BASE_URL` + +If your shell also contains `AZURE_OPENAI_*` variables, these samples still stay on OpenAI as long as +`OPENAI_API_KEY` is present. To force Azure routing with the generic clients, pass an explicit Azure +input such as `credential`, `azure_endpoint`, or `api_version`, or use the Azure provider samples. 
-Some examples require additional dependencies: +## Optional Dependencies -- **Image Generation Example**: The `openai_responses_client_image_generation.py` example requires PIL (Pillow) for image display. Install with: - ```bash - # Using uv - uv add pillow +Some samples need extra packages: - # Or using pip - pip install pillow - ``` +- `client_image_generation.py` and `client_streaming_image_generation.py` use Pillow for image display. +- MCP samples require the relevant MCP server/tooling you configure locally. diff --git a/python/samples/02-agents/providers/openai/chat_completion_client_basic.py b/python/samples/02-agents/providers/openai/chat_completion_client_basic.py new file mode 100644 index 0000000000..9573ef0b16 --- /dev/null +++ b/python/samples/02-agents/providers/openai/chat_completion_client_basic.py @@ -0,0 +1,85 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +import os +from random import randint +from typing import Annotated + +from agent_framework import Agent, tool +from agent_framework.openai import OpenAIChatCompletionClient +from dotenv import load_dotenv +from pydantic import Field + +# Load environment variables from .env file +load_dotenv() + +""" +OpenAI Chat Completion Client Basic Example + +This sample demonstrates basic usage of OpenAIChatCompletionClient with explicit model and +API key settings, showing both streaming and non-streaming responses. +""" + + +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production. +@tool(approval_mode="never_require") +def get_weather( + location: Annotated[str, Field(description="The location to get the weather for.")], +) -> str: + """Get the weather for a given location.""" + conditions = ["sunny", "cloudy", "rainy", "stormy"] + return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." 
+ + +async def non_streaming_example() -> None: + """Example of non-streaming response (get the complete result at once).""" + print("=== Non-streaming Response Example ===") + + agent = Agent( + client=OpenAIChatCompletionClient( + model="gpt-5.4-nano", + api_key=os.getenv("OPENAI_API_KEY"), + ), + name="WeatherAgent", + instructions="You are a helpful weather agent.", + tools=get_weather, + ) + + query = "What's the weather like in Seattle?" + print(f"User: {query}") + result = await agent.run(query) + print(f"Result: {result}\n") + + +async def streaming_example() -> None: + """Example of streaming response (get results as they are generated).""" + print("=== Streaming Response Example ===") + + agent = Agent( + client=OpenAIChatCompletionClient( + model="gpt-5.4-nano", + api_key=os.getenv("OPENAI_API_KEY"), + ), + name="WeatherAgent", + instructions="You are a helpful weather agent.", + tools=get_weather, + ) + + query = "What's the weather like in Portland?" + print(f"User: {query}") + print("Agent: ", end="", flush=True) + async for chunk in agent.run(query, stream=True): + if chunk.text: + print(chunk.text, end="", flush=True) + print("\n") + + +async def main() -> None: + print("=== Basic OpenAI Chat Completion Client Agent Example ===") + + await non_streaming_example() + await streaming_example() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/02-agents/providers/openai/openai_responses_client_with_explicit_settings.py b/python/samples/02-agents/providers/openai/chat_completion_client_with_explicit_settings.py similarity index 74% rename from python/samples/02-agents/providers/openai/openai_responses_client_with_explicit_settings.py rename to python/samples/02-agents/providers/openai/chat_completion_client_with_explicit_settings.py index 20a3f720d1..59d11a4fda 100644 --- a/python/samples/02-agents/providers/openai/openai_responses_client_with_explicit_settings.py +++ 
b/python/samples/02-agents/providers/openai/chat_completion_client_with_explicit_settings.py @@ -6,7 +6,7 @@ from typing import Annotated from agent_framework import Agent, tool -from agent_framework.openai import OpenAIResponsesClient +from agent_framework.openai import OpenAIChatCompletionClient from dotenv import load_dotenv from pydantic import Field @@ -14,9 +14,9 @@ load_dotenv() """ -OpenAI Responses Client with Explicit Settings Example +OpenAI Chat Completion Client with Explicit Settings Example -This sample demonstrates creating OpenAI Responses Client with explicit configuration +This sample demonstrates creating OpenAI Chat Completion Client with explicit configuration settings rather than relying on environment variable defaults. """ @@ -34,15 +34,13 @@ def get_weather( async def main() -> None: - print("=== OpenAI Responses Client with Explicit Settings ===") - - _client = OpenAIResponsesClient( - model=os.environ["OPENAI_MODEL"], - api_key=os.environ["OPENAI_API_KEY"], - ) + print("=== OpenAI Chat Completion Client with Explicit Settings ===") agent = Agent( - client=_client, + client=OpenAIChatCompletionClient( + model=os.environ["OPENAI_MODEL"], + api_key=os.environ["OPENAI_API_KEY"], + ), instructions="You are a helpful weather agent.", tools=get_weather, ) diff --git a/python/samples/02-agents/providers/openai/openai_responses_client_with_function_tools.py b/python/samples/02-agents/providers/openai/chat_completion_client_with_function_tools.py similarity index 91% rename from python/samples/02-agents/providers/openai/openai_responses_client_with_function_tools.py rename to python/samples/02-agents/providers/openai/chat_completion_client_with_function_tools.py index 55f0ed9e19..7bbc6b9744 100644 --- a/python/samples/02-agents/providers/openai/openai_responses_client_with_function_tools.py +++ b/python/samples/02-agents/providers/openai/chat_completion_client_with_function_tools.py @@ -6,7 +6,7 @@ from typing import Annotated from agent_framework 
import Agent, tool -from agent_framework.openai import OpenAIResponsesClient +from agent_framework.openai import OpenAIChatCompletionClient from dotenv import load_dotenv from pydantic import Field @@ -14,9 +14,9 @@ load_dotenv() """ -OpenAI Responses Client with Function Tools Example +OpenAI Chat Completion Client with Function Tools Example -This sample demonstrates function tool integration with OpenAI Responses Client, +This sample demonstrates function tool integration with OpenAI Chat Completion Client, showing both agent-level and query-level tool configuration patterns. """ @@ -47,7 +47,7 @@ async def tools_on_agent_level() -> None: # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime agent = Agent( - client=OpenAIResponsesClient(), + client=OpenAIChatCompletionClient(), instructions="You are a helpful assistant that can provide weather and time information.", tools=[get_weather, get_time], # Tools defined at agent creation ) @@ -77,7 +77,7 @@ async def tools_on_run_level() -> None: # Agent created without tools agent = Agent( - client=OpenAIResponsesClient(), + client=OpenAIChatCompletionClient(), instructions="You are a helpful assistant.", # No tools defined here ) @@ -107,7 +107,7 @@ async def mixed_tools_example() -> None: # Agent created with some base tools agent = Agent( - client=OpenAIResponsesClient(), + client=OpenAIChatCompletionClient(), instructions="You are a comprehensive assistant that can help with various information requests.", tools=[get_weather], # Base tool available for all queries ) @@ -125,7 +125,7 @@ async def mixed_tools_example() -> None: async def main() -> None: - print("=== OpenAI Responses Client Agent with Function Tools Examples ===\n") + print("=== OpenAI Chat Completion Client Agent with Function Tools Examples ===\n") await tools_on_agent_level() await tools_on_run_level() diff --git 
a/python/samples/02-agents/providers/openai/openai_chat_client_with_local_mcp.py b/python/samples/02-agents/providers/openai/chat_completion_client_with_local_mcp.py similarity index 88% rename from python/samples/02-agents/providers/openai/openai_chat_client_with_local_mcp.py rename to python/samples/02-agents/providers/openai/chat_completion_client_with_local_mcp.py index 00057dd76d..1c2387dc24 100644 --- a/python/samples/02-agents/providers/openai/openai_chat_client_with_local_mcp.py +++ b/python/samples/02-agents/providers/openai/chat_completion_client_with_local_mcp.py @@ -3,17 +3,17 @@ import asyncio from agent_framework import Agent, MCPStreamableHTTPTool -from agent_framework.openai import OpenAIChatClient +from agent_framework.openai import OpenAIChatCompletionClient from dotenv import load_dotenv # Load environment variables from .env file load_dotenv() """ -OpenAI Chat Client with Local MCP Example +OpenAI Chat Completion Client with Local MCP Example This sample demonstrates integrating Model Context Protocol (MCP) tools with -OpenAI Chat Client for extended functionality and external service access. +OpenAI Chat Completion Client for extended functionality and external service access. The Agent Framework now supports enhanced metadata extraction from MCP tool results, including error states, token usage, costs, and other arbitrary @@ -34,7 +34,7 @@ async def mcp_tools_on_run_level() -> None: url="https://learn.microsoft.com/api/mcp", ) as mcp_server, Agent( - client=OpenAIChatClient(), + client=OpenAIChatCompletionClient(), name="DocsAgent", instructions="You are a helpful assistant that can help with microsoft documentation questions.", ) as agent, @@ -60,7 +60,7 @@ async def mcp_tools_on_agent_level() -> None: # The agent can use these tools for any query during its lifetime # The agent will connect to the MCP server through its context manager. 
async with Agent( - client=OpenAIChatClient(), + client=OpenAIChatCompletionClient(), name="DocsAgent", instructions="You are a helpful assistant that can help with microsoft documentation questions.", tools=MCPStreamableHTTPTool( # Tools defined at agent creation @@ -82,7 +82,7 @@ async def mcp_tools_on_agent_level() -> None: async def main() -> None: - print("=== OpenAI Chat Client Agent with MCP Tools Examples ===\n") + print("=== OpenAI Chat Completion Client Agent with MCP Tools Examples ===\n") await mcp_tools_on_agent_level() await mcp_tools_on_run_level() diff --git a/python/samples/02-agents/providers/openai/openai_responses_client_with_runtime_json_schema.py b/python/samples/02-agents/providers/openai/chat_completion_client_with_runtime_json_schema.py similarity index 89% rename from python/samples/02-agents/providers/openai/openai_responses_client_with_runtime_json_schema.py rename to python/samples/02-agents/providers/openai/chat_completion_client_with_runtime_json_schema.py index cdc4ce13fb..4cf5dfb844 100644 --- a/python/samples/02-agents/providers/openai/openai_responses_client_with_runtime_json_schema.py +++ b/python/samples/02-agents/providers/openai/chat_completion_client_with_runtime_json_schema.py @@ -4,14 +4,14 @@ import json from agent_framework import Agent -from agent_framework.openai import OpenAIResponsesClient +from agent_framework.openai import OpenAIChatCompletionClient, OpenAIChatOptions from dotenv import load_dotenv # Load environment variables from .env file load_dotenv() """ -OpenAI Chat Client Runtime JSON Schema Example +OpenAI Chat Completion Client Runtime JSON Schema Example Demonstrates structured outputs when the schema is only known at runtime. 
Uses additional_chat_options to pass a JSON Schema payload directly to OpenAI @@ -38,7 +38,7 @@ async def non_streaming_example() -> None: print("=== Non-streaming runtime JSON schema example ===") agent = Agent( - client=OpenAIResponsesClient(), + client=OpenAIChatCompletionClient[OpenAIChatOptions](), name="RuntimeSchemaAgent", instructions="Return only JSON that matches the provided schema. Do not add commentary.", ) @@ -72,7 +72,7 @@ async def streaming_example() -> None: print("=== Streaming runtime JSON schema example ===") agent = Agent( - client=OpenAIResponsesClient(), + client=OpenAIChatCompletionClient(), name="RuntimeSchemaAgent", instructions="Return only JSON that matches the provided schema. Do not add commentary.", ) @@ -108,7 +108,7 @@ async def streaming_example() -> None: async def main() -> None: - print("=== OpenAI Chat Client with runtime JSON Schema ===") + print("=== OpenAI Chat Completion Client with runtime JSON Schema ===") await non_streaming_example() await streaming_example() diff --git a/python/samples/02-agents/providers/openai/openai_chat_client_with_session.py b/python/samples/02-agents/providers/openai/chat_completion_client_with_session.py similarity index 91% rename from python/samples/02-agents/providers/openai/openai_chat_client_with_session.py rename to python/samples/02-agents/providers/openai/chat_completion_client_with_session.py index 773b18b3cc..99aac09e36 100644 --- a/python/samples/02-agents/providers/openai/openai_chat_client_with_session.py +++ b/python/samples/02-agents/providers/openai/chat_completion_client_with_session.py @@ -5,7 +5,7 @@ from typing import Annotated from agent_framework import Agent, AgentSession, InMemoryHistoryProvider, tool -from agent_framework.openai import OpenAIChatClient +from agent_framework.openai import OpenAIChatCompletionClient from dotenv import load_dotenv from pydantic import Field @@ -13,9 +13,9 @@ load_dotenv() """ -OpenAI Chat Client with Session Management Example +OpenAI Chat 
Completion Client with Session Management Example -This sample demonstrates session management with OpenAI Chat Client, showing +This sample demonstrates session management with OpenAI Chat Completion Client, showing conversation sessions and message history preservation across interactions. """ @@ -37,7 +37,7 @@ async def example_with_automatic_session_creation() -> None: print("=== Automatic Session Creation Example ===") agent = Agent( - client=OpenAIChatClient(), + client=OpenAIChatCompletionClient(), instructions="You are a helpful weather agent.", tools=get_weather, ) @@ -62,7 +62,7 @@ async def example_with_session_persistence() -> None: print("Using the same session across multiple conversations to maintain context.\n") agent = Agent( - client=OpenAIChatClient(), + client=OpenAIChatCompletionClient(), instructions="You are a helpful weather agent.", tools=get_weather, ) @@ -95,7 +95,7 @@ async def example_with_existing_session_messages() -> None: print("=== Existing Session Messages Example ===") agent = Agent( - client=OpenAIChatClient(), + client=OpenAIChatCompletionClient(), instructions="You are a helpful weather agent.", tools=get_weather, ) @@ -118,7 +118,7 @@ async def example_with_existing_session_messages() -> None: # Create a new agent instance but use the existing session with its message history new_agent = Agent( - client=OpenAIChatClient(), + client=OpenAIChatCompletionClient(), instructions="You are a helpful weather agent.", tools=get_weather, ) @@ -142,7 +142,7 @@ async def example_with_existing_session_messages() -> None: async def main() -> None: - print("=== OpenAI Chat Client Agent Session Management Examples ===\n") + print("=== OpenAI Chat Completion Client Agent Session Management Examples ===\n") await example_with_automatic_session_creation() await example_with_session_persistence() diff --git a/python/samples/02-agents/providers/openai/openai_chat_client_with_web_search.py 
b/python/samples/02-agents/providers/openai/chat_completion_client_with_web_search.py similarity index 86% rename from python/samples/02-agents/providers/openai/openai_chat_client_with_web_search.py rename to python/samples/02-agents/providers/openai/chat_completion_client_with_web_search.py index 623dc25f43..f5da9ba633 100644 --- a/python/samples/02-agents/providers/openai/openai_chat_client_with_web_search.py +++ b/python/samples/02-agents/providers/openai/chat_completion_client_with_web_search.py @@ -3,22 +3,22 @@ import asyncio from agent_framework import Agent -from agent_framework.openai import OpenAIChatClient +from agent_framework.openai import OpenAIChatCompletionClient from dotenv import load_dotenv # Load environment variables from .env file load_dotenv() """ -OpenAI Chat Client with Web Search Example +OpenAI Chat Completion Client with Web Search Example -This sample demonstrates using get_web_search_tool() with OpenAI Chat Client +This sample demonstrates using get_web_search_tool() with OpenAI Chat Completion Client for real-time information retrieval and current data access. """ async def main() -> None: - client = OpenAIChatClient(model="gpt-4o-search-preview") + client = OpenAIChatCompletionClient(model="gpt-4o-search-preview") # Create web search tool with location context web_search_tool = client.get_web_search_tool( diff --git a/python/samples/02-agents/providers/openai/openai_chat_client_basic.py b/python/samples/02-agents/providers/openai/client_basic.py similarity index 73% rename from python/samples/02-agents/providers/openai/openai_chat_client_basic.py rename to python/samples/02-agents/providers/openai/client_basic.py index d2834fe1e9..138f1e57b6 100644 --- a/python/samples/02-agents/providers/openai/openai_chat_client_basic.py +++ b/python/samples/02-agents/providers/openai/client_basic.py @@ -1,12 +1,14 @@ # Copyright (c) Microsoft. All rights reserved. 
import asyncio +import os from random import randint from typing import Annotated from agent_framework import Agent, tool from agent_framework.openai import OpenAIChatClient from dotenv import load_dotenv +from pydantic import Field # Load environment variables from .env file load_dotenv() @@ -14,17 +16,15 @@ """ OpenAI Chat Client Basic Example -This sample demonstrates basic usage of OpenAIChatClient for direct chat-based -interactions, showing both streaming and non-streaming responses. +This sample demonstrates basic usage of OpenAIChatClient with explicit model and +API key settings, showing both streaming and non-streaming responses. """ -# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; -# see samples/02-agents/tools/function_tool_with_approval.py -# and samples/02-agents/tools/function_tool_with_approval_and_sessions.py. +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production. @tool(approval_mode="never_require") def get_weather( - location: Annotated[str, "The location to get the weather for."], + location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: """Get the weather for a given location.""" conditions = ["sunny", "cloudy", "rainy", "stormy"] @@ -36,13 +36,16 @@ async def non_streaming_example() -> None: print("=== Non-streaming Response Example ===") agent = Agent( - client=OpenAIChatClient(), + client=OpenAIChatClient( + model="gpt-5.4-nano", + api_key=os.getenv("OPENAI_API_KEY"), + ), name="WeatherAgent", instructions="You are a helpful weather agent.", tools=get_weather, ) - query = "What's the weather like in Seattle?" + query = "What's the weather in Seattle?" 
print(f"User: {query}") result = await agent.run(query) print(f"Result: {result}\n") @@ -53,13 +56,16 @@ async def streaming_example() -> None: print("=== Streaming Response Example ===") agent = Agent( - client=OpenAIChatClient(), + client=OpenAIChatClient( + model="gpt-5.4-nano", + api_key=os.getenv("OPENAI_API_KEY"), + ), name="WeatherAgent", instructions="You are a helpful weather agent.", tools=get_weather, ) - query = "What's the weather like in Portland?" + query = "What's the weather in Portland?" print(f"User: {query}") print("Agent: ", end="", flush=True) async for chunk in agent.run(query, stream=True): diff --git a/python/samples/02-agents/providers/openai/openai_responses_client_image_analysis.py b/python/samples/02-agents/providers/openai/client_image_analysis.py similarity index 70% rename from python/samples/02-agents/providers/openai/openai_responses_client_image_analysis.py rename to python/samples/02-agents/providers/openai/client_image_analysis.py index 82fee38455..00c0207528 100644 --- a/python/samples/02-agents/providers/openai/openai_responses_client_image_analysis.py +++ b/python/samples/02-agents/providers/openai/client_image_analysis.py @@ -3,26 +3,26 @@ import asyncio from agent_framework import Agent, Content -from agent_framework.openai import OpenAIResponsesClient +from agent_framework.openai import OpenAIChatClient from dotenv import load_dotenv # Load environment variables from .env file load_dotenv() """ -OpenAI Responses Client Image Analysis Example +OpenAI Chat Client Image Analysis Example -This sample demonstrates using OpenAI Responses Client for image analysis and vision tasks, +This sample demonstrates using OpenAI Chat Client for image analysis and vision tasks, showing multi-modal content handling with text and images. """ async def main(): - print("=== OpenAI Responses Agent with Image Analysis ===") + print("=== OpenAI Chat Client Agent with Image Analysis ===") - # 1. 
Create an OpenAI Responses agent with vision capabilities + # 1. Create an OpenAI Chat agent with vision capabilities agent = Agent( - client=OpenAIResponsesClient(), + client=OpenAIChatClient(), name="VisionAgent", instructions="You are a image analysist, you get a image and need to respond with what you see in the picture.", ) diff --git a/python/samples/02-agents/providers/openai/openai_responses_client_image_generation.py b/python/samples/02-agents/providers/openai/client_image_generation.py similarity index 90% rename from python/samples/02-agents/providers/openai/openai_responses_client_image_generation.py rename to python/samples/02-agents/providers/openai/client_image_generation.py index 6e01a4dbbd..84e50674d4 100644 --- a/python/samples/02-agents/providers/openai/openai_responses_client_image_generation.py +++ b/python/samples/02-agents/providers/openai/client_image_generation.py @@ -7,17 +7,17 @@ from pathlib import Path from agent_framework import Agent, Content -from agent_framework.openai import OpenAIResponsesClient +from agent_framework.openai import OpenAIChatClient from dotenv import load_dotenv # Load environment variables from .env file load_dotenv() """ -OpenAI Responses Client Image Generation Example +OpenAI Chat Client Image Generation Example This sample demonstrates how to generate images using OpenAI's DALL-E models -through the Responses Client. Image generation capabilities enable AI to create visual content from text, +through the Chat Client. Image generation capabilities enable AI to create visual content from text, making it ideal for creative applications, content creation, design prototyping, and automated visual asset generation. 
""" @@ -57,10 +57,10 @@ def save_image(output: Content) -> None: async def main() -> None: - print("=== OpenAI Responses Image Generation Agent Example ===") + print("=== OpenAI Chat Image Generation Agent Example ===") # Create an agent with customized image generation options - client = OpenAIResponsesClient() + client = OpenAIChatClient() agent = Agent( client=client, instructions="You are a helpful AI that can generate images.", diff --git a/python/samples/02-agents/providers/openai/openai_responses_client_reasoning.py b/python/samples/02-agents/providers/openai/client_reasoning.py similarity index 91% rename from python/samples/02-agents/providers/openai/openai_responses_client_reasoning.py rename to python/samples/02-agents/providers/openai/client_reasoning.py index a4fc3849b8..2eea8d2106 100644 --- a/python/samples/02-agents/providers/openai/openai_responses_client_reasoning.py +++ b/python/samples/02-agents/providers/openai/client_reasoning.py @@ -3,14 +3,14 @@ import asyncio from agent_framework import Agent -from agent_framework.openai import OpenAIResponsesClient, OpenAIResponsesOptions +from agent_framework.openai import OpenAIChatClient, OpenAIChatOptions from dotenv import load_dotenv # Load environment variables from .env file load_dotenv() """ -OpenAI Responses Client Reasoning Example +OpenAI Chat Client Reasoning Example This sample demonstrates advanced reasoning capabilities using OpenAI's gpt-5 models, showing step-by-step reasoning process visualization and complex problem-solving. @@ -25,7 +25,7 @@ agent = Agent( - client=OpenAIResponsesClient[OpenAIResponsesOptions](model_id="gpt-5"), + client=OpenAIChatClient[OpenAIChatOptions](model_id="gpt-5"), name="MathHelper", instructions="You are a personal math tutor. 
When asked a math question, " "reason over how best to approach the problem and share your thought process.", @@ -76,7 +76,7 @@ async def streaming_reasoning_example() -> None: async def main() -> None: - print("\033[92m=== Basic OpenAI Responses Reasoning Agent Example ===\033[0m") + print("\033[92m=== Basic OpenAI Chat Reasoning Agent Example ===\033[0m") await reasoning_example() await streaming_reasoning_example() diff --git a/python/samples/02-agents/providers/openai/openai_responses_client_streaming_image_generation.py b/python/samples/02-agents/providers/openai/client_streaming_image_generation.py similarity index 96% rename from python/samples/02-agents/providers/openai/openai_responses_client_streaming_image_generation.py rename to python/samples/02-agents/providers/openai/client_streaming_image_generation.py index 7aafd6f704..412e6f8e6a 100644 --- a/python/samples/02-agents/providers/openai/openai_responses_client_streaming_image_generation.py +++ b/python/samples/02-agents/providers/openai/client_streaming_image_generation.py @@ -7,12 +7,12 @@ import anyio from agent_framework import Agent, Content -from agent_framework.openai import OpenAIResponsesClient +from agent_framework.openai import OpenAIChatClient from dotenv import load_dotenv # Load environment variables from .env file load_dotenv() -"""OpenAI Responses Client Streaming Image Generation Example +"""OpenAI Chat Client Streaming Image Generation Example Demonstrates streaming partial image generation using OpenAI's image generation tool. Shows progressive image rendering with partial images for improved user experience. 
Note: The number of partial images received depends on generation speed: @@ -42,7 +42,7 @@ async def main(): """Demonstrate streaming image generation with partial images.""" print("=== OpenAI Streaming Image Generation Example ===\n") # Create agent with streaming image generation enabled - client = OpenAIResponsesClient() + client = OpenAIChatClient() agent = Agent( client=client, instructions="You are a helpful agent that can generate images.", diff --git a/python/samples/02-agents/providers/openai/openai_responses_client_with_agent_as_tool.py b/python/samples/02-agents/providers/openai/client_with_agent_as_tool.py similarity index 90% rename from python/samples/02-agents/providers/openai/openai_responses_client_with_agent_as_tool.py rename to python/samples/02-agents/providers/openai/client_with_agent_as_tool.py index 567c7fcaef..d8a991242d 100644 --- a/python/samples/02-agents/providers/openai/openai_responses_client_with_agent_as_tool.py +++ b/python/samples/02-agents/providers/openai/client_with_agent_as_tool.py @@ -4,14 +4,14 @@ from collections.abc import Awaitable, Callable from agent_framework import Agent, FunctionInvocationContext -from agent_framework.openai import OpenAIResponsesClient +from agent_framework.openai import OpenAIChatClient from dotenv import load_dotenv # Load environment variables from .env file load_dotenv() """ -OpenAI Responses Client Agent-as-Tool Example +OpenAI Chat Client Agent-as-Tool Example Demonstrates hierarchical agent architectures where one agent delegates work to specialized sub-agents wrapped as tools using as_tool(). 
@@ -35,9 +35,9 @@ async def logging_middleware( async def main() -> None: - print("=== OpenAI Responses Client Agent-as-Tool Pattern ===") + print("=== OpenAI Chat Client Agent-as-Tool Pattern ===") - client = OpenAIResponsesClient() + client = OpenAIChatClient() # Create a specialized writer agent writer = Agent( diff --git a/python/samples/02-agents/providers/openai/openai_responses_client_with_code_interpreter.py b/python/samples/02-agents/providers/openai/client_with_code_interpreter.py similarity index 85% rename from python/samples/02-agents/providers/openai/openai_responses_client_with_code_interpreter.py rename to python/samples/02-agents/providers/openai/client_with_code_interpreter.py index c3a8eba82a..f318c29419 100644 --- a/python/samples/02-agents/providers/openai/openai_responses_client_with_code_interpreter.py +++ b/python/samples/02-agents/providers/openai/client_with_code_interpreter.py @@ -6,25 +6,25 @@ Agent, Content, ) -from agent_framework.openai import OpenAIResponsesClient +from agent_framework.openai import OpenAIChatClient from dotenv import load_dotenv # Load environment variables from .env file load_dotenv() """ -OpenAI Responses Client with Code Interpreter Example +OpenAI Chat Client with Code Interpreter Example -This sample demonstrates using get_code_interpreter_tool() with OpenAI Responses Client +This sample demonstrates using get_code_interpreter_tool() with OpenAI Chat Client for Python code execution and mathematical problem solving. 
""" async def main() -> None: - """Example showing how to use the code interpreter tool with OpenAI Responses.""" - print("=== OpenAI Responses Agent with Code Interpreter Example ===") + """Example showing how to use the code interpreter tool with OpenAI Chat.""" + print("=== OpenAI Chat Client Agent with Code Interpreter Example ===") - client = OpenAIResponsesClient() + client = OpenAIChatClient() agent = Agent( client=client, instructions="You are a helpful assistant that can write and execute Python code to solve problems.", diff --git a/python/samples/02-agents/providers/openai/openai_responses_client_with_code_interpreter_files.py b/python/samples/02-agents/providers/openai/client_with_code_interpreter_files.py similarity index 92% rename from python/samples/02-agents/providers/openai/openai_responses_client_with_code_interpreter_files.py rename to python/samples/02-agents/providers/openai/client_with_code_interpreter_files.py index 1636a22912..c286feff10 100644 --- a/python/samples/02-agents/providers/openai/openai_responses_client_with_code_interpreter_files.py +++ b/python/samples/02-agents/providers/openai/client_with_code_interpreter_files.py @@ -5,7 +5,7 @@ import tempfile from agent_framework import Agent -from agent_framework.openai import OpenAIResponsesClient +from agent_framework.openai import OpenAIChatClient from dotenv import load_dotenv from openai import AsyncOpenAI @@ -13,9 +13,9 @@ load_dotenv() """ -OpenAI Responses Client with Code Interpreter and Files Example +OpenAI Chat Client with Code Interpreter and Files Example -This sample demonstrates using get_code_interpreter_tool() with OpenAI Responses Client +This sample demonstrates using get_code_interpreter_tool() with OpenAI Chat Client for Python code execution and data analysis with uploaded files. 
""" @@ -69,8 +69,8 @@ async def main() -> None: temp_file_path, file_id = await create_sample_file_and_upload(openai_client) - # Create agent using OpenAI Responses client - client = OpenAIResponsesClient() + # Create agent using OpenAI Chat client + client = OpenAIChatClient() agent = Agent( client=client, instructions="You are a helpful assistant that can analyze data files using Python code.", diff --git a/python/samples/02-agents/providers/openai/openai_chat_client_with_explicit_settings.py b/python/samples/02-agents/providers/openai/client_with_explicit_settings.py similarity index 100% rename from python/samples/02-agents/providers/openai/openai_chat_client_with_explicit_settings.py rename to python/samples/02-agents/providers/openai/client_with_explicit_settings.py diff --git a/python/samples/02-agents/providers/openai/openai_responses_client_with_file_search.py b/python/samples/02-agents/providers/openai/client_with_file_search.py similarity index 85% rename from python/samples/02-agents/providers/openai/openai_responses_client_with_file_search.py rename to python/samples/02-agents/providers/openai/client_with_file_search.py index b6c9ac352f..042d888dff 100644 --- a/python/samples/02-agents/providers/openai/openai_responses_client_with_file_search.py +++ b/python/samples/02-agents/providers/openai/client_with_file_search.py @@ -3,23 +3,23 @@ import asyncio from agent_framework import Agent -from agent_framework.openai import OpenAIResponsesClient +from agent_framework.openai import OpenAIChatClient from dotenv import load_dotenv # Load environment variables from .env file load_dotenv() """ -OpenAI Responses Client with File Search Example +OpenAI Chat Client with File Search Example -This sample demonstrates using get_file_search_tool() with OpenAI Responses Client +This sample demonstrates using get_file_search_tool() with OpenAI Chat Client for direct document-based question answering and information retrieval. 
""" # Helper functions -async def create_vector_store(client: OpenAIResponsesClient) -> tuple[str, str]: +async def create_vector_store(client: OpenAIChatClient) -> tuple[str, str]: """Create a vector store with sample documents.""" file = await client.client.files.create( file=("todays_weather.txt", b"The weather today is sunny with a high of 75F."), purpose="user_data" @@ -35,14 +35,14 @@ async def create_vector_store(client: OpenAIResponsesClient) -> tuple[str, str]: return file.id, vector_store.id -async def delete_vector_store(client: OpenAIResponsesClient, file_id: str, vector_store_id: str) -> None: +async def delete_vector_store(client: OpenAIChatClient, file_id: str, vector_store_id: str) -> None: """Delete the vector store after using it.""" await client.client.vector_stores.delete(vector_store_id=vector_store_id) await client.client.files.delete(file_id=file_id) async def main() -> None: - client = OpenAIResponsesClient() + client = OpenAIChatClient() message = "What is the weather today? Do a file search to find the answer." 
diff --git a/python/samples/02-agents/providers/openai/openai_chat_client_with_function_tools.py b/python/samples/02-agents/providers/openai/client_with_function_tools.py similarity index 100% rename from python/samples/02-agents/providers/openai/openai_chat_client_with_function_tools.py rename to python/samples/02-agents/providers/openai/client_with_function_tools.py diff --git a/python/samples/02-agents/providers/openai/openai_responses_client_with_hosted_mcp.py b/python/samples/02-agents/providers/openai/client_with_hosted_mcp.py similarity index 95% rename from python/samples/02-agents/providers/openai/openai_responses_client_with_hosted_mcp.py rename to python/samples/02-agents/providers/openai/client_with_hosted_mcp.py index 45e7ae736a..ffcdadb8da 100644 --- a/python/samples/02-agents/providers/openai/openai_responses_client_with_hosted_mcp.py +++ b/python/samples/02-agents/providers/openai/client_with_hosted_mcp.py @@ -4,7 +4,7 @@ from typing import TYPE_CHECKING, Any from agent_framework import Agent -from agent_framework.openai import OpenAIResponsesClient +from agent_framework.openai import OpenAIChatClient from dotenv import load_dotenv if TYPE_CHECKING: @@ -14,10 +14,10 @@ load_dotenv() """ -OpenAI Responses Client with Hosted MCP Example +OpenAI Chat Client with Hosted MCP Example This sample demonstrates integrating hosted Model Context Protocol (MCP) tools with -OpenAI Responses Client, including user approval workflows for function call security. +OpenAI Chat Client, including user approval workflows for function call security. 
""" @@ -102,7 +102,7 @@ async def run_hosted_mcp_without_session_and_specific_approval() -> None: """Example showing Mcp Tools with approvals without using a session.""" print("=== Mcp with approvals and without session ===") - client = OpenAIResponsesClient() + client = OpenAIChatClient() # Create MCP tool with specific approval mode mcp_tool = client.get_mcp_tool( name="Microsoft Learn MCP", @@ -135,7 +135,7 @@ async def run_hosted_mcp_without_approval() -> None: """Example showing Mcp Tools without approvals.""" print("=== Mcp without approvals ===") - client = OpenAIResponsesClient() + client = OpenAIChatClient() # Create MCP tool that never requires approval mcp_tool = client.get_mcp_tool( name="Microsoft Learn MCP", @@ -167,7 +167,7 @@ async def run_hosted_mcp_with_session() -> None: """Example showing Mcp Tools with approvals using a session.""" print("=== Mcp with approvals and with session ===") - client = OpenAIResponsesClient() + client = OpenAIChatClient() # Create MCP tool that always requires approval mcp_tool = client.get_mcp_tool( name="Microsoft Learn MCP", @@ -200,7 +200,7 @@ async def run_hosted_mcp_with_session_streaming() -> None: """Example showing Mcp Tools with approvals using a session.""" print("=== Mcp with approvals and with session ===") - client = OpenAIResponsesClient() + client = OpenAIChatClient() # Create MCP tool that always requires approval mcp_tool = client.get_mcp_tool( name="Microsoft Learn MCP", @@ -234,7 +234,7 @@ async def run_hosted_mcp_with_session_streaming() -> None: async def main() -> None: - print("=== OpenAI Responses Client Agent with Hosted Mcp Tools Examples ===\n") + print("=== OpenAI Chat Client Agent with Hosted Mcp Tools Examples ===\n") await run_hosted_mcp_without_approval() await run_hosted_mcp_without_session_and_specific_approval() diff --git a/python/samples/02-agents/providers/openai/openai_responses_client_with_local_mcp.py b/python/samples/02-agents/providers/openai/client_with_local_mcp.py 
similarity index 90% rename from python/samples/02-agents/providers/openai/openai_responses_client_with_local_mcp.py rename to python/samples/02-agents/providers/openai/client_with_local_mcp.py index 8f136021bb..f7b14a24b2 100644 --- a/python/samples/02-agents/providers/openai/openai_responses_client_with_local_mcp.py +++ b/python/samples/02-agents/providers/openai/client_with_local_mcp.py @@ -3,17 +3,17 @@ import asyncio from agent_framework import Agent, MCPStreamableHTTPTool -from agent_framework.openai import OpenAIResponsesClient +from agent_framework.openai import OpenAIChatClient from dotenv import load_dotenv # Load environment variables from .env file load_dotenv() """ -OpenAI Responses Client with Local MCP Example +OpenAI Chat Client with Local MCP Example This sample demonstrates integrating local Model Context Protocol (MCP) tools with -OpenAI Responses Client for direct response generation with external capabilities. +OpenAI Chat Client for direct response generation with external capabilities. 
""" @@ -27,7 +27,7 @@ async def streaming_with_mcp(show_raw_stream: bool = False) -> None: # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime async with Agent( - client=OpenAIResponsesClient(), + client=OpenAIChatClient(), name="DocsAgent", instructions="You are a helpful assistant that can help with microsoft documentation questions.", tools=MCPStreamableHTTPTool( # Tools defined at agent creation @@ -65,7 +65,7 @@ async def run_with_mcp() -> None: # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime async with Agent( - client=OpenAIResponsesClient(), + client=OpenAIChatClient(), name="DocsAgent", instructions="You are a helpful assistant that can help with microsoft documentation questions.", tools=MCPStreamableHTTPTool( # Tools defined at agent creation @@ -87,7 +87,7 @@ async def run_with_mcp() -> None: async def main() -> None: - print("=== OpenAI Responses Client Agent with Function Tools Examples ===\n") + print("=== OpenAI Chat Client Agent with Function Tools Examples ===\n") await run_with_mcp() await streaming_with_mcp() diff --git a/python/samples/02-agents/providers/openai/openai_responses_client_with_local_shell.py b/python/samples/02-agents/providers/openai/client_with_local_shell.py similarity index 96% rename from python/samples/02-agents/providers/openai/openai_responses_client_with_local_shell.py rename to python/samples/02-agents/providers/openai/client_with_local_shell.py index b3135702a7..f4829cc5e7 100644 --- a/python/samples/02-agents/providers/openai/openai_responses_client_with_local_shell.py +++ b/python/samples/02-agents/providers/openai/client_with_local_shell.py @@ -5,14 +5,14 @@ from typing import Any from agent_framework import Agent, Message, tool -from agent_framework.openai import OpenAIResponsesClient +from agent_framework.openai import OpenAIChatClient from dotenv import load_dotenv # Load environment variables 
from .env file load_dotenv() """ -OpenAI Responses Client with Local Shell Tool Example +OpenAI Chat Client with Local Shell Tool Example This sample demonstrates implementing a local shell tool using get_shell_tool(func=...) that wraps Python's subprocess module. Unlike the hosted shell tool (get_shell_tool()), @@ -53,7 +53,7 @@ async def main() -> None: print("=== OpenAI Agent with Local Shell Tool Example ===") print("NOTE: Commands will execute on your local machine.\n") - client = OpenAIResponsesClient() + client = OpenAIChatClient() local_shell_tool = client.get_shell_tool( func=run_bash, ) diff --git a/python/samples/02-agents/providers/openai/openai_chat_client_with_runtime_json_schema.py b/python/samples/02-agents/providers/openai/client_with_runtime_json_schema.py similarity index 95% rename from python/samples/02-agents/providers/openai/openai_chat_client_with_runtime_json_schema.py rename to python/samples/02-agents/providers/openai/client_with_runtime_json_schema.py index ba21d0a325..3fcffeae6c 100644 --- a/python/samples/02-agents/providers/openai/openai_chat_client_with_runtime_json_schema.py +++ b/python/samples/02-agents/providers/openai/client_with_runtime_json_schema.py @@ -4,7 +4,7 @@ import json from agent_framework import Agent -from agent_framework.openai import OpenAIChatClient, OpenAIChatOptions +from agent_framework.openai import OpenAIChatClient from dotenv import load_dotenv # Load environment variables from .env file @@ -38,7 +38,7 @@ async def non_streaming_example() -> None: print("=== Non-streaming runtime JSON schema example ===") agent = Agent( - client=OpenAIChatClient[OpenAIChatOptions](), + client=OpenAIChatClient(), name="RuntimeSchemaAgent", instructions="Return only JSON that matches the provided schema. 
Do not add commentary.", ) diff --git a/python/samples/02-agents/providers/openai/openai_responses_client_with_session.py b/python/samples/02-agents/providers/openai/client_with_session.py similarity index 93% rename from python/samples/02-agents/providers/openai/openai_responses_client_with_session.py rename to python/samples/02-agents/providers/openai/client_with_session.py index e62c3bdaea..0dffeaef6c 100644 --- a/python/samples/02-agents/providers/openai/openai_responses_client_with_session.py +++ b/python/samples/02-agents/providers/openai/client_with_session.py @@ -5,7 +5,7 @@ from typing import Annotated from agent_framework import Agent, AgentSession, tool -from agent_framework.openai import OpenAIResponsesClient +from agent_framework.openai import OpenAIChatClient from dotenv import load_dotenv from pydantic import Field @@ -13,9 +13,9 @@ load_dotenv() """ -OpenAI Responses Client with Session Management Example +OpenAI Chat Client with Session Management Example -This sample demonstrates session management with OpenAI Responses Client, showing +This sample demonstrates session management with OpenAI Chat Client, showing persistent conversation context and simplified response handling. 
""" @@ -37,7 +37,7 @@ async def example_with_automatic_session_creation() -> None: print("=== Automatic Session Creation Example ===") agent = Agent( - client=OpenAIResponsesClient(), + client=OpenAIChatClient(), instructions="You are a helpful weather agent.", tools=get_weather, ) @@ -64,7 +64,7 @@ async def example_with_session_persistence_in_memory() -> None: print("=== Session Persistence Example (In-Memory) ===") agent = Agent( - client=OpenAIResponsesClient(), + client=OpenAIChatClient(), instructions="You are a helpful weather agent.", tools=get_weather, ) @@ -103,7 +103,7 @@ async def example_with_existing_session_id() -> None: existing_session_id = None agent = Agent( - client=OpenAIResponsesClient(), + client=OpenAIChatClient(), instructions="You are a helpful weather agent.", tools=get_weather, ) @@ -124,7 +124,7 @@ async def example_with_existing_session_id() -> None: print("\n--- Continuing with the same session ID in a new agent instance ---") agent = Agent( - client=OpenAIResponsesClient(), + client=OpenAIChatClient(), instructions="You are a helpful weather agent.", tools=get_weather, ) diff --git a/python/samples/02-agents/providers/openai/openai_responses_client_with_shell.py b/python/samples/02-agents/providers/openai/client_with_shell.py similarity index 82% rename from python/samples/02-agents/providers/openai/openai_responses_client_with_shell.py rename to python/samples/02-agents/providers/openai/client_with_shell.py index b86f36fde5..5043d8e4a1 100644 --- a/python/samples/02-agents/providers/openai/openai_responses_client_with_shell.py +++ b/python/samples/02-agents/providers/openai/client_with_shell.py @@ -3,16 +3,16 @@ import asyncio from agent_framework import Agent -from agent_framework.openai import OpenAIResponsesClient +from agent_framework.openai import OpenAIChatClient from dotenv import load_dotenv # Load environment variables from .env file load_dotenv() """ -OpenAI Responses Client with Shell Tool Example +OpenAI Chat Client with 
Shell Tool Example -This sample demonstrates using get_shell_tool() with OpenAI Responses Client +This sample demonstrates using get_shell_tool() with OpenAI Chat Client for executing shell commands in a managed container environment hosted by OpenAI. The shell tool allows the model to run commands like listing files, running scripts, @@ -21,10 +21,10 @@ async def main() -> None: - """Example showing how to use the shell tool with OpenAI Responses.""" - print("=== OpenAI Responses Agent with Shell Tool Example ===") + """Example showing how to use the shell tool with OpenAI Chat.""" + print("=== OpenAI Chat Client Agent with Shell Tool Example ===") - client = OpenAIResponsesClient() + client = OpenAIChatClient() # Create a hosted shell tool with the default auto container environment shell_tool = client.get_shell_tool() diff --git a/python/samples/02-agents/providers/openai/openai_responses_client_with_structured_output.py b/python/samples/02-agents/providers/openai/client_with_structured_output.py similarity index 87% rename from python/samples/02-agents/providers/openai/openai_responses_client_with_structured_output.py rename to python/samples/02-agents/providers/openai/client_with_structured_output.py index d2599c0bd8..57bade4412 100644 --- a/python/samples/02-agents/providers/openai/openai_responses_client_with_structured_output.py +++ b/python/samples/02-agents/providers/openai/client_with_structured_output.py @@ -3,7 +3,7 @@ import asyncio from agent_framework import Agent, AgentResponse -from agent_framework.openai import OpenAIResponsesClient +from agent_framework.openai import OpenAIChatClient from dotenv import load_dotenv from pydantic import BaseModel @@ -11,9 +11,9 @@ load_dotenv() """ -OpenAI Responses Client with Structured Output Example +OpenAI Chat Client with Structured Output Example -This sample demonstrates using structured output capabilities with OpenAI Responses Client, +This sample demonstrates using structured output capabilities with 
OpenAI Chat Client, showing Pydantic model integration for type-safe response parsing and data extraction. """ @@ -28,9 +28,9 @@ class OutputStruct(BaseModel): async def non_streaming_example() -> None: print("=== Non-streaming example ===") - # Create an OpenAI Responses agent + # Create an OpenAI Chat agent agent = Agent( - client=OpenAIResponsesClient(), + client=OpenAIChatClient(), name="CityAgent", instructions="You are a helpful agent that describes cities in a structured format.", ) @@ -54,9 +54,9 @@ async def non_streaming_example() -> None: async def streaming_example() -> None: print("=== Streaming example ===") - # Create an OpenAI Responses agent + # Create an OpenAI Chat agent agent = Agent( - client=OpenAIResponsesClient(), + client=OpenAIChatClient(), name="CityAgent", instructions="You are a helpful agent that describes cities in a structured format.", ) @@ -82,7 +82,7 @@ async def streaming_example() -> None: async def main() -> None: - print("=== OpenAI Responses Agent with Structured Output ===") + print("=== OpenAI Chat Client Agent with Structured Output ===") await non_streaming_example() await streaming_example() diff --git a/python/samples/02-agents/providers/openai/openai_responses_client_with_web_search.py b/python/samples/02-agents/providers/openai/client_with_web_search.py similarity index 88% rename from python/samples/02-agents/providers/openai/openai_responses_client_with_web_search.py rename to python/samples/02-agents/providers/openai/client_with_web_search.py index 9f22807ba9..d0bab5a87a 100644 --- a/python/samples/02-agents/providers/openai/openai_responses_client_with_web_search.py +++ b/python/samples/02-agents/providers/openai/client_with_web_search.py @@ -3,22 +3,22 @@ import asyncio from agent_framework import Agent -from agent_framework.openai import OpenAIResponsesClient +from agent_framework.openai import OpenAIChatClient from dotenv import load_dotenv # Load environment variables from .env file load_dotenv() """ -OpenAI 
Responses Client with Web Search Example +OpenAI Chat Client with Web Search Example -This sample demonstrates using get_web_search_tool() with OpenAI Responses Client +This sample demonstrates using get_web_search_tool() with OpenAI Chat Client for direct real-time information retrieval and current data access. """ async def main() -> None: - client = OpenAIResponsesClient() + client = OpenAIChatClient() # Create web search tool with location context web_search_tool = client.get_web_search_tool( diff --git a/python/samples/02-agents/providers/openai/openai_assistants_basic.py b/python/samples/02-agents/providers/openai/openai_assistants_basic.py deleted file mode 100644 index 5901b6ef38..0000000000 --- a/python/samples/02-agents/providers/openai/openai_assistants_basic.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -import asyncio -import os -from random import randint -from typing import Annotated - -from agent_framework import tool -from agent_framework.openai import OpenAIAssistantProvider -from dotenv import load_dotenv -from openai import AsyncOpenAI -from pydantic import Field - -# Load environment variables from .env file -load_dotenv() - -""" -OpenAI Assistants Basic Example - -This sample demonstrates basic usage of OpenAIAssistantProvider with automatic -assistant lifecycle management, showing both streaming and non-streaming responses. -""" - - -# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; -# see samples/02-agents/tools/function_tool_with_approval.py -# and samples/02-agents/tools/function_tool_with_approval_and_sessions.py. 
-@tool(approval_mode="never_require") -def get_weather( - location: Annotated[str, Field(description="The location to get the weather for.")], -) -> str: - """Get the weather for a given location.""" - conditions = ["sunny", "cloudy", "rainy", "stormy"] - return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}C." - - -async def non_streaming_example() -> None: - """Example of non-streaming response (get the complete result at once).""" - print("=== Non-streaming Response Example ===") - - client = AsyncOpenAI() - provider = OpenAIAssistantProvider(client) - - # Create a new assistant via the provider - agent = await provider.create_agent( - name="WeatherAssistant", - model=os.environ.get("OPENAI_MODEL", "gpt-4"), - instructions="You are a helpful weather agent.", - tools=[get_weather], - ) - - try: - query = "What's the weather like in Seattle?" - print(f"User: {query}") - result = await agent.run(query) - print(f"Agent: {result}\n") - finally: - # Clean up the assistant from OpenAI - await client.beta.assistants.delete(agent.id) - - -async def streaming_example() -> None: - """Example of streaming response (get results as they are generated).""" - print("=== Streaming Response Example ===") - - client = AsyncOpenAI() - provider = OpenAIAssistantProvider(client) - - # Create a new assistant via the provider - agent = await provider.create_agent( - name="WeatherAssistant", - model=os.environ.get("OPENAI_MODEL", "gpt-4"), - instructions="You are a helpful weather agent.", - tools=[get_weather], - ) - - try: - query = "What's the weather like in Portland?" 
- print(f"User: {query}") - print("Agent: ", end="", flush=True) - async for chunk in agent.run(query, stream=True): - if chunk.text: - print(chunk.text, end="", flush=True) - print("\n") - finally: - # Clean up the assistant from OpenAI - await client.beta.assistants.delete(agent.id) - - -async def main() -> None: - print("=== Basic OpenAI Assistants Provider Example ===") - - await non_streaming_example() - await streaming_example() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/02-agents/providers/openai/openai_assistants_provider_methods.py b/python/samples/02-agents/providers/openai/openai_assistants_provider_methods.py deleted file mode 100644 index 0cc9d33f73..0000000000 --- a/python/samples/02-agents/providers/openai/openai_assistants_provider_methods.py +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -import asyncio -import os -from random import randint -from typing import Annotated - -from agent_framework import Agent, tool -from agent_framework.openai import OpenAIAssistantProvider -from dotenv import load_dotenv -from openai import AsyncOpenAI -from pydantic import Field - -# Load environment variables from .env file -load_dotenv() - -""" -OpenAI Assistant Provider Methods Example - -This sample demonstrates the methods available on the OpenAIAssistantProvider class: -- create_agent(): Create a new assistant on the service -- get_agent(): Retrieve an existing assistant by ID -- as_agent(): Wrap an SDK Assistant object without making HTTP calls -""" - - -# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; -# see samples/02-agents/tools/function_tool_with_approval.py -# and samples/02-agents/tools/function_tool_with_approval_and_sessions.py. 
-@tool(approval_mode="never_require") -def get_weather( - location: Annotated[str, Field(description="The location to get the weather for.")], -) -> str: - """Get the weather for a given location.""" - conditions = ["sunny", "cloudy", "rainy", "stormy"] - return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}C." - - -async def create_agent_example() -> None: - """Create a new assistant using provider.create_agent().""" - print("\n--- create_agent() ---") - - async with ( - AsyncOpenAI() as client, - OpenAIAssistantProvider(client) as provider, - ): - agent = await provider.create_agent( - name="WeatherAssistant", - model=os.environ.get("OPENAI_MODEL", "gpt-4"), - instructions="You are a helpful weather assistant.", - tools=[get_weather], - ) - - try: - print(f"Created: {agent.name} (ID: {agent.id})") - result = await agent.run("What's the weather in Seattle?") - print(f"Response: {result}") - finally: - await client.beta.assistants.delete(agent.id) - - -async def get_agent_example() -> None: - """Retrieve an existing assistant by ID using provider.get_agent().""" - print("\n--- get_agent() ---") - - async with ( - AsyncOpenAI() as client, - OpenAIAssistantProvider(client) as provider, - ): - # Create an assistant directly with SDK (simulating pre-existing assistant) - sdk_assistant = await client.beta.assistants.create( - model=os.environ.get("OPENAI_MODEL", "gpt-4"), - name="ExistingAssistant", - instructions="You always respond with 'Hello!'", - ) - - try: - # Retrieve using provider - agent = await provider.get_agent(sdk_assistant.id) - print(f"Retrieved: {agent.name} (ID: {agent.id})") - - result = await agent.run("Hi there!") - print(f"Response: {result}") - finally: - await client.beta.assistants.delete(sdk_assistant.id) - - -async def as_agent_example() -> None: - """Wrap an SDK Assistant object using Agent(client=provider, ...).""" - print("\n--- as_agent() ---") - - async with ( - AsyncOpenAI() as client, - 
OpenAIAssistantProvider(client) as provider, - ): - # Create assistant using SDK - sdk_assistant = await client.beta.assistants.create( - model=os.environ.get("OPENAI_MODEL", "gpt-4"), - name="WrappedAssistant", - instructions="You respond with poetry.", - ) - - try: - # Wrap synchronously (no HTTP call) - agent = Agent(client=provider, agent=sdk_assistant) - print(f"Wrapped: {agent.name} (ID: {agent.id})") - - result = await agent.run("Tell me about the sunset.") - print(f"Response: {result}") - finally: - await client.beta.assistants.delete(sdk_assistant.id) - - -async def multiple_agents_example() -> None: - """Create and manage multiple assistants with a single provider.""" - print("\n--- Multiple Agents ---") - - async with ( - AsyncOpenAI() as client, - OpenAIAssistantProvider(client) as provider, - ): - weather_agent = await provider.create_agent( - name="WeatherSpecialist", - model=os.environ.get("OPENAI_MODEL", "gpt-4"), - instructions="You are a weather specialist.", - tools=[get_weather], - ) - - greeter_agent = await provider.create_agent( - name="GreeterAgent", - model=os.environ.get("OPENAI_MODEL", "gpt-4"), - instructions="You are a friendly greeter.", - ) - - try: - print(f"Created: {weather_agent.name}, {greeter_agent.name}") - - greeting = await greeter_agent.run("Hello!") - print(f"Greeter: {greeting}") - - weather = await weather_agent.run("What's the weather in Tokyo?") - print(f"Weather: {weather}") - finally: - await client.beta.assistants.delete(weather_agent.id) - await client.beta.assistants.delete(greeter_agent.id) - - -async def main() -> None: - print("OpenAI Assistant Provider Methods") - - await create_agent_example() - await get_agent_example() - await as_agent_example() - await multiple_agents_example() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/02-agents/providers/openai/openai_assistants_with_code_interpreter.py 
b/python/samples/02-agents/providers/openai/openai_assistants_with_code_interpreter.py deleted file mode 100644 index 044804e3c5..0000000000 --- a/python/samples/02-agents/providers/openai/openai_assistants_with_code_interpreter.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -import asyncio -import os - -from agent_framework import AgentResponseUpdate, ChatResponseUpdate -from agent_framework.openai import OpenAIAssistantProvider, OpenAIAssistantsClient -from dotenv import load_dotenv -from openai import AsyncOpenAI -from openai.types.beta.threads.runs import ( - CodeInterpreterToolCallDelta, - RunStepDelta, - RunStepDeltaEvent, - ToolCallDeltaObject, -) -from openai.types.beta.threads.runs.code_interpreter_tool_call_delta import CodeInterpreter - -# Load environment variables from .env file -load_dotenv() - -""" -OpenAI Assistants with Code Interpreter Example - -This sample demonstrates using get_code_interpreter_tool() with OpenAI Assistants -for Python code execution and mathematical problem solving. 
-""" - - -def get_code_interpreter_chunk(chunk: AgentResponseUpdate) -> str | None: - """Helper method to access code interpreter data.""" - if ( - isinstance(chunk.raw_representation, ChatResponseUpdate) - and isinstance(chunk.raw_representation.raw_representation, RunStepDeltaEvent) - and isinstance(chunk.raw_representation.raw_representation.delta, RunStepDelta) - and isinstance(chunk.raw_representation.raw_representation.delta.step_details, ToolCallDeltaObject) - and chunk.raw_representation.raw_representation.delta.step_details.tool_calls - ): - for tool_call in chunk.raw_representation.raw_representation.delta.step_details.tool_calls: - if ( - isinstance(tool_call, CodeInterpreterToolCallDelta) - and isinstance(tool_call.code_interpreter, CodeInterpreter) - and tool_call.code_interpreter.input is not None - ): - return tool_call.code_interpreter.input - return None - - -async def main() -> None: - """Example showing how to use the code interpreter tool with OpenAI Assistants.""" - print("=== OpenAI Assistants Provider with Code Interpreter Example ===") - - client = AsyncOpenAI() - provider = OpenAIAssistantProvider(client) - chat_client = OpenAIAssistantsClient(client=client) - - agent = await provider.create_agent( - name="CodeHelper", - model=os.environ.get("OPENAI_MODEL", "gpt-4"), - instructions="You are a helpful assistant that can write and execute Python code to solve problems.", - tools=[chat_client.get_code_interpreter_tool()], - ) - - try: - query = "Use code to get the factorial of 100?" 
- print(f"User: {query}") - print("Agent: ", end="", flush=True) - generated_code = "" - async for chunk in agent.run(query, stream=True): - if chunk.text: - print(chunk.text, end="", flush=True) - code_interpreter_chunk = get_code_interpreter_chunk(chunk) - if code_interpreter_chunk is not None: - generated_code += code_interpreter_chunk - - print(f"\nGenerated code:\n{generated_code}") - finally: - await client.beta.assistants.delete(agent.id) - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/02-agents/providers/openai/openai_assistants_with_existing_assistant.py b/python/samples/02-agents/providers/openai/openai_assistants_with_existing_assistant.py deleted file mode 100644 index 563dbb38a4..0000000000 --- a/python/samples/02-agents/providers/openai/openai_assistants_with_existing_assistant.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -import asyncio -import os -from random import randint -from typing import Annotated - -from agent_framework import Agent, tool -from agent_framework.openai import OpenAIAssistantProvider -from dotenv import load_dotenv -from openai import AsyncOpenAI -from pydantic import Field - -# Load environment variables from .env file -load_dotenv() - -""" -OpenAI Assistants with Existing Assistant Example - -This sample demonstrates working with pre-existing OpenAI Assistants -using the provider's get_agent() and as_agent() methods. -""" - - -# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; -# see samples/02-agents/tools/function_tool_with_approval.py -# and samples/02-agents/tools/function_tool_with_approval_and_sessions.py. 
-@tool(approval_mode="never_require") -def get_weather( - location: Annotated[str, Field(description="The location to get the weather for.")], -) -> str: - """Get the weather for a given location.""" - conditions = ["sunny", "cloudy", "rainy", "stormy"] - return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}C." - - -async def example_get_agent_by_id() -> None: - """Example: Using get_agent() to retrieve an existing assistant by ID.""" - print("=== Get Existing Assistant by ID ===") - - client = AsyncOpenAI() - provider = OpenAIAssistantProvider(client) - - # Create an assistant via SDK (simulating an existing assistant) - created_assistant = await client.beta.assistants.create( - model=os.environ.get("OPENAI_MODEL", "gpt-4"), - name="WeatherAssistant", - tools=[ - { - "type": "function", - "function": { - "name": "get_weather", - "description": "Get the weather for a given location.", - "parameters": { - "type": "object", - "properties": {"location": {"type": "string", "description": "The location"}}, - "required": ["location"], - }, - }, - } - ], - ) - print(f"Created assistant: {created_assistant.id}") - - try: - # Use get_agent() to retrieve the existing assistant - agent = await provider.get_agent( - assistant_id=created_assistant.id, - tools=[get_weather], # Required: implementation for function tools - instructions="You are a helpful weather agent.", - ) - - result = await agent.run("What's the weather like in Tokyo?") - print(f"Agent: {result}\n") - finally: - await client.beta.assistants.delete(created_assistant.id) - print("Assistant deleted.\n") - - -async def example_as_agent_wrap_sdk_object() -> None: - """Example: Using as_agent() to wrap an existing SDK Assistant object.""" - print("=== Wrap Existing SDK Assistant Object ===") - - client = AsyncOpenAI() - provider = OpenAIAssistantProvider(client) - - # Create and fetch an assistant via SDK - created_assistant = await client.beta.assistants.create( - 
model=os.environ.get("OPENAI_MODEL", "gpt-4"), - name="SimpleAssistant", - instructions="You are a friendly assistant.", - ) - print(f"Created assistant: {created_assistant.id}") - - try: - # Use as_agent() to wrap the SDK object - agent = Agent( - client=provider, - agent=created_assistant, - instructions="You are an extremely helpful assistant. Be enthusiastic!", - ) - - result = await agent.run("Hello! What can you help me with?") - print(f"Agent: {result}\n") - finally: - await client.beta.assistants.delete(created_assistant.id) - print("Assistant deleted.\n") - - -async def main() -> None: - print("=== OpenAI Assistants Provider with Existing Assistant Examples ===\n") - - await example_get_agent_by_id() - await example_as_agent_wrap_sdk_object() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/02-agents/providers/openai/openai_assistants_with_explicit_settings.py b/python/samples/02-agents/providers/openai/openai_assistants_with_explicit_settings.py deleted file mode 100644 index d7adef004c..0000000000 --- a/python/samples/02-agents/providers/openai/openai_assistants_with_explicit_settings.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -import asyncio -import os -from random import randint -from typing import Annotated - -from agent_framework import tool -from agent_framework.openai import OpenAIAssistantProvider -from dotenv import load_dotenv -from openai import AsyncOpenAI -from pydantic import Field - -# Load environment variables from .env file -load_dotenv() - -""" -OpenAI Assistants with Explicit Settings Example - -This sample demonstrates creating OpenAI Assistants with explicit configuration -settings rather than relying on environment variable defaults. -""" - - -# NOTE: approval_mode="never_require" is for sample brevity. 
Use "always_require" in production; -# see samples/02-agents/tools/function_tool_with_approval.py -# and samples/02-agents/tools/function_tool_with_approval_and_sessions.py. -@tool(approval_mode="never_require") -def get_weather( - location: Annotated[str, Field(description="The location to get the weather for.")], -) -> str: - """Get the weather for a given location.""" - conditions = ["sunny", "cloudy", "rainy", "stormy"] - return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}C." - - -async def main() -> None: - print("=== OpenAI Assistants Provider with Explicit Settings ===") - - # Create client with explicit API key - client = AsyncOpenAI(api_key=os.environ["OPENAI_API_KEY"]) - provider = OpenAIAssistantProvider(client) - - agent = await provider.create_agent( - name="WeatherAssistant", - model=os.environ["OPENAI_MODEL"], - instructions="You are a helpful weather agent.", - tools=[get_weather], - ) - - try: - query = "What's the weather like in New York?" - print(f"Query: {query}") - result = await agent.run(query) - print(f"Result: {result}\n") - finally: - await client.beta.assistants.delete(agent.id) - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/02-agents/providers/openai/openai_assistants_with_file_search.py b/python/samples/02-agents/providers/openai/openai_assistants_with_file_search.py deleted file mode 100644 index ad67986d4e..0000000000 --- a/python/samples/02-agents/providers/openai/openai_assistants_with_file_search.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
- -import asyncio -import os - -from agent_framework import Content -from agent_framework.openai import OpenAIAssistantProvider, OpenAIAssistantsClient -from dotenv import load_dotenv -from openai import AsyncOpenAI - -# Load environment variables from .env file -load_dotenv() - -""" -OpenAI Assistants with File Search Example - -This sample demonstrates using get_file_search_tool() with OpenAI Assistants -for document-based question answering and information retrieval. -""" - - -async def create_vector_store(client: AsyncOpenAI) -> tuple[str, Content]: - """Create a vector store with sample documents.""" - file = await client.files.create( - file=("todays_weather.txt", b"The weather today is sunny with a high of 75F."), purpose="user_data" - ) - vector_store = await client.vector_stores.create( - name="knowledge_base", - expires_after={"anchor": "last_active_at", "days": 1}, - ) - result = await client.vector_stores.files.create_and_poll(vector_store_id=vector_store.id, file_id=file.id) - if result.last_error is not None: - raise Exception(f"Vector store file processing failed with status: {result.last_error.message}") - - return file.id, Content.from_hosted_vector_store(vector_store_id=vector_store.id) - - -async def delete_vector_store(client: AsyncOpenAI, file_id: str, vector_store_id: str) -> None: - """Delete the vector store after using it.""" - await client.vector_stores.delete(vector_store_id=vector_store_id) - await client.files.delete(file_id=file_id) - - -async def main() -> None: - print("=== OpenAI Assistants Provider with File Search Example ===\n") - - client = AsyncOpenAI() - provider = OpenAIAssistantProvider(client) - chat_client = OpenAIAssistantsClient(client=client) - - agent = await provider.create_agent( - name="SearchAssistant", - model=os.environ.get("OPENAI_MODEL", "gpt-4"), - instructions="You are a helpful assistant that searches files in a knowledge base.", - tools=[chat_client.get_file_search_tool()], - ) - - try: - query = "What is 
the weather today? Do a file search to find the answer." - file_id, vector_store_content = await create_vector_store(client) - - print(f"User: {query}") - print("Agent: ", end="", flush=True) - async for chunk in agent.run( - query, - stream=True, - options={"tool_resources": {"file_search": {"vector_store_ids": [vector_store_content.vector_store_id]}}}, - ): - if chunk.text: - print(chunk.text, end="", flush=True) - - await delete_vector_store(client, file_id, vector_store_content.vector_store_id) - finally: - await client.beta.assistants.delete(agent.id) - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/02-agents/providers/openai/openai_assistants_with_function_tools.py b/python/samples/02-agents/providers/openai/openai_assistants_with_function_tools.py deleted file mode 100644 index ffd64d9ca2..0000000000 --- a/python/samples/02-agents/providers/openai/openai_assistants_with_function_tools.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -import asyncio -import os -from datetime import datetime, timezone -from random import randint -from typing import Annotated - -from agent_framework import tool -from agent_framework.openai import OpenAIAssistantProvider -from dotenv import load_dotenv -from openai import AsyncOpenAI -from pydantic import Field - -# Load environment variables from .env file -load_dotenv() - -""" -OpenAI Assistants with Function Tools Example - -This sample demonstrates function tool integration with OpenAI Assistants, -showing both agent-level and query-level tool configuration patterns. -""" - - -# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; -# see samples/02-agents/tools/function_tool_with_approval.py -# and samples/02-agents/tools/function_tool_with_approval_and_sessions.py. 
-@tool(approval_mode="never_require") -def get_weather( - location: Annotated[str, Field(description="The location to get the weather for.")], -) -> str: - """Get the weather for a given location.""" - conditions = ["sunny", "cloudy", "rainy", "stormy"] - return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}C." - - -@tool(approval_mode="never_require") -def get_time() -> str: - """Get the current UTC time.""" - current_time = datetime.now(timezone.utc) - return f"The current UTC time is {current_time.strftime('%Y-%m-%d %H:%M:%S')}." - - -async def tools_on_agent_level() -> None: - """Example showing tools defined when creating the agent.""" - print("=== Tools Defined on Agent Level ===") - - client = AsyncOpenAI() - provider = OpenAIAssistantProvider(client) - - # Tools are provided when creating the agent - # The agent can use these tools for any query during its lifetime - agent = await provider.create_agent( - name="InfoAssistant", - model=os.environ.get("OPENAI_MODEL", "gpt-4"), - instructions="You are a helpful assistant that can provide weather and time information.", - tools=[get_weather, get_time], # Tools defined at agent creation - ) - - try: - # First query - agent can use weather tool - query1 = "What's the weather like in New York?" - print(f"User: {query1}") - result1 = await agent.run(query1) - print(f"Agent: {result1}\n") - - # Second query - agent can use time tool - query2 = "What's the current UTC time?" - print(f"User: {query2}") - result2 = await agent.run(query2) - print(f"Agent: {result2}\n") - - # Third query - agent can use both tools if needed - query3 = "What's the weather in London and what's the current UTC time?" 
- print(f"User: {query3}") - result3 = await agent.run(query3) - print(f"Agent: {result3}\n") - finally: - await client.beta.assistants.delete(agent.id) - - -async def tools_on_run_level() -> None: - """Example showing tools passed to the run method.""" - print("=== Tools Passed to Run Method ===") - - client = AsyncOpenAI() - provider = OpenAIAssistantProvider(client) - - # Agent created with base tools, additional tools can be passed at run time - agent = await provider.create_agent( - name="FlexibleAssistant", - model=os.environ.get("OPENAI_MODEL", "gpt-4"), - instructions="You are a helpful assistant.", - tools=[get_weather], # Base tool - ) - - try: - # First query using base weather tool - query1 = "What's the weather like in Seattle?" - print(f"User: {query1}") - result1 = await agent.run(query1) - print(f"Agent: {result1}\n") - - # Second query with additional time tool - query2 = "What's the current UTC time?" - print(f"User: {query2}") - result2 = await agent.run(query2, tools=[get_time]) # Additional tool for this query - print(f"Agent: {result2}\n") - - # Third query with both tools - query3 = "What's the weather in Chicago and what's the current UTC time?" 
- print(f"User: {query3}") - result3 = await agent.run(query3, tools=[get_time]) # Time tool adds to weather - print(f"Agent: {result3}\n") - finally: - await client.beta.assistants.delete(agent.id) - - -async def mixed_tools_example() -> None: - """Example showing both agent-level tools and run-method tools.""" - print("=== Mixed Tools Example (Agent + Run Method) ===") - - client = AsyncOpenAI() - provider = OpenAIAssistantProvider(client) - - # Agent created with some base tools - agent = await provider.create_agent( - name="ComprehensiveAssistant", - model=os.environ.get("OPENAI_MODEL", "gpt-4"), - instructions="You are a comprehensive assistant that can help with various information requests.", - tools=[get_weather], # Base tool available for all queries - ) - - try: - # Query using both agent tool and additional run-method tools - query = "What's the weather in Denver and what's the current UTC time?" - print(f"User: {query}") - - # Agent has access to get_weather (from creation) + additional tools from run method - result = await agent.run( - query, - tools=[get_time], # Additional tools for this specific query - ) - print(f"Agent: {result}\n") - finally: - await client.beta.assistants.delete(agent.id) - - -async def main() -> None: - print("=== OpenAI Assistants Provider with Function Tools Examples ===\n") - - await tools_on_agent_level() - await tools_on_run_level() - await mixed_tools_example() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/02-agents/providers/openai/openai_assistants_with_response_format.py b/python/samples/02-agents/providers/openai/openai_assistants_with_response_format.py deleted file mode 100644 index 740b36107d..0000000000 --- a/python/samples/02-agents/providers/openai/openai_assistants_with_response_format.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
- -import asyncio -import os - -from agent_framework.openai import OpenAIAssistantProvider -from dotenv import load_dotenv -from openai import AsyncOpenAI -from pydantic import BaseModel, ConfigDict - -# Load environment variables from .env file -load_dotenv() - -""" -OpenAI Assistant Provider Response Format Example - -This sample demonstrates using OpenAIAssistantProvider with response_format -for structured outputs in two ways: -1. Setting default response_format at agent creation time (default_options) -2. Overriding response_format at runtime (options parameter in agent.run) -""" - - -class WeatherInfo(BaseModel): - """Structured weather information.""" - - location: str - temperature: int - conditions: str - recommendation: str - model_config = ConfigDict(extra="forbid") - - -class CityInfo(BaseModel): - """Structured city information.""" - - city_name: str - population: int - country: str - model_config = ConfigDict(extra="forbid") - - -async def main() -> None: - """Example of using response_format at creation time and runtime.""" - - async with ( - AsyncOpenAI() as client, - OpenAIAssistantProvider(client) as provider, - ): - # Create agent with default response_format (WeatherInfo) - agent = await provider.create_agent( - name="StructuredReporter", - model=os.environ.get("OPENAI_MODEL", "gpt-4"), - instructions="Return structured JSON based on the requested format.", - default_options={"response_format": WeatherInfo}, - ) - - try: - # Request 1: Uses default response_format from agent creation - print("--- Request 1: Using default response_format (WeatherInfo) ---") - query1 = "What's the weather like in Paris today?" 
- print(f"User: {query1}") - - result1 = await agent.run(query1) - - try: - weather = result1.value - print("Agent:") - print(f" Location: {weather.location}") - print(f" Temperature: {weather.temperature}") - print(f" Conditions: {weather.conditions}") - print(f" Recommendation: {weather.recommendation}") - except Exception: - print(f"Failed to parse response: {result1.text}") - - # Request 2: Override response_format at runtime with CityInfo - print("\n--- Request 2: Runtime override with CityInfo ---") - query2 = "Tell me about Tokyo." - print(f"User: {query2}") - - result2 = await agent.run(query2, options={"response_format": CityInfo}) - - try: - city = result2.value - print("Agent:") - print(f" City: {city.city_name}") - print(f" Population: {city.population}") - print(f" Country: {city.country}") - except Exception: - print(f"Failed to parse response: {result2.text}") - finally: - await client.beta.assistants.delete(agent.id) - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/02-agents/providers/openai/openai_assistants_with_session.py b/python/samples/02-agents/providers/openai/openai_assistants_with_session.py deleted file mode 100644 index 2259c5638d..0000000000 --- a/python/samples/02-agents/providers/openai/openai_assistants_with_session.py +++ /dev/null @@ -1,172 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -import asyncio -import os -from random import randint -from typing import Annotated - -from agent_framework import AgentSession, tool -from agent_framework.openai import OpenAIAssistantProvider -from dotenv import load_dotenv -from openai import AsyncOpenAI -from pydantic import Field - -# Load environment variables from .env file -load_dotenv() - -""" -OpenAI Assistants with Session Management Example - -This sample demonstrates session management with OpenAI Assistants, showing -persistent conversation sessions and context preservation across interactions. 
-""" - - -# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; -# see samples/02-agents/tools/function_tool_with_approval.py -# and samples/02-agents/tools/function_tool_with_approval_and_sessions.py. -@tool(approval_mode="never_require") -def get_weather( - location: Annotated[str, Field(description="The location to get the weather for.")], -) -> str: - """Get the weather for a given location.""" - conditions = ["sunny", "cloudy", "rainy", "stormy"] - return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}C." - - -async def example_with_automatic_session_creation() -> None: - """Example showing automatic session creation (service-managed session).""" - print("=== Automatic Session Creation Example ===") - - client = AsyncOpenAI() - provider = OpenAIAssistantProvider(client) - - agent = await provider.create_agent( - name="WeatherAssistant", - model=os.environ.get("OPENAI_MODEL", "gpt-4"), - instructions="You are a helpful weather agent.", - tools=[get_weather], - ) - - try: - # First conversation - no session provided, will be created automatically - query1 = "What's the weather like in Seattle?" - print(f"User: {query1}") - result1 = await agent.run(query1) - print(f"Agent: {result1.text}") - - # Second conversation - still no session provided, will create another new session - query2 = "What was the last city I asked about?" 
- print(f"\nUser: {query2}") - result2 = await agent.run(query2) - print(f"Agent: {result2.text}") - print("Note: Each call creates a separate session, so the agent doesn't remember previous context.\n") - finally: - await client.beta.assistants.delete(agent.id) - - -async def example_with_session_persistence() -> None: - """Example showing session persistence across multiple conversations.""" - print("=== Session Persistence Example ===") - print("Using the same session across multiple conversations to maintain context.\n") - - client = AsyncOpenAI() - provider = OpenAIAssistantProvider(client) - - agent = await provider.create_agent( - name="WeatherAssistant", - model=os.environ.get("OPENAI_MODEL", "gpt-4"), - instructions="You are a helpful weather agent.", - tools=[get_weather], - ) - - try: - # Create a new session that will be reused - session = agent.create_session() - - # First conversation - query1 = "What's the weather like in Tokyo?" - print(f"User: {query1}") - result1 = await agent.run(query1, session=session) - print(f"Agent: {result1.text}") - - # Second conversation using the same session - maintains context - query2 = "How about London?" - print(f"\nUser: {query2}") - result2 = await agent.run(query2, session=session) - print(f"Agent: {result2.text}") - - # Third conversation - agent should remember both previous cities - query3 = "Which of the cities I asked about has better weather?" 
- print(f"\nUser: {query3}") - result3 = await agent.run(query3, session=session) - print(f"Agent: {result3.text}") - print("Note: The agent remembers context from previous messages in the same session.\n") - finally: - await client.beta.assistants.delete(agent.id) - - -async def example_with_existing_session_id() -> None: - """Example showing how to work with an existing session ID from the service.""" - print("=== Existing Session ID Example ===") - print("Using a specific session ID to continue an existing conversation.\n") - - client = AsyncOpenAI() - provider = OpenAIAssistantProvider(client) - - # First, create a conversation and capture the session ID - existing_session_id = None - assistant_id = None - - agent = await provider.create_agent( - name="WeatherAssistant", - model=os.environ.get("OPENAI_MODEL", "gpt-4"), - instructions="You are a helpful weather agent.", - tools=[get_weather], - ) - assistant_id = agent.id - - try: - # Start a conversation and get the session ID - session = agent.create_session() - query1 = "What's the weather in Paris?" - print(f"User: {query1}") - result1 = await agent.run(query1, session=session) - print(f"Agent: {result1.text}") - - # The session ID is set after the first response - existing_session_id = session.service_session_id - print(f"Session ID: {existing_session_id}") - - if existing_session_id: - print("\n--- Continuing with the same session ID using get_agent ---") - - # Get the existing assistant by ID - agent2 = await provider.get_agent( - assistant_id=assistant_id, - tools=[get_weather], # Must provide function implementations - ) - - # Create a session with the existing ID - session = AgentSession(service_session_id=existing_session_id) - - query2 = "What was the last city I asked about?" 
- print(f"User: {query2}") - result2 = await agent2.run(query2, session=session) - print(f"Agent: {result2.text}") - print("Note: The agent continues the conversation from the previous session.\n") - finally: - if assistant_id: - await client.beta.assistants.delete(assistant_id) - - -async def main() -> None: - print("=== OpenAI Assistants Provider Session Management Examples ===\n") - - await example_with_automatic_session_creation() - await example_with_session_persistence() - await example_with_existing_session_id() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/02-agents/providers/openai/openai_responses_client_basic.py b/python/samples/02-agents/providers/openai/openai_responses_client_basic.py deleted file mode 100644 index c615cf3252..0000000000 --- a/python/samples/02-agents/providers/openai/openai_responses_client_basic.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -import asyncio -from collections.abc import Awaitable, Callable -from random import randint -from typing import Annotated - -from agent_framework import ( - Agent, - ChatContext, - ChatResponse, - Message, - MiddlewareTermination, - Role, - chat_middleware, - tool, -) -from agent_framework.openai import OpenAIResponsesClient -from dotenv import load_dotenv -from pydantic import Field - -# Load environment variables from .env file -load_dotenv() - -""" -OpenAI Responses Client Basic Example - -This sample demonstrates basic usage of OpenAIResponsesClient for structured -response generation, showing both streaming and non-streaming responses. 
-""" - - -@chat_middleware -async def security_and_override_middleware( - context: ChatContext, - call_next: Callable[[], Awaitable[None]], -) -> None: - """Function-based middleware that implements security filtering and response override.""" - print("[SecurityMiddleware] Processing input...") - - # Security check - block sensitive information - blocked_terms = ["password", "secret", "api_key", "token"] - - for message in context.messages: - if message.text: - message_lower = message.text.lower() - for term in blocked_terms: - if term in message_lower: - print(f"[SecurityMiddleware] BLOCKED: Found '{term}' in message") - - # Override the response instead of calling AI - context.result = ChatResponse( - messages=[ - Message( - role=Role.ASSISTANT, - text="I cannot process requests containing sensitive information. " - "Please rephrase your question without including passwords, secrets, or other " - "sensitive data.", - ) - ] - ) - - # Terminate middleware execution with the blocked response - raise MiddlewareTermination(result=context.result) - - # Continue to next middleware or AI execution - await call_next() - - print("[SecurityMiddleware] Response generated.") - print(type(context.result)) - - -# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; -# see samples/02-agents/tools/function_tool_with_approval.py -# and samples/02-agents/tools/function_tool_with_approval_and_sessions.py. -@tool(approval_mode="never_require") -def get_weather( - location: Annotated[str, Field(description="The location to get the weather for.")], -) -> str: - """Get the weather for a given location.""" - conditions = ["sunny", "cloudy", "rainy", "stormy"] - return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." 
- - -async def non_streaming_example() -> None: - """Example of non-streaming response (get the complete result at once).""" - print("=== Non-streaming Response Example ===") - - agent = Agent( - client=OpenAIResponsesClient(), - instructions="You are a helpful weather agent.", - tools=get_weather, - ) - - query = "What's the weather like in Seattle?" - print(f"User: {query}") - result = await agent.run(query) - print(f"Result: {result}\n") - - -async def streaming_example() -> None: - """Example of streaming response (get results as they are generated).""" - print("=== Streaming Response Example ===") - - agent = Agent( - client=OpenAIResponsesClient( - middleware=[security_and_override_middleware], - ), - instructions="You are a helpful weather agent.", - tools=get_weather, - ) - - query = "What's the weather like in Portland?" - print(f"User: {query}") - print("Agent: ", end="", flush=True) - response = agent.run(query, stream=True) - async for chunk in response: - if chunk.text: - print(chunk.text, end="", flush=True) - print("\n") - print(f"Final Result: {await response.get_final_response()}") - - -async def main() -> None: - print("=== Basic OpenAI Responses Client Agent Example ===") - - await streaming_example() - await non_streaming_example() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/README.md b/python/samples/README.md index fa091b78bc..82a008504c 100644 --- a/python/samples/README.md +++ b/python/samples/README.md @@ -61,6 +61,16 @@ client = OpenAIChatClient(env_file_path="path/to/custom.env") This allows different clients to use different configuration files if needed. +For the generic OpenAI clients (`OpenAIChatClient` and `OpenAIChatCompletionClient`), routing +precedence is: + +1. Explicit Azure inputs such as `credential`, `azure_endpoint`, or `api_version` +2. `OPENAI_API_KEY` / explicit OpenAI API-key parameters +3. 
Azure environment fallback such as `AZURE_OPENAI_ENDPOINT` and `AZURE_OPENAI_API_KEY` + +If you keep both OpenAI and Azure variables in your shell, the generic clients stay on OpenAI until +you pass an explicit Azure input. + For the getting-started samples, you'll need at minimum: ```bash AZURE_AI_PROJECT_ENDPOINT="your-foundry-project-endpoint" From 7660e5adc13722c907a3b4ed6255513bcd600bb6 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 26 Mar 2026 11:05:41 +0100 Subject: [PATCH 02/30] fix bandit --- python/packages/openai/agent_framework_openai/_shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/packages/openai/agent_framework_openai/_shared.py b/python/packages/openai/agent_framework_openai/_shared.py index 8f1d64f503..02c9cfa38f 100644 --- a/python/packages/openai/agent_framework_openai/_shared.py +++ b/python/packages/openai/agent_framework_openai/_shared.py @@ -39,7 +39,7 @@ logger: logging.Logger = logging.getLogger("agent_framework.openai") -AZURE_OPENAI_TOKEN_SCOPE = "https://cognitiveservices.azure.com/.default" # noqa: S105 +AZURE_OPENAI_TOKEN_SCOPE = "https://cognitiveservices.azure.com/.default" # noqa: S105 # nosec B105 RESPONSE_TYPE = Union[ From e4db4392baffb6b2a1e02a9682cc495ebd122dee Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 26 Mar 2026 11:20:40 +0100 Subject: [PATCH 03/30] Python: align OpenAI embedding Azure routing Extend the shared OpenAI-vs-Azure routing and credential behavior to the embedding client, add Azure embedding regression coverage, and refresh the embedding samples to use the generic client path. 
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../_deprecated_azure_openai.py | 16 +- .../test_azure_embedding_client.py | 48 ++- python/packages/openai/AGENTS.md | 2 +- .../_embedding_client.py | 349 +++++++++++++----- .../openai/agent_framework_openai/_shared.py | 66 +++- .../packages/openai/tests/openai/conftest.py | 5 + .../openai/test_openai_embedding_client.py | 38 +- .../test_openai_embedding_client_azure.py | 180 +++++++++ .../search_context_semantic.py | 17 +- .../embeddings/azure_openai_embeddings.py | 80 ++-- .../02-agents/embeddings/openai_embeddings.py | 21 +- 11 files changed, 616 insertions(+), 206 deletions(-) create mode 100644 python/packages/openai/tests/openai/test_openai_embedding_client_azure.py diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_deprecated_azure_openai.py b/python/packages/azure-ai/agent_framework_azure_ai/_deprecated_azure_openai.py index f222e4792c..d0e8c4c157 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_deprecated_azure_openai.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_deprecated_azure_openai.py @@ -879,6 +879,8 @@ def __init__( "or 'AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME' environment variable." 
) + endpoint_value = azure_openai_settings.get("endpoint") + base_url_value = azure_openai_settings.get("base_url") if not async_client: # Create the Azure OpenAI client directly merged_headers = dict(copy(default_headers)) if default_headers else {} @@ -896,8 +898,6 @@ def __init__( if not api_key_secret and not ad_token_provider: raise ValueError("Please provide either api_key, credential, or a client.") - endpoint_value = azure_openai_settings.get("endpoint") - base_url_value = azure_openai_settings.get("base_url") if not endpoint_value and not base_url_value: raise ValueError("Please provide an endpoint or a base_url") @@ -922,11 +922,13 @@ def __init__( self.api_version = azure_openai_settings.get("api_version") or "" self.deployment_name = embedding_deployment_name - super().__init__( - async_client=async_client, - model=embedding_deployment_name, - default_headers=default_headers, - ) + with _prefer_single_azure_endpoint_env(endpoint=endpoint_value, base_url=base_url_value): + super().__init__( + async_client=async_client, + model=embedding_deployment_name, + api_version=azure_openai_settings.get("api_version"), + default_headers=default_headers, + ) if otel_provider_name is not None: self.OTEL_PROVIDER_NAME = otel_provider_name # type: ignore[misc] diff --git a/python/packages/azure-ai/tests/azure_openai/test_azure_embedding_client.py b/python/packages/azure-ai/tests/azure_openai/test_azure_embedding_client.py index de78178df1..b27bc9fcd9 100644 --- a/python/packages/azure-ai/tests/azure_openai/test_azure_embedding_client.py +++ b/python/packages/azure-ai/tests/azure_openai/test_azure_embedding_client.py @@ -8,6 +8,7 @@ import pytest from agent_framework.azure import AzureOpenAIEmbeddingClient from agent_framework_openai import OpenAIEmbeddingOptions +from azure.identity.aio import AzureCliCredential from openai.types import CreateEmbeddingResponse from openai.types import Embedding as OpenAIEmbedding from openai.types.create_embedding_response import Usage 
@@ -106,20 +107,45 @@ def test_azure_otel_provider_name(azure_embedding_unit_test_env: None) -> None: skip_if_azure_openai_integration_tests_disabled = pytest.mark.skipif( - not os.getenv("AZURE_OPENAI_ENDPOINT") - or (not os.getenv("AZURE_OPENAI_API_KEY") and not os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME")), - reason="No Azure OpenAI credentials provided; skipping integration tests.", + os.getenv("AZURE_OPENAI_ENDPOINT", "") in ("", "https://test-endpoint.com") + or ( + os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME", "") == "" + and os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME", "") == "" + ), + reason="No Azure OpenAI endpoint or embedding deployment provided; skipping integration tests.", ) +def _get_azure_embedding_deployment_name() -> str: + return os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME") or os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"] + + +def _create_azure_openai_embedding_client( + *, + api_key: str | None = None, + credential: AzureCliCredential | None = None, +) -> AzureOpenAIEmbeddingClient: + resolved_api_key = ( + api_key if api_key is not None else None if credential is not None else os.getenv("AZURE_OPENAI_API_KEY") + ) + return AzureOpenAIEmbeddingClient( + deployment_name=_get_azure_embedding_deployment_name(), + api_key=resolved_api_key, + endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], + api_version=os.getenv("AZURE_OPENAI_API_VERSION"), + credential=credential, + ) + + @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_openai_integration_tests_disabled async def test_integration_azure_openai_get_embeddings() -> None: """End-to-end test of Azure OpenAI embedding generation.""" - client = AzureOpenAIEmbeddingClient() + async with AzureCliCredential() as credential: + client = _create_azure_openai_embedding_client(credential=credential) - result = await client.get_embeddings(["hello world"]) + result = await client.get_embeddings(["hello world"]) assert len(result) == 1 assert isinstance(result[0].vector, list) @@ -135,9 
+161,10 @@ async def test_integration_azure_openai_get_embeddings() -> None: @skip_if_azure_openai_integration_tests_disabled async def test_integration_azure_openai_get_embeddings_multiple() -> None: """Test Azure OpenAI embedding generation for multiple inputs.""" - client = AzureOpenAIEmbeddingClient() + async with AzureCliCredential() as credential: + client = _create_azure_openai_embedding_client(credential=credential) - result = await client.get_embeddings(["hello", "world", "test"]) + result = await client.get_embeddings(["hello", "world", "test"]) assert len(result) == 3 dims = [len(e.vector) for e in result] @@ -149,10 +176,11 @@ async def test_integration_azure_openai_get_embeddings_multiple() -> None: @skip_if_azure_openai_integration_tests_disabled async def test_integration_azure_openai_get_embeddings_with_dimensions() -> None: """Test Azure OpenAI embedding generation with custom dimensions.""" - client = AzureOpenAIEmbeddingClient() + async with AzureCliCredential() as credential: + client = _create_azure_openai_embedding_client(credential=credential) - options: OpenAIEmbeddingOptions = {"dimensions": 256} - result = await client.get_embeddings(["hello world"], options=options) + options: OpenAIEmbeddingOptions = {"dimensions": 256} + result = await client.get_embeddings(["hello world"], options=options) assert len(result) == 1 assert len(result[0].vector) == 256 diff --git a/python/packages/openai/AGENTS.md b/python/packages/openai/AGENTS.md index 2c92e1d18b..48c3a306bd 100644 --- a/python/packages/openai/AGENTS.md +++ b/python/packages/openai/AGENTS.md @@ -27,7 +27,7 @@ agent_framework_openai/ All clients follow the Raw + Full-Featured pattern (e.g., `RawOpenAIChatClient` + `OpenAIChatClient`). -The generic OpenAI chat clients support both OpenAI and Azure OpenAI routing. Precedence is: +The generic OpenAI clients support both OpenAI and Azure OpenAI routing. 
Precedence is: explicit Azure inputs (`credential`, `azure_endpoint`, `api_version`) → OpenAI API key (`OPENAI_API_KEY`) → Azure environment fallback (`AZURE_OPENAI_*`). diff --git a/python/packages/openai/agent_framework_openai/_embedding_client.py b/python/packages/openai/agent_framework_openai/_embedding_client.py index ad959d5b39..8370413424 100644 --- a/python/packages/openai/agent_framework_openai/_embedding_client.py +++ b/python/packages/openai/agent_framework_openai/_embedding_client.py @@ -6,23 +6,31 @@ import struct import sys from collections.abc import Awaitable, Callable, Mapping, Sequence -from copy import copy -from typing import Any, ClassVar, Generic, Literal, TypedDict +from typing import TYPE_CHECKING, Any, ClassVar, Generic, Literal, TypedDict, overload from agent_framework._clients import BaseEmbeddingClient -from agent_framework._settings import SecretString, load_settings -from agent_framework._telemetry import APP_INFO, USER_AGENT_KEY, prepend_agent_framework_to_user_agent +from agent_framework._settings import SecretString +from agent_framework._telemetry import USER_AGENT_KEY from agent_framework._types import Embedding, EmbeddingGenerationOptions, GeneratedEmbeddings, UsageDetails from agent_framework.observability import EmbeddingTelemetryLayer -from openai import AsyncOpenAI +from openai import AsyncAzureOpenAI, AsyncOpenAI -from ._shared import OpenAISettings, get_api_key +from ._shared import AzureTokenProvider, load_openai_service_settings if sys.version_info >= (3, 13): from typing import TypeVar # type: ignore # pragma: no cover else: from typing_extensions import TypeVar # type: ignore # pragma: no cover +if TYPE_CHECKING: + from azure.core.credentials import TokenCredential + from azure.core.credentials_async import AsyncTokenCredential + + AzureCredentialTypes = TokenCredential | AsyncTokenCredential + + +DEFAULT_AZURE_OPENAI_EMBEDDING_API_VERSION = "2024-10-21" + class OpenAIEmbeddingOptions(EmbeddingGenerationOptions, 
total=False): """OpenAI-specific embedding options. @@ -61,11 +69,11 @@ class RawOpenAIEmbeddingClient( INJECTABLE: ClassVar[set[str]] = {"client"} + @overload def __init__( self, *, model: str | None = None, - model_id: str | None = None, api_key: str | SecretString | Callable[[], str | Awaitable[str]] | None = None, org_id: str | None = None, base_url: str | None = None, @@ -73,21 +81,99 @@ def __init__( async_client: AsyncOpenAI | None = None, env_file_path: str | None = None, env_file_encoding: str | None = None, + ) -> None: + """Initialize a raw OpenAI embedding client with OpenAI-only routing. + + This overload describes the OpenAI shape. Explicit keyword arguments are used first, + then ``OPENAI_*`` values from ``env_file_path`` or the process environment. + """ + ... + + @overload + def __init__( + self, + *, + model: str | None = None, + azure_endpoint: str | None = None, + credential: AzureCredentialTypes | AzureTokenProvider | None = None, + api_version: str | None = None, + api_key: str | SecretString | Callable[[], str | Awaitable[str]] | None = None, + base_url: str | None = None, + default_headers: Mapping[str, str] | None = None, + async_client: AsyncAzureOpenAI | AsyncOpenAI | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + ) -> None: + """Initialize a raw OpenAI embedding client with Azure routing. + + This overload describes the Azure shape. Explicit Azure inputs force Azure routing, + and missing Azure values fall back to ``AZURE_OPENAI_*`` values from ``env_file_path`` + or the process environment. + """ + ... 
+ + def __init__( + self, + *, + model: str | None = None, + model_id: str | None = None, + api_key: str | SecretString | Callable[[], str | Awaitable[str]] | None = None, + credential: AzureCredentialTypes | AzureTokenProvider | None = None, + org_id: str | None = None, + base_url: str | None = None, + azure_endpoint: str | None = None, + api_version: str | None = None, + default_headers: Mapping[str, str] | None = None, + async_client: AsyncAzureOpenAI | AsyncOpenAI | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, **kwargs: Any, ) -> None: """Initialize a raw OpenAI embedding client. Keyword Args: - model: OpenAI embedding model name. + model: Embedding model or Azure OpenAI deployment name. When not provided, the + constructor reads ``OPENAI_EMBEDDING_MODEL`` for OpenAI routing. For Azure + routing it first checks ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME`` and then + falls back to ``AZURE_OPENAI_DEPLOYMENT_NAME``. model_id: Deprecated alias for ``model``. - api_key: OpenAI API key, SecretString, or callable returning a key. - org_id: OpenAI organization ID. - base_url: Custom API base URL. + api_key: API key override. For OpenAI routing this maps to ``OPENAI_API_KEY``. + For Azure routing this can be used instead of ``AZURE_OPENAI_API_KEY`` for key + auth. A callable token provider is also accepted for backwards compatibility, + but ``credential`` is the preferred Azure auth surface. + credential: Azure credential or token provider for Azure OpenAI auth. Passing this + is an explicit Azure signal, even when ``OPENAI_API_KEY`` is also configured. + Credential objects require the optional ``azure-identity`` package. + org_id: OpenAI organization ID. Used only for OpenAI routing and resolved from + ``OPENAI_ORG_ID`` when not provided. + base_url: Base URL override. For OpenAI routing this maps to ``OPENAI_BASE_URL``. 
+ For Azure routing this may be used instead of ``azure_endpoint`` when you want + to pass the full ``.../openai/v1`` base URL directly. + azure_endpoint: Azure resource endpoint. When not provided explicitly, Azure routing + falls back to ``AZURE_OPENAI_ENDPOINT``. + api_version: Azure API version. When not provided explicitly, Azure routing falls + back to ``AZURE_OPENAI_API_VERSION`` and then the embedding default. default_headers: Additional HTTP headers. - async_client: Pre-configured AsyncOpenAI client (skips client creation). - env_file_path: Path to .env file for settings. - env_file_encoding: Encoding for .env file. + async_client: Pre-configured client. Passing ``AsyncAzureOpenAI`` keeps the client on + Azure; passing ``AsyncOpenAI`` keeps the client on OpenAI. + env_file_path: Optional ``.env`` file that is checked before process environment + variables. The same file is used for both ``OPENAI_*`` and ``AZURE_OPENAI_*`` + lookups. + env_file_encoding: Encoding for the ``.env`` file. kwargs: Additional keyword arguments forwarded to ``BaseEmbeddingClient``. + + Notes: + Environment resolution and routing precedence are: + + 1. Explicit Azure inputs (``azure_endpoint``, ``api_version``, or ``credential``) + 2. Explicit OpenAI API key or ``OPENAI_API_KEY`` + 3. Azure environment fallback + + OpenAI routing reads ``OPENAI_API_KEY``, ``OPENAI_EMBEDDING_MODEL``, + ``OPENAI_ORG_ID``, and ``OPENAI_BASE_URL``. Azure routing reads + ``AZURE_OPENAI_ENDPOINT``, ``AZURE_OPENAI_BASE_URL``, ``AZURE_OPENAI_API_KEY``, + ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME``, ``AZURE_OPENAI_DEPLOYMENT_NAME``, + and ``AZURE_OPENAI_API_VERSION``. 
""" if model_id is not None and model is None: import warnings @@ -95,59 +181,43 @@ def __init__( warnings.warn("model_id is deprecated, use model instead", DeprecationWarning, stacklevel=2) model = model_id - if not async_client: - openai_settings = load_settings( - OpenAISettings, - env_prefix="OPENAI_", - api_key=api_key, - org_id=org_id, - base_url=base_url, - embedding_model=model, - env_file_path=env_file_path, - env_file_encoding=env_file_encoding, - ) - - api_key_value = openai_settings.get("api_key") - resolved_model = openai_settings.get("embedding_model") or model - - # Only create a client when we have enough configuration. - # Subclasses that manage their own client pass no args here - if api_key_value: - if not resolved_model: - raise ValueError( - "OpenAI embedding model is required. " - "Set via 'model' parameter or 'OPENAI_EMBEDDING_MODEL' environment variable." - ) - model = resolved_model - - resolved_api_key = get_api_key(api_key_value) - - # Merge APP_INFO into the headers - merged_headers = dict(copy(default_headers)) if default_headers else {} - if APP_INFO: - merged_headers.update(APP_INFO) - merged_headers = prepend_agent_framework_to_user_agent(merged_headers) - - client_args: dict[str, Any] = {"api_key": resolved_api_key, "default_headers": merged_headers} - if resolved_org_id := openai_settings.get("org_id"): - client_args["organization"] = resolved_org_id - if resolved_base_url := openai_settings.get("base_url"): - client_args["base_url"] = resolved_base_url - - async_client = AsyncOpenAI(**client_args) + settings, client, use_azure_client = load_openai_service_settings( + model=model, + api_key=api_key, + credential=credential, + org_id=org_id, + base_url=base_url, + endpoint=azure_endpoint, + api_version=api_version, + default_azure_api_version=DEFAULT_AZURE_OPENAI_EMBEDDING_API_VERSION, + default_headers=default_headers, + client=async_client, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + 
openai_model_field="embedding_model", + openai_model_env_var="OPENAI_EMBEDDING_MODEL", + azure_deployment_env_vars=( + "AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME", + "AZURE_OPENAI_DEPLOYMENT_NAME", + ), + ) - self.client = async_client - self.model: str | None = model.strip() if model else None + self.client = client + resolved_model = settings.get("embedding_model") or settings.get("deployment_name") + self.model: str | None = resolved_model.strip() if isinstance(resolved_model, str) and resolved_model else None # Store configuration for serialization - self.org_id = org_id - self.base_url = str(base_url) if base_url else None + self.org_id = settings.get("org_id") + self.base_url = settings.get("base_url") + self.azure_endpoint = settings.get("endpoint") + self.api_version = settings.get("api_version") if default_headers: self.default_headers: dict[str, Any] | None = { k: v for k, v in default_headers.items() if k != USER_AGENT_KEY } else: self.default_headers = None + self._use_azure_client = use_azure_client super().__init__(**kwargs) @@ -225,79 +295,166 @@ class OpenAIEmbeddingClient( RawOpenAIEmbeddingClient[OpenAIEmbeddingOptionsT], Generic[OpenAIEmbeddingOptionsT], ): - """OpenAI embedding client with telemetry support. - - Keyword Args: - model: The embedding model (e.g. "text-embedding-3-small"). - Can also be set via environment variable OPENAI_EMBEDDING_MODEL. - model_id: Deprecated alias for ``model``. - api_key: OpenAI API key. - Can also be set via environment variable OPENAI_API_KEY. - org_id: OpenAI organization ID. - default_headers: Additional HTTP headers. - async_client: Pre-configured AsyncOpenAI client. - base_url: Custom API base URL. - otel_provider_name: Override the OpenTelemetry provider name for telemetry. - env_file_path: Path to .env file for settings. - env_file_encoding: Encoding for .env file. - - Examples: - .. 
code-block:: python + """OpenAI embedding client with telemetry support.""" - from agent_framework.openai import OpenAIEmbeddingClient + OTEL_PROVIDER_NAME: ClassVar[str] = "openai" # type: ignore[reportIncompatibleVariableOverride, misc] - # Using environment variables - # Set OPENAI_API_KEY=sk-... - # Set OPENAI_EMBEDDING_MODEL=text-embedding-3-small - client = OpenAIEmbeddingClient() + @overload + def __init__( + self, + *, + model: str | None = None, + api_key: str | Callable[[], str | Awaitable[str]] | None = None, + org_id: str | None = None, + default_headers: Mapping[str, str] | None = None, + async_client: AsyncOpenAI | None = None, + base_url: str | None = None, + otel_provider_name: str | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + ) -> None: + """Initialize an OpenAI embedding client with OpenAI-only routing. - # Or passing parameters directly - client = OpenAIEmbeddingClient( - model="text-embedding-3-small", - api_key="sk-...", - ) + This overload describes the OpenAI shape. Explicit keyword arguments are used first, + then ``OPENAI_*`` values from ``env_file_path`` or the process environment. + """ + ... - # Generate embeddings - result = await client.get_embeddings(["Hello, world!"]) - print(result[0].vector) - """ + @overload + def __init__( + self, + *, + model: str | None = None, + azure_endpoint: str | None = None, + credential: AzureCredentialTypes | AzureTokenProvider | None = None, + api_version: str | None = None, + api_key: str | Callable[[], str | Awaitable[str]] | None = None, + base_url: str | None = None, + default_headers: Mapping[str, str] | None = None, + async_client: AsyncAzureOpenAI | AsyncOpenAI | None = None, + otel_provider_name: str | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + ) -> None: + """Initialize an OpenAI embedding client with Azure routing. 
- OTEL_PROVIDER_NAME: ClassVar[str] = "openai" # type: ignore[reportIncompatibleVariableOverride, misc] + This overload describes the Azure shape. Explicit Azure inputs force Azure routing, + and missing Azure values fall back to ``AZURE_OPENAI_*`` values from ``env_file_path`` + or the process environment. + """ + ... def __init__( self, *, model: str | None = None, api_key: str | Callable[[], str | Awaitable[str]] | None = None, + credential: AzureCredentialTypes | AzureTokenProvider | None = None, org_id: str | None = None, default_headers: Mapping[str, str] | None = None, - async_client: AsyncOpenAI | None = None, + async_client: AsyncAzureOpenAI | AsyncOpenAI | None = None, base_url: str | None = None, + azure_endpoint: str | None = None, + api_version: str | None = None, otel_provider_name: str | None = None, env_file_path: str | None = None, env_file_encoding: str | None = None, ) -> None: - """Initialize an OpenAI embedding client.""" + """Initialize an OpenAI embedding client. + + Keyword Args: + model: Embedding model or Azure OpenAI deployment name. When not provided, the + constructor reads ``OPENAI_EMBEDDING_MODEL`` for OpenAI routing. For Azure + routing it first checks ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME`` and then + falls back to ``AZURE_OPENAI_DEPLOYMENT_NAME``. + api_key: API key override. For OpenAI routing this maps to ``OPENAI_API_KEY``. + For Azure routing this can be used instead of ``AZURE_OPENAI_API_KEY`` for key + auth. A callable token provider is also accepted for backwards compatibility, + but ``credential`` is the preferred Azure auth surface. + credential: Azure credential or token provider for Azure OpenAI auth. Passing this + is an explicit Azure signal, even when ``OPENAI_API_KEY`` is also configured. + Credential objects require the optional ``azure-identity`` package. + org_id: OpenAI organization ID. Used only for OpenAI routing and resolved from + ``OPENAI_ORG_ID`` when not provided. 
+ default_headers: Additional HTTP headers. + async_client: Pre-configured client. Passing ``AsyncAzureOpenAI`` keeps the client on + Azure; passing ``AsyncOpenAI`` keeps the client on OpenAI. + base_url: Base URL override. For OpenAI routing this maps to ``OPENAI_BASE_URL``. + For Azure routing this may be used instead of ``azure_endpoint`` when you want + to pass the full ``.../openai/v1`` base URL directly. + azure_endpoint: Azure resource endpoint. When not provided explicitly, Azure routing + falls back to ``AZURE_OPENAI_ENDPOINT``. + api_version: Azure API version. When not provided explicitly, Azure routing falls + back to ``AZURE_OPENAI_API_VERSION`` and then the embedding default. + otel_provider_name: Override the OpenTelemetry provider name. + env_file_path: Optional ``.env`` file that is checked before process environment + variables. The same file is used for both ``OPENAI_*`` and ``AZURE_OPENAI_*`` + lookups. + env_file_encoding: Encoding for the ``.env`` file. + + Notes: + Environment resolution and routing precedence are: + + 1. Explicit Azure inputs (``azure_endpoint``, ``api_version``, or ``credential``) + 2. Explicit OpenAI API key or ``OPENAI_API_KEY`` + 3. Azure environment fallback + + OpenAI routing reads ``OPENAI_API_KEY``, ``OPENAI_EMBEDDING_MODEL``, + ``OPENAI_ORG_ID``, and ``OPENAI_BASE_URL``. Azure routing reads + ``AZURE_OPENAI_ENDPOINT``, ``AZURE_OPENAI_BASE_URL``, ``AZURE_OPENAI_API_KEY``, + ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME``, ``AZURE_OPENAI_DEPLOYMENT_NAME``, + and ``AZURE_OPENAI_API_VERSION``. + + Examples: + .. code-block:: python + + from agent_framework.openai import OpenAIEmbeddingClient + + # Using environment variables + # Set OPENAI_API_KEY=sk-... 
+ # Set OPENAI_EMBEDDING_MODEL=text-embedding-3-small + client = OpenAIEmbeddingClient() + + # Or passing OpenAI parameters directly + client = OpenAIEmbeddingClient( + model="text-embedding-3-small", + api_key="sk-...", + ) + + # Or using Azure OpenAI with an Azure credential + client = OpenAIEmbeddingClient( + model="text-embedding-3-small", + azure_endpoint="https://example-resource.openai.azure.com/", + credential=my_azure_credential, + ) + """ super().__init__( model=model, api_key=api_key, + credential=credential, org_id=org_id, base_url=base_url, + azure_endpoint=azure_endpoint, + api_version=api_version, default_headers=default_headers, async_client=async_client, env_file_path=env_file_path, env_file_encoding=env_file_encoding, ) + if isinstance(self.client, AsyncAzureOpenAI) or self._use_azure_client: + self.OTEL_PROVIDER_NAME = "azure.ai.openai" # type: ignore[misc] if otel_provider_name is not None: self.OTEL_PROVIDER_NAME = otel_provider_name # type: ignore[misc] # Validate that the client was created successfully (from explicit args or env vars) if self.client is None: raise ValueError( - "OpenAI API key is required. Set via 'api_key' parameter or 'OPENAI_API_KEY' environment variable." + "OpenAI or Azure OpenAI credentials are required. Set via the 'api_key' or 'credential' parameter, " + "or the 'OPENAI_API_KEY' or 'AZURE_OPENAI_API_KEY' environment variable." ) if not self.model: raise ValueError( - "OpenAI embedding model is required. " - "Set via 'model' parameter or 'OPENAI_EMBEDDING_MODEL' environment variable." + "An embedding model or Azure OpenAI deployment name is required. " + "Set via the 'model' parameter, 'OPENAI_EMBEDDING_MODEL', " + "'AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME', or 'AZURE_OPENAI_DEPLOYMENT_NAME'." 
) diff --git a/python/packages/openai/agent_framework_openai/_shared.py b/python/packages/openai/agent_framework_openai/_shared.py index 02c9cfa38f..8aabe6cb19 100644 --- a/python/packages/openai/agent_framework_openai/_shared.py +++ b/python/packages/openai/agent_framework_openai/_shared.py @@ -7,7 +7,7 @@ import sys from collections.abc import Awaitable, Callable, Mapping, MutableMapping, Sequence from copy import copy -from typing import TYPE_CHECKING, Any, ClassVar, Union, cast +from typing import TYPE_CHECKING, Any, ClassVar, Literal, Union, cast import openai from agent_framework._serialization import SerializationMixin @@ -134,6 +134,23 @@ class AzureOpenAISettings(TypedDict, total=False): api_version: str | None +OpenAIModelSettingName = Literal["model", "embedding_model"] + + +def _get_env_setting( + env_var_name: str, + *, + env_file_path: str | None, + env_file_encoding: str | None, +) -> str | None: + """Read a setting from an optional ``.env`` file first, then the process environment.""" + if env_file_path: + dotenv_value = get_key(env_file_path, env_var_name, encoding=env_file_encoding) # type: ignore[reportArgumentType, arg-type] + if dotenv_value: + return dotenv_value + return os.getenv(env_var_name) + + def load_openai_service_settings( *, model: str | None, @@ -148,6 +165,9 @@ def load_openai_service_settings( client: AsyncOpenAI | None = None, env_file_path: str | None, env_file_encoding: str | None, + openai_model_field: OpenAIModelSettingName = "model", + openai_model_env_var: str = "OPENAI_MODEL", + azure_deployment_env_vars: Sequence[str] = ("AZURE_OPENAI_DEPLOYMENT_NAME",), ) -> tuple[dict[str, Any], AsyncOpenAI, bool]: """Load OpenAI settings, including Azure OpenAI aliases. 
@@ -168,22 +188,27 @@ def load_openai_service_settings( azure_client = isinstance(client, AsyncAzureOpenAI) use_azure = azure_client or endpoint is not None or api_version is not None or credential is not None if not use_azure: + openai_settings_kwargs: dict[str, Any] = { + "api_key": api_key_str, + "org_id": org_id, + "base_url": base_url, + "env_file_path": env_file_path, + "env_file_encoding": env_file_encoding, + } + openai_settings_kwargs[openai_model_field] = model openai_settings = load_settings( OpenAISettings, env_prefix="OPENAI_", - api_key=api_key_str, - org_id=org_id, - base_url=base_url, - model=model, - env_file_path=env_file_path, - env_file_encoding=env_file_encoding, + **openai_settings_kwargs, ) if client: return openai_settings, client, False # type: ignore[return-value] if openai_settings.get("api_key") is not None: - if not (model := openai_settings.get("model")): + resolved_model = openai_settings.get(openai_model_field) + if not resolved_model: raise SettingNotFoundError( - "Model must be specified via the 'model' parameter or the 'OPENAI_MODEL' environment variable." + "Model must be specified via the 'model' parameter or the " + f"'{openai_model_env_var}' environment variable." 
) client_args: dict[str, Any] = { @@ -200,7 +225,7 @@ def load_openai_service_settings( azure_settings = load_settings( AzureOpenAISettings, env_prefix="AZURE_OPENAI_", - required_fields=[("base_url", "endpoint")], + required_fields=None if client else [("base_url", "endpoint")], api_key=api_key_str, endpoint=endpoint, base_url=base_url, @@ -210,19 +235,32 @@ def load_openai_service_settings( env_file_encoding=env_file_encoding, ) client_args = {} + if model is None: + for azure_deployment_env_var in azure_deployment_env_vars: + if deployment_name := _get_env_setting( + azure_deployment_env_var, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ): + azure_settings["deployment_name"] = deployment_name + break if ("deployment_name" not in azure_settings or not azure_settings["deployment_name"]) and ( - openai_model := ( - get_key(env_file_path, "OPENAI_MODEL", encoding=env_file_encoding) or os.getenv("OPENAI_MODEL") # type: ignore[reportArgumentType, arg-type] + openai_model := _get_env_setting( + openai_model_env_var, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, ) ): - # load `OPENAI_MODEL` from the environment as a fallback + # load the OpenAI model env var as a fallback for Azure routing azure_settings["deployment_name"] = openai_model if model := azure_settings.get("deployment_name"): client_args["azure_deployment"] = model else: + deployment_env_guidance = ", ".join(f"'{env_var}'" for env_var in azure_deployment_env_vars) raise ValueError( "Azure OpenAI client requires a deployment name, which can be provided via the 'model' parameter, " - "the 'AZURE_OPENAI_DEPLOYMENT_NAME' environment variable, or the 'OPENAI_MODEL' environment variable." + f"the {deployment_env_guidance} environment variable, or the '{openai_model_env_var}' " + "environment variable." 
) if client: return azure_settings, client, True # type: ignore[return-value] diff --git a/python/packages/openai/tests/openai/conftest.py b/python/packages/openai/tests/openai/conftest.py index 07f6012209..0a578fac8e 100644 --- a/python/packages/openai/tests/openai/conftest.py +++ b/python/packages/openai/tests/openai/conftest.py @@ -55,6 +55,7 @@ def openai_unit_test_env(monkeypatch, exclude_list, override_env_param_dict): # "AZURE_OPENAI_API_KEY", "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME", "AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME", + "AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME", "AZURE_OPENAI_DEPLOYMENT_NAME", "AZURE_OPENAI_API_VERSION", ], @@ -109,6 +110,9 @@ def azure_openai_unit_test_env(monkeypatch, exclude_list, override_env_param_dic "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_BASE_URL", "AZURE_OPENAI_API_KEY", + "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME", + "AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME", + "AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME", "AZURE_OPENAI_DEPLOYMENT_NAME", "AZURE_OPENAI_API_VERSION", ], @@ -118,6 +122,7 @@ def azure_openai_unit_test_env(monkeypatch, exclude_list, override_env_param_dic "AZURE_OPENAI_ENDPOINT": "https://test-endpoint.openai.azure.com", "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME": "test_chat_deployment", "AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME": "test_responses_deployment", + "AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME": "test_embedding_deployment", "AZURE_OPENAI_DEPLOYMENT_NAME": "test_deployment", "AZURE_OPENAI_API_KEY": "test_api_key", "AZURE_OPENAI_API_VERSION": "2024-12-01-preview", diff --git a/python/packages/openai/tests/openai/test_openai_embedding_client.py b/python/packages/openai/tests/openai/test_openai_embedding_client.py index 7117040ffc..80d5ca6899 100644 --- a/python/packages/openai/tests/openai/test_openai_embedding_client.py +++ b/python/packages/openai/tests/openai/test_openai_embedding_client.py @@ -6,6 +6,7 @@ from unittest.mock import AsyncMock, MagicMock import pytest +from agent_framework.exceptions import SettingNotFoundError from 
openai.types import CreateEmbeddingResponse from openai.types import Embedding as OpenAIEmbedding from openai.types.create_embedding_response import Usage @@ -32,13 +33,6 @@ def _make_openai_response( ) -@pytest.fixture -def openai_unit_test_env(monkeypatch: pytest.MonkeyPatch) -> None: - """Set up environment variables for OpenAI embedding client.""" - monkeypatch.setenv("OPENAI_API_KEY", "test-api-key") - monkeypatch.setenv("OPENAI_EMBEDDING_MODEL", "text-embedding-3-small") - - # --- OpenAI unit tests --- @@ -50,24 +44,24 @@ def test_openai_construction_with_explicit_params() -> None: assert client.model == "text-embedding-3-small" -def test_openai_construction_from_env(openai_unit_test_env: None) -> None: +def test_openai_construction_from_env(openai_unit_test_env: dict[str, str]) -> None: client = OpenAIEmbeddingClient() - assert client.model == "text-embedding-3-small" + assert client.model == openai_unit_test_env["OPENAI_EMBEDDING_MODEL"] -def test_openai_construction_missing_api_key_raises(monkeypatch: pytest.MonkeyPatch) -> None: - monkeypatch.delenv("OPENAI_API_KEY", raising=False) - with pytest.raises(ValueError, match="API key is required"): +@pytest.mark.parametrize("exclude_list", [["OPENAI_API_KEY"]], indirect=True) +def test_openai_construction_missing_api_key_raises(openai_unit_test_env: dict[str, str]) -> None: + with pytest.raises(SettingNotFoundError, match="Exactly one of 'base_url', 'endpoint'"): OpenAIEmbeddingClient(model="text-embedding-3-small") -def test_openai_construction_missing_model_raises(monkeypatch: pytest.MonkeyPatch) -> None: - monkeypatch.delenv("OPENAI_EMBEDDING_MODEL", raising=False) - with pytest.raises(ValueError, match="embedding model is required"): - OpenAIEmbeddingClient(api_key="test-key") +@pytest.mark.parametrize("exclude_list", [["OPENAI_EMBEDDING_MODEL"]], indirect=True) +def test_openai_construction_missing_model_raises(openai_unit_test_env: dict[str, str]) -> None: + with pytest.raises(SettingNotFoundError, 
match="OPENAI_EMBEDDING_MODEL"): + OpenAIEmbeddingClient() -async def test_openai_get_embeddings(openai_unit_test_env: None) -> None: +async def test_openai_get_embeddings(openai_unit_test_env: dict[str, str]) -> None: mock_response = _make_openai_response( embeddings=[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], ) @@ -85,7 +79,7 @@ async def test_openai_get_embeddings(openai_unit_test_env: None) -> None: assert result[0].dimensions == 3 -async def test_openai_get_embeddings_usage(openai_unit_test_env: None) -> None: +async def test_openai_get_embeddings_usage(openai_unit_test_env: dict[str, str]) -> None: mock_response = _make_openai_response( embeddings=[[0.1]], prompt_tokens=10, @@ -103,7 +97,7 @@ async def test_openai_get_embeddings_usage(openai_unit_test_env: None) -> None: assert result.usage["total_token_count"] == 10 -async def test_openai_options_passthrough_dimensions(openai_unit_test_env: None) -> None: +async def test_openai_options_passthrough_dimensions(openai_unit_test_env: dict[str, str]) -> None: mock_response = _make_openai_response(embeddings=[[0.1]]) client = OpenAIEmbeddingClient() client.client = MagicMock() @@ -118,7 +112,7 @@ async def test_openai_options_passthrough_dimensions(openai_unit_test_env: None) assert result.options is options -async def test_openai_options_passthrough_encoding_format(openai_unit_test_env: None) -> None: +async def test_openai_options_passthrough_encoding_format(openai_unit_test_env: dict[str, str]) -> None: mock_response = _make_openai_response(embeddings=[[0.1]]) client = OpenAIEmbeddingClient() client.client = MagicMock() @@ -132,7 +126,7 @@ async def test_openai_options_passthrough_encoding_format(openai_unit_test_env: assert call_kwargs["encoding_format"] == "base64" -async def test_openai_base64_decoding(openai_unit_test_env: None) -> None: +async def test_openai_base64_decoding(openai_unit_test_env: dict[str, str]) -> None: import base64 import struct @@ -176,7 +170,7 @@ async def test_openai_error_when_no_model_id() 
-> None: await client.get_embeddings(["test"]) -async def test_openai_empty_values_returns_empty(openai_unit_test_env: None) -> None: +async def test_openai_empty_values_returns_empty(openai_unit_test_env: dict[str, str]) -> None: client = OpenAIEmbeddingClient() client.client = MagicMock() client.client.embeddings = MagicMock() diff --git a/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py b/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py new file mode 100644 index 0000000000..7a1befa3c7 --- /dev/null +++ b/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py @@ -0,0 +1,180 @@ +# Copyright (c) Microsoft. All rights reserved. + +from __future__ import annotations + +import os +from unittest.mock import MagicMock, patch + +import pytest +from azure.core.credentials_async import AsyncTokenCredential +from azure.identity.aio import AzureCliCredential +from openai import AsyncAzureOpenAI + +from agent_framework_openai import OpenAIEmbeddingClient, OpenAIEmbeddingOptions + +pytestmark = pytest.mark.azure + +skip_if_azure_openai_integration_tests_disabled = pytest.mark.skipif( + os.getenv("AZURE_OPENAI_ENDPOINT", "") in ("", "https://test-endpoint.openai.azure.com") + or ( + os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME", "") == "" + and os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME", "") == "" + ), + reason="No real Azure OpenAI endpoint or embedding deployment provided; skipping integration tests.", +) + + +def _get_azure_embedding_deployment_name() -> str: + return os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME") or os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"] + + +def _create_azure_embedding_client( + *, + api_key: str | None = None, + credential: AsyncTokenCredential | None = None, +) -> OpenAIEmbeddingClient: + resolved_api_key = ( + api_key if api_key is not None else None if credential is not None else os.environ["AZURE_OPENAI_API_KEY"] + ) + return OpenAIEmbeddingClient( + 
model=_get_azure_embedding_deployment_name(), + api_key=resolved_api_key, + azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], + api_version=os.getenv("AZURE_OPENAI_API_VERSION"), + credential=credential, + ) + + +def test_init_with_azure_endpoint(azure_openai_unit_test_env: dict[str, str]) -> None: + client = _create_azure_embedding_client() + + assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME"] + assert isinstance(client.client, AsyncAzureOpenAI) + assert client.OTEL_PROVIDER_NAME == "azure.ai.openai" + assert client.azure_endpoint == azure_openai_unit_test_env["AZURE_OPENAI_ENDPOINT"] + assert client.api_version == azure_openai_unit_test_env["AZURE_OPENAI_API_VERSION"] + + +def test_init_auto_detects_azure_embedding_env(azure_openai_unit_test_env: dict[str, str]) -> None: + client = OpenAIEmbeddingClient() + + assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME"] + assert isinstance(client.client, AsyncAzureOpenAI) + assert client.azure_endpoint == azure_openai_unit_test_env["AZURE_OPENAI_ENDPOINT"] + + +def test_init_falls_back_to_generic_azure_deployment_env( + monkeypatch, azure_openai_unit_test_env: dict[str, str] +) -> None: + monkeypatch.delenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME", raising=False) + + client = OpenAIEmbeddingClient() + + assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_DEPLOYMENT_NAME"] + assert isinstance(client.client, AsyncAzureOpenAI) + + +def test_openai_api_key_wins_over_azure_env(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: + monkeypatch.setenv("OPENAI_API_KEY", "test-dummy-key") + monkeypatch.setenv("OPENAI_EMBEDDING_MODEL", "text-embedding-3-small") + + client = OpenAIEmbeddingClient() + + assert client.model == "text-embedding-3-small" + assert not isinstance(client.client, AsyncAzureOpenAI) + assert client.azure_endpoint is None + + +def test_explicit_credential_wins_over_openai_api_key( + monkeypatch, 
azure_openai_unit_test_env: dict[str, str] +) -> None: + monkeypatch.setenv("OPENAI_API_KEY", "test-dummy-key") + monkeypatch.setenv("OPENAI_EMBEDDING_MODEL", "text-embedding-3-small") + + client = OpenAIEmbeddingClient(credential=lambda: "token") + + assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME"] + assert isinstance(client.client, AsyncAzureOpenAI) + assert client.azure_endpoint == azure_openai_unit_test_env["AZURE_OPENAI_ENDPOINT"] + + +def test_init_with_credential_wraps_async_token_credential( + monkeypatch, azure_openai_unit_test_env: dict[str, str] +) -> None: + monkeypatch.setenv("OPENAI_API_KEY", "test-dummy-key") + monkeypatch.setenv("OPENAI_EMBEDDING_MODEL", "text-embedding-3-small") + credential = MagicMock(spec=AsyncTokenCredential) + token_provider = MagicMock() + + with patch("azure.identity.aio.get_bearer_token_provider", return_value=token_provider) as mock_provider: + client = OpenAIEmbeddingClient(credential=credential) + + assert isinstance(client.client, AsyncAzureOpenAI) + assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME"] + mock_provider.assert_called_once_with(credential, "https://cognitiveservices.azure.com/.default") + + +@pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_API_VERSION"]], indirect=True) +def test_init_uses_default_azure_api_version(azure_openai_unit_test_env: dict[str, str]) -> None: + client = _create_azure_embedding_client() + + assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME"] + assert client.api_version == "2024-10-21" + + +def test_openai_base_url_wins_over_azure_aliases(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: + monkeypatch.setenv("OPENAI_API_KEY", "test-dummy-key") + monkeypatch.setenv("OPENAI_EMBEDDING_MODEL", "text-embedding-3-small") + monkeypatch.setenv("OPENAI_BASE_URL", "https://custom-openai-endpoint.com/v1") + + client = OpenAIEmbeddingClient() + + 
assert client.model == "text-embedding-3-small" + assert not isinstance(client.client, AsyncAzureOpenAI) + assert client.azure_endpoint is None + + +@pytest.mark.flaky +@pytest.mark.integration +@skip_if_azure_openai_integration_tests_disabled +async def test_azure_openai_get_embeddings() -> None: + async with AzureCliCredential() as credential: + client = _create_azure_embedding_client(credential=credential) + + result = await client.get_embeddings(["hello world"]) + + assert len(result) == 1 + assert isinstance(result[0].vector, list) + assert len(result[0].vector) > 0 + assert all(isinstance(v, float) for v in result[0].vector) + assert result[0].model is not None + assert result.usage is not None + assert result.usage["input_token_count"] > 0 + + +@pytest.mark.flaky +@pytest.mark.integration +@skip_if_azure_openai_integration_tests_disabled +async def test_azure_openai_get_embeddings_multiple() -> None: + async with AzureCliCredential() as credential: + client = _create_azure_embedding_client(credential=credential) + + result = await client.get_embeddings(["hello", "world", "test"]) + + assert len(result) == 3 + dims = [len(embedding.vector) for embedding in result] + assert all(dimension == dims[0] for dimension in dims) + + +@pytest.mark.flaky +@pytest.mark.integration +@skip_if_azure_openai_integration_tests_disabled +async def test_azure_openai_get_embeddings_with_dimensions() -> None: + async with AzureCliCredential() as credential: + client = _create_azure_embedding_client(credential=credential) + + options: OpenAIEmbeddingOptions = {"dimensions": 256} + result = await client.get_embeddings(["hello world"], options=options) + + assert len(result) == 1 + assert len(result[0].vector) == 256 diff --git a/python/samples/02-agents/context_providers/azure_ai_search/search_context_semantic.py b/python/samples/02-agents/context_providers/azure_ai_search/search_context_semantic.py index 8cd8947ef6..98b7d66f88 100644 --- 
a/python/samples/02-agents/context_providers/azure_ai_search/search_context_semantic.py +++ b/python/samples/02-agents/context_providers/azure_ai_search/search_context_semantic.py @@ -4,8 +4,9 @@ import os from agent_framework import Agent -from agent_framework.azure import AzureAISearchContextProvider, AzureOpenAIEmbeddingClient +from agent_framework.azure import AzureAISearchContextProvider from agent_framework.foundry import FoundryChatClient +from agent_framework.openai import OpenAIEmbeddingClient from azure.identity.aio import AzureCliCredential from dotenv import load_dotenv @@ -31,8 +32,8 @@ - AZURE_SEARCH_INDEX_NAME: Your search index name - FOUNDRY_PROJECT_ENDPOINT: Your Azure AI Foundry project endpoint - AZURE_AI_MODEL_DEPLOYMENT_NAME: Your model deployment name (e.g., "gpt-4o") - - AZURE_OPENAI_EMBEDDING_MODEL_ID: (Optional) Your embedding model for hybrid search (e.g., "text-embedding-3-small") - - AZURE_OPENAI_ENDPOINT: (Optional) Your Azure OpenAI resource URL, required if using an OpenAI embedding model for hybrid search + - AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME: (Optional) Your Azure OpenAI embedding deployment for hybrid search + - AZURE_OPENAI_ENDPOINT: (Optional) Your Azure OpenAI resource URL, required if using Azure OpenAI embeddings """ # Sample queries to demonstrate RAG @@ -55,13 +56,13 @@ async def main() -> None: project_endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] model_deployment = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "gpt-4o") openai_endpoint = os.environ.get("AZURE_OPENAI_ENDPOINT") - embedding_model = os.environ.get("AZURE_OPENAI_EMBEDDING_MODEL_ID", "text-embedding-3-small") + embedding_deployment = os.environ.get("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME") embedding_client = None - if openai_endpoint and embedding_model: - embedding_client = AzureOpenAIEmbeddingClient( - endpoint=openai_endpoint, - model=embedding_model, + if openai_endpoint and embedding_deployment: + embedding_client = OpenAIEmbeddingClient( + 
azure_endpoint=openai_endpoint, + model=embedding_deployment, credential=credential, ) diff --git a/python/samples/02-agents/embeddings/azure_openai_embeddings.py b/python/samples/02-agents/embeddings/azure_openai_embeddings.py index 16669eb51f..460bbcf7de 100644 --- a/python/samples/02-agents/embeddings/azure_openai_embeddings.py +++ b/python/samples/02-agents/embeddings/azure_openai_embeddings.py @@ -2,55 +2,59 @@ # Run with: uv run samples/02-agents/embeddings/azure_openai_embeddings.py - import asyncio +import os -from agent_framework.azure import AzureOpenAIEmbeddingClient +from agent_framework.openai import OpenAIEmbeddingClient +from azure.identity.aio import AzureCliCredential from dotenv import load_dotenv -load_dotenv() - -"""Azure OpenAI Embedding Client Example - -This sample demonstrates how to generate embeddings using the Azure OpenAI embedding client. -It supports both API key and Azure credential authentication. +"""This sample demonstrates Azure OpenAI embedding generation with ``OpenAIEmbeddingClient``. Prerequisites: - Set the following environment variables or add them to a .env file: - - AZURE_OPENAI_ENDPOINT: Your Azure OpenAI endpoint URL - - AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME: The embedding model deployment name - - AZURE_OPENAI_API_KEY: Your API key (or use Azure credential instead) + Set the following environment variables or add them to a local ``.env`` file: + - ``AZURE_OPENAI_ENDPOINT``: Your Azure OpenAI endpoint URL + - ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME``: The embedding deployment name + - ``AZURE_OPENAI_API_VERSION``: Optional API version override + + Sign in with ``az login`` before running the sample. """ +load_dotenv() + async def main() -> None: """Generate embeddings with Azure OpenAI.""" - # 1. Create a client using environment variables. - # Reads AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME, - # and AZURE_OPENAI_API_KEY from environment. - client = AzureOpenAIEmbeddingClient() - - # 2. 
Generate a single embedding. - result = await client.get_embeddings(["Hello, world!"]) - print(f"Single embedding dimensions: {result[0].dimensions}") - print(f"First 5 values: {result[0].vector[:5]}") - print(f"Model: {result[0].model_id}") - print(f"Usage: {result.usage}") - print() - - # 3. Generate embeddings for multiple inputs. - texts = [ - "The weather is sunny today.", - "It is raining outside.", - "Machine learning is fascinating.", - ] - result = await client.get_embeddings(texts) - print(f"Batch of {len(result)} embeddings, each with {result[0].dimensions} dimensions") - print() - - # 4. Generate embeddings with custom dimensions. - result = await client.get_embeddings(["Custom dimensions example"], options={"dimensions": 256}) - print(f"Custom dimensions: {result[0].dimensions}") + async with AzureCliCredential() as credential: + client = OpenAIEmbeddingClient( + model=os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME"), + azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"), + api_version=os.getenv("AZURE_OPENAI_API_VERSION"), + credential=credential, + ) + + # 1. Generate a single embedding. + result = await client.get_embeddings(["Hello, world!"]) + print(f"Single embedding dimensions: {result[0].dimensions}") + print(f"First 5 values: {result[0].vector[:5]}") + print(f"Model: {result[0].model}") + print(f"Usage: {result.usage}") + print() + + # 2. Generate embeddings for multiple inputs. + texts = [ + "The weather is sunny today.", + "It is raining outside.", + "Machine learning is fascinating.", + ] + result = await client.get_embeddings(texts) + print(f"Batch of {len(result)} embeddings, each with {result[0].dimensions} dimensions") + print(f"First embedding vector: {result[0].vector[:5]}") + print() + + # 3. Generate embeddings with custom dimensions. 
+ result = await client.get_embeddings(["Custom dimensions example"], options={"dimensions": 256}) + print(f"Custom dimensions: {result[0].dimensions}") if __name__ == "__main__": diff --git a/python/samples/02-agents/embeddings/openai_embeddings.py b/python/samples/02-agents/embeddings/openai_embeddings.py index 001b6593f5..d49b3034dd 100644 --- a/python/samples/02-agents/embeddings/openai_embeddings.py +++ b/python/samples/02-agents/embeddings/openai_embeddings.py @@ -3,31 +3,32 @@ # Run with: uv run samples/02-agents/embeddings/openai_embeddings.py import asyncio +import os from agent_framework.openai import OpenAIEmbeddingClient from dotenv import load_dotenv -load_dotenv() - -"""OpenAI Embedding Client Example - -This sample demonstrates how to generate embeddings using the OpenAI embedding client. -It shows single and batch embedding generation, as well as custom dimensions. +"""This sample demonstrates OpenAI embedding generation with explicit constructor settings. Prerequisites: - Set the OPENAI_API_KEY environment variable or add it to a .env file. + Set ``OPENAI_API_KEY`` in your environment or in a local ``.env`` file. """ +load_dotenv() + async def main() -> None: """Generate embeddings with OpenAI.""" - client = OpenAIEmbeddingClient(model="text-embedding-3-small") + client = OpenAIEmbeddingClient( + model="text-embedding-3-small", + api_key=os.getenv("OPENAI_API_KEY"), + ) # 1. Generate a single embedding. 
result = await client.get_embeddings(["Hello, world!"]) print(f"Single embedding dimensions: {result[0].dimensions}") print(f"First 5 values: {result[0].vector[:5]}") - print(f"Model: {result[0].model_id}") + print(f"Model: {result[0].model}") print(f"Usage: {result.usage}") print() @@ -39,7 +40,7 @@ async def main() -> None: ] result = await client.get_embeddings(texts) print(f"Batch of {len(result)} embeddings, each with {result[0].dimensions} dimensions") - print(f"First embedding vector: {result[0].vector[:5]}") # Print first 5 values of the first embedding + print(f"First embedding vector: {result[0].vector[:5]}") print() # 3. Generate embeddings with custom dimensions. From 0da48b64c1f2d1213bf963592f2953d91ac5c269 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 26 Mar 2026 15:24:42 +0100 Subject: [PATCH 04/30] Python: fix embedding client pyright check Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../openai/agent_framework_openai/_embedding_client.py | 6 ------ .../tests/openai/test_openai_embedding_client_azure.py | 4 +--- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/python/packages/openai/agent_framework_openai/_embedding_client.py b/python/packages/openai/agent_framework_openai/_embedding_client.py index 8370413424..366f955a55 100644 --- a/python/packages/openai/agent_framework_openai/_embedding_client.py +++ b/python/packages/openai/agent_framework_openai/_embedding_client.py @@ -446,12 +446,6 @@ def __init__( if otel_provider_name is not None: self.OTEL_PROVIDER_NAME = otel_provider_name # type: ignore[misc] - # Validate that the client was created successfully (from explicit args or env vars) - if self.client is None: - raise ValueError( - "OpenAI or Azure OpenAI credentials are required. Set via the 'api_key' or 'credential' parameter, " - "or the 'OPENAI_API_KEY' or 'AZURE_OPENAI_API_KEY' environment variable." 
- ) if not self.model: raise ValueError( "An embedding model or Azure OpenAI deployment name is required. " diff --git a/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py b/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py index 7a1befa3c7..d252d316cf 100644 --- a/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py @@ -85,9 +85,7 @@ def test_openai_api_key_wins_over_azure_env(monkeypatch, azure_openai_unit_test_ assert client.azure_endpoint is None -def test_explicit_credential_wins_over_openai_api_key( - monkeypatch, azure_openai_unit_test_env: dict[str, str] -) -> None: +def test_explicit_credential_wins_over_openai_api_key(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: monkeypatch.setenv("OPENAI_API_KEY", "test-dummy-key") monkeypatch.setenv("OPENAI_EMBEDDING_MODEL", "text-embedding-3-small") From a0ff8d295012786e2b3a7a44f58002d2ee63bcfc Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 26 Mar 2026 15:27:17 +0100 Subject: [PATCH 05/30] Python: thin OpenAI embedding wrapper Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../agent_framework_openai/_embedding_client.py | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/python/packages/openai/agent_framework_openai/_embedding_client.py b/python/packages/openai/agent_framework_openai/_embedding_client.py index 366f955a55..c87c1f4991 100644 --- a/python/packages/openai/agent_framework_openai/_embedding_client.py +++ b/python/packages/openai/agent_framework_openai/_embedding_client.py @@ -217,7 +217,8 @@ def __init__( } else: self.default_headers = None - self._use_azure_client = use_azure_client + if use_azure_client: + self.OTEL_PROVIDER_NAME = "azure.ai.openai" # type: ignore[misc] super().__init__(**kwargs) @@ -438,17 +439,7 @@ def __init__( api_version=api_version, 
default_headers=default_headers, async_client=async_client, + otel_provider_name=otel_provider_name, env_file_path=env_file_path, env_file_encoding=env_file_encoding, ) - if isinstance(self.client, AsyncAzureOpenAI) or self._use_azure_client: - self.OTEL_PROVIDER_NAME = "azure.ai.openai" # type: ignore[misc] - if otel_provider_name is not None: - self.OTEL_PROVIDER_NAME = otel_provider_name # type: ignore[misc] - - if not self.model: - raise ValueError( - "An embedding model or Azure OpenAI deployment name is required. " - "Set via the 'model' parameter, 'OPENAI_EMBEDDING_MODEL', " - "'AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME', or 'AZURE_OPENAI_DEPLOYMENT_NAME'." - ) From 2c2a595cf1b4b341fc71faaf8bb1920a0886b198 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 26 Mar 2026 15:28:46 +0100 Subject: [PATCH 06/30] Python: document embedding overload routing Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../_embedding_client.py | 54 +++++++++++++++---- 1 file changed, 44 insertions(+), 10 deletions(-) diff --git a/python/packages/openai/agent_framework_openai/_embedding_client.py b/python/packages/openai/agent_framework_openai/_embedding_client.py index c87c1f4991..7611397e69 100644 --- a/python/packages/openai/agent_framework_openai/_embedding_client.py +++ b/python/packages/openai/agent_framework_openai/_embedding_client.py @@ -84,8 +84,15 @@ def __init__( ) -> None: """Initialize a raw OpenAI embedding client with OpenAI-only routing. - This overload describes the OpenAI shape. Explicit keyword arguments are used first, - then ``OPENAI_*`` values from ``env_file_path`` or the process environment. + Use this overload when you want the generic OpenAI embeddings endpoint. The + constructor reads ``model`` from the explicit argument first and then from + ``OPENAI_EMBEDDING_MODEL``. 
Authentication and endpoint settings come from + the explicit ``api_key``, ``org_id``, and ``base_url`` arguments first and + then from ``OPENAI_API_KEY``, ``OPENAI_ORG_ID``, and ``OPENAI_BASE_URL`` in + ``env_file_path`` or the process environment. + + Azure-specific environment variables are ignored for this overload unless an + explicit Azure signal is provided via the Azure overload shape. """ ... @@ -106,9 +113,19 @@ def __init__( ) -> None: """Initialize a raw OpenAI embedding client with Azure routing. - This overload describes the Azure shape. Explicit Azure inputs force Azure routing, - and missing Azure values fall back to ``AZURE_OPENAI_*`` values from ``env_file_path`` - or the process environment. + Use this overload when you want Azure OpenAI embeddings. Passing + ``azure_endpoint``, ``api_version``, or ``credential`` is an explicit Azure + signal and forces Azure routing even when ``OPENAI_API_KEY`` is also present. + The constructor reads the deployment name from the explicit ``model`` + argument first and then from ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME``, + falling back to ``AZURE_OPENAI_DEPLOYMENT_NAME``. + + Authentication and endpoint settings come from the explicit Azure arguments + first and then from ``AZURE_OPENAI_ENDPOINT``, ``AZURE_OPENAI_BASE_URL``, + ``AZURE_OPENAI_API_KEY``, and ``AZURE_OPENAI_API_VERSION`` in + ``env_file_path`` or the process environment. ``credential`` is the + preferred Azure auth surface; ``api_key`` remains supported for Azure key + auth and callable token providers for compatibility. """ ... @@ -316,8 +333,15 @@ def __init__( ) -> None: """Initialize an OpenAI embedding client with OpenAI-only routing. - This overload describes the OpenAI shape. Explicit keyword arguments are used first, - then ``OPENAI_*`` values from ``env_file_path`` or the process environment. + Use this overload when you want the generic OpenAI embeddings endpoint. 
The + constructor reads ``model`` from the explicit argument first and then from + ``OPENAI_EMBEDDING_MODEL``. Authentication and endpoint settings come from + the explicit ``api_key``, ``org_id``, and ``base_url`` arguments first and + then from ``OPENAI_API_KEY``, ``OPENAI_ORG_ID``, and ``OPENAI_BASE_URL`` in + ``env_file_path`` or the process environment. + + Azure-specific environment variables are ignored for this overload unless an + explicit Azure signal is provided via the Azure overload shape. """ ... @@ -339,9 +363,19 @@ def __init__( ) -> None: """Initialize an OpenAI embedding client with Azure routing. - This overload describes the Azure shape. Explicit Azure inputs force Azure routing, - and missing Azure values fall back to ``AZURE_OPENAI_*`` values from ``env_file_path`` - or the process environment. + Use this overload when you want Azure OpenAI embeddings. Passing + ``azure_endpoint``, ``api_version``, or ``credential`` is an explicit Azure + signal and forces Azure routing even when ``OPENAI_API_KEY`` is also present. + The constructor reads the deployment name from the explicit ``model`` + argument first and then from ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME``, + falling back to ``AZURE_OPENAI_DEPLOYMENT_NAME``. + + Authentication and endpoint settings come from the explicit Azure arguments + first and then from ``AZURE_OPENAI_ENDPOINT``, ``AZURE_OPENAI_BASE_URL``, + ``AZURE_OPENAI_API_KEY``, and ``AZURE_OPENAI_API_VERSION`` in + ``env_file_path`` or the process environment. ``credential`` is the + preferred Azure auth surface; ``api_key`` remains supported for Azure key + auth and callable token providers for compatibility. """ ... 
From c9e2de12549b79f956c9d8cf0355c7d6c66f716e Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 26 Mar 2026 15:32:54 +0100 Subject: [PATCH 07/30] Python: fix callable OpenAI key routing Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../openai/agent_framework_openai/_shared.py | 13 +++++++------ .../tests/openai/test_openai_embedding_client.py | 12 ++++++++++++ 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/python/packages/openai/agent_framework_openai/_shared.py b/python/packages/openai/agent_framework_openai/_shared.py index 8aabe6cb19..a22dc9188c 100644 --- a/python/packages/openai/agent_framework_openai/_shared.py +++ b/python/packages/openai/agent_framework_openai/_shared.py @@ -203,7 +203,7 @@ def load_openai_service_settings( ) if client: return openai_settings, client, False # type: ignore[return-value] - if openai_settings.get("api_key") is not None: + if openai_settings.get("api_key") is not None or api_key_callable is not None: resolved_model = openai_settings.get(openai_model_field) if not resolved_model: raise SettingNotFoundError( @@ -212,9 +212,9 @@ def load_openai_service_settings( ) client_args: dict[str, Any] = { - "api_key": openai_settings["api_key"].get_secret_value() # type: ignore[reportOptionalMemberAccess, union-attr] - if "api_key" in openai_settings - else api_key_callable, + "api_key": api_key_callable + if api_key_callable is not None + else openai_settings["api_key"].get_secret_value(), # type: ignore[reportOptionalMemberAccess, union-attr] "organization": openai_settings.get("org_id"), "default_headers": merged_headers, } @@ -289,6 +289,9 @@ def _resolve_azure_credential_to_token_provider( credential: AzureCredentialTypes | AzureTokenProvider, ) -> AzureTokenProvider: """Resolve an Azure credential or token provider for Azure OpenAI auth.""" + if callable(credential): + return credential + try: from azure.core.credentials import TokenCredential from azure.core.credentials_async import 
AsyncTokenCredential @@ -303,8 +306,6 @@ def _resolve_azure_credential_to_token_provider( return get_async_bearer_token_provider(credential, AZURE_OPENAI_TOKEN_SCOPE) if isinstance(credential, TokenCredential): return get_bearer_token_provider(credential, AZURE_OPENAI_TOKEN_SCOPE) # type: ignore[arg-type] - if callable(credential): - return credential raise ValueError( "The 'credential' parameter must be an Azure TokenCredential, AsyncTokenCredential, or a " "callable token provider." diff --git a/python/packages/openai/tests/openai/test_openai_embedding_client.py b/python/packages/openai/tests/openai/test_openai_embedding_client.py index 80d5ca6899..2df4484847 100644 --- a/python/packages/openai/tests/openai/test_openai_embedding_client.py +++ b/python/packages/openai/tests/openai/test_openai_embedding_client.py @@ -49,6 +49,18 @@ def test_openai_construction_from_env(openai_unit_test_env: dict[str, str]) -> N assert client.model == openai_unit_test_env["OPENAI_EMBEDDING_MODEL"] +def test_with_callable_api_key() -> None: + """Test OpenAIEmbeddingClient initialization with callable API key.""" + + async def get_api_key() -> str: + return "test-api-key-123" + + client = OpenAIEmbeddingClient(model="text-embedding-3-small", api_key=get_api_key) + + assert client.model == "text-embedding-3-small" + assert client.client is not None + + @pytest.mark.parametrize("exclude_list", [["OPENAI_API_KEY"]], indirect=True) def test_openai_construction_missing_api_key_raises(openai_unit_test_env: dict[str, str]) -> None: with pytest.raises(SettingNotFoundError, match="Exactly one of 'base_url', 'endpoint'"): From a361c873fa78b06d8658dfdd2f92c0a5cb079536 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 26 Mar 2026 15:35:55 +0100 Subject: [PATCH 08/30] Python: fix Azure credential routing tests Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../openai/tests/openai/test_openai_chat_client_azure.py | 8 ++++++-- 
.../openai/test_openai_chat_completion_client_azure.py | 6 +++++- .../tests/openai/test_openai_embedding_client_azure.py | 6 +++++- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/python/packages/openai/tests/openai/test_openai_chat_client_azure.py b/python/packages/openai/tests/openai/test_openai_chat_client_azure.py index c364690cab..e134b2c72d 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_chat_client_azure.py @@ -6,7 +6,7 @@ import os from pathlib import Path from typing import Any -from unittest.mock import AsyncMock, MagicMock, patch +from unittest.mock import MagicMock, patch import pytest from agent_framework import Agent, AgentResponse, ChatResponse, Content, Message, SupportsChatGetResponse, tool @@ -136,9 +136,13 @@ def test_explicit_credential_wins_over_openai_api_key(monkeypatch, azure_openai_ def test_init_with_credential_wraps_async_token_credential( monkeypatch, azure_openai_unit_test_env: dict[str, str] ) -> None: + class TestAsyncTokenCredential(AsyncTokenCredential): + async def get_token(self, *scopes: str, **kwargs: object): + raise NotImplementedError + monkeypatch.setenv("OPENAI_API_KEY", "test-dummy-key") monkeypatch.setenv("OPENAI_MODEL", "gpt-5") - credential = AsyncMock(spec=AsyncTokenCredential) + credential = TestAsyncTokenCredential() token_provider = MagicMock() with patch("azure.identity.aio.get_bearer_token_provider", return_value=token_provider) as mock_provider: diff --git a/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py b/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py index 0a57a4b860..9f73343a42 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py @@ -116,9 +116,13 @@ def test_explicit_credential_wins_over_openai_api_key(monkeypatch, 
azure_openai_ def test_init_with_credential_wraps_async_token_credential( monkeypatch, azure_openai_unit_test_env: dict[str, str] ) -> None: + class TestAsyncTokenCredential(AsyncTokenCredential): + async def get_token(self, *scopes: str, **kwargs: object): + raise NotImplementedError + monkeypatch.setenv("OPENAI_API_KEY", "test-dummy-key") monkeypatch.setenv("OPENAI_MODEL", "gpt-5") - credential = MagicMock(spec=AsyncTokenCredential) + credential = TestAsyncTokenCredential() token_provider = MagicMock() with patch("azure.identity.aio.get_bearer_token_provider", return_value=token_provider) as mock_provider: diff --git a/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py b/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py index d252d316cf..45c7771149 100644 --- a/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py @@ -99,9 +99,13 @@ def test_explicit_credential_wins_over_openai_api_key(monkeypatch, azure_openai_ def test_init_with_credential_wraps_async_token_credential( monkeypatch, azure_openai_unit_test_env: dict[str, str] ) -> None: + class TestAsyncTokenCredential(AsyncTokenCredential): + async def get_token(self, *scopes: str, **kwargs: object): + raise NotImplementedError + monkeypatch.setenv("OPENAI_API_KEY", "test-dummy-key") monkeypatch.setenv("OPENAI_EMBEDDING_MODEL", "text-embedding-3-small") - credential = MagicMock(spec=AsyncTokenCredential) + credential = TestAsyncTokenCredential() token_provider = MagicMock() with patch("azure.identity.aio.get_bearer_token_provider", return_value=token_provider) as mock_provider: From a52f0cc8802ff07c3729c8807ea337ecd8e5c292 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 26 Mar 2026 15:40:33 +0100 Subject: [PATCH 09/30] Python: address OpenAI review feedback Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- 
.../_deprecated_azure_openai.py | 38 +++---------- .../openai/agent_framework_openai/_shared.py | 21 ++++++++ .../openai/test_openai_embedding_client.py | 6 ++- .../openai/tests/openai/test_openai_shared.py | 54 +++++++++++++++++++ 4 files changed, 87 insertions(+), 32 deletions(-) create mode 100644 python/packages/openai/tests/openai/test_openai_shared.py diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_deprecated_azure_openai.py b/python/packages/azure-ai/agent_framework_azure_ai/_deprecated_azure_openai.py index d0e8c4c157..21f50e930a 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_deprecated_azure_openai.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_deprecated_azure_openai.py @@ -11,7 +11,6 @@ import json import logging -import os import sys from collections.abc import Mapping, Sequence from contextlib import contextmanager @@ -113,35 +112,8 @@ def _apply_azure_defaults( @contextmanager def _prefer_single_azure_endpoint_env(*, endpoint: str | None, base_url: str | None) -> Any: - """Temporarily expose only the Azure endpoint setting that raw OpenAI clients accept. - - The deprecated Azure wrappers have historically tolerated both - ``AZURE_OPENAI_BASE_URL`` and ``AZURE_OPENAI_ENDPOINT`` being present and prefer - ``base_url`` when both are available. The raw OpenAI constructors now validate - that exactly one is set, so we temporarily hide the unused env var while - delegating to those constructors. 
- """ - original_base_url = os.environ.get("AZURE_OPENAI_BASE_URL") - original_endpoint = os.environ.get("AZURE_OPENAI_ENDPOINT") - - try: - if base_url: - os.environ["AZURE_OPENAI_BASE_URL"] = str(base_url) - os.environ.pop("AZURE_OPENAI_ENDPOINT", None) - elif endpoint: - os.environ["AZURE_OPENAI_ENDPOINT"] = str(endpoint) - os.environ.pop("AZURE_OPENAI_BASE_URL", None) - yield - finally: - if original_base_url is None: - os.environ.pop("AZURE_OPENAI_BASE_URL", None) - else: - os.environ["AZURE_OPENAI_BASE_URL"] = original_base_url - - if original_endpoint is None: - os.environ.pop("AZURE_OPENAI_ENDPOINT", None) - else: - os.environ["AZURE_OPENAI_ENDPOINT"] = original_endpoint + """Preserve the legacy call shape without mutating process-wide environment state.""" + yield # endregion @@ -399,6 +371,8 @@ def __init__( super().__init__( async_client=async_client, model=responses_deployment_name, + azure_endpoint=str(endpoint_value) if endpoint_value else None, + base_url=str(client_base_url) if client_base_url else None, api_version=azure_openai_settings.get("api_version"), instruction_role=instruction_role, default_headers=default_headers, @@ -613,6 +587,8 @@ def __init__( super().__init__( async_client=async_client, model=chat_deployment_name, + azure_endpoint=str(endpoint_value) if endpoint_value else None, + base_url=str(base_url_value) if base_url_value else None, api_version=azure_openai_settings.get("api_version"), instruction_role=instruction_role, default_headers=default_headers, @@ -926,6 +902,8 @@ def __init__( super().__init__( async_client=async_client, model=embedding_deployment_name, + azure_endpoint=str(endpoint_value) if endpoint_value else None, + base_url=str(base_url_value) if base_url_value else None, api_version=azure_openai_settings.get("api_version"), default_headers=default_headers, ) diff --git a/python/packages/openai/agent_framework_openai/_shared.py b/python/packages/openai/agent_framework_openai/_shared.py index a22dc9188c..f2a51c579d 
100644 --- a/python/packages/openai/agent_framework_openai/_shared.py +++ b/python/packages/openai/agent_framework_openai/_shared.py @@ -221,6 +221,27 @@ def load_openai_service_settings( if base_url := openai_settings.get("base_url"): client_args["base_url"] = base_url return openai_settings, AsyncOpenAI(**client_args), False # type: ignore[return-value] + if ( + endpoint is None + and credential is None + and _get_env_setting( + "AZURE_OPENAI_ENDPOINT", + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + is None + and _get_env_setting( + "AZURE_OPENAI_BASE_URL", + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + is None + ): + raise SettingNotFoundError( + "OpenAI credentials are required. Provide the 'api_key' parameter or set 'OPENAI_API_KEY'. " + "To use Azure OpenAI instead, pass 'azure_endpoint' or set 'AZURE_OPENAI_ENDPOINT' or " + "'AZURE_OPENAI_BASE_URL'." + ) azure_settings = load_settings( AzureOpenAISettings, diff --git a/python/packages/openai/tests/openai/test_openai_embedding_client.py b/python/packages/openai/tests/openai/test_openai_embedding_client.py index 2df4484847..5715c488d2 100644 --- a/python/packages/openai/tests/openai/test_openai_embedding_client.py +++ b/python/packages/openai/tests/openai/test_openai_embedding_client.py @@ -62,8 +62,10 @@ async def get_api_key() -> str: @pytest.mark.parametrize("exclude_list", [["OPENAI_API_KEY"]], indirect=True) -def test_openai_construction_missing_api_key_raises(openai_unit_test_env: dict[str, str]) -> None: - with pytest.raises(SettingNotFoundError, match="Exactly one of 'base_url', 'endpoint'"): +def test_openai_construction_without_openai_or_azure_config_raises_clear_error( + openai_unit_test_env: dict[str, str], +) -> None: + with pytest.raises(SettingNotFoundError, match="OPENAI_API_KEY"): OpenAIEmbeddingClient(model="text-embedding-3-small") diff --git a/python/packages/openai/tests/openai/test_openai_shared.py 
b/python/packages/openai/tests/openai/test_openai_shared.py new file mode 100644 index 0000000000..b69feb7314 --- /dev/null +++ b/python/packages/openai/tests/openai/test_openai_shared.py @@ -0,0 +1,54 @@ +# Copyright (c) Microsoft. All rights reserved. + +from __future__ import annotations + +from unittest.mock import MagicMock, patch + +import pytest +from azure.core.credentials import TokenCredential +from azure.core.credentials_async import AsyncTokenCredential + +from agent_framework_openai._shared import AZURE_OPENAI_TOKEN_SCOPE, _resolve_azure_credential_to_token_provider + + +class _AsyncTokenCredentialStub(AsyncTokenCredential): + async def get_token(self, *scopes: str, **kwargs: object): + raise NotImplementedError + + +class _TokenCredentialStub(TokenCredential): + def get_token(self, *scopes: str, **kwargs: object): + raise NotImplementedError + + +def test_resolve_azure_async_credential_wraps_provider() -> None: + credential = _AsyncTokenCredentialStub() + token_provider = MagicMock() + + with patch("azure.identity.aio.get_bearer_token_provider", return_value=token_provider) as mock_provider: + resolved = _resolve_azure_credential_to_token_provider(credential) + + assert resolved is token_provider + mock_provider.assert_called_once_with(credential, AZURE_OPENAI_TOKEN_SCOPE) + + +def test_resolve_azure_sync_credential_wraps_provider() -> None: + credential = _TokenCredentialStub() + token_provider = MagicMock() + + with patch("azure.identity.get_bearer_token_provider", return_value=token_provider) as mock_provider: + resolved = _resolve_azure_credential_to_token_provider(credential) + + assert resolved is token_provider + mock_provider.assert_called_once_with(credential, AZURE_OPENAI_TOKEN_SCOPE) + + +def test_resolve_azure_callable_token_provider_passthrough() -> None: + token_provider = MagicMock() + + assert _resolve_azure_credential_to_token_provider(token_provider) is token_provider + + +def test_resolve_azure_invalid_credential_raises() -> None: + 
with pytest.raises(ValueError, match="credential"): + _resolve_azure_credential_to_token_provider(object()) # type: ignore[arg-type] From 79bad84375bffb8cef8f2119087fff27af11dbb6 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 26 Mar 2026 15:47:25 +0100 Subject: [PATCH 10/30] Python: narrow Azure routing markers Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../agent_framework_openai/_chat_client.py | 26 +++++++------- .../_chat_completion_client.py | 20 ++++++----- .../_embedding_client.py | 26 ++++++++------ .../openai/agent_framework_openai/_shared.py | 35 ++++++------------- .../openai/test_openai_chat_client_azure.py | 13 +++++++ ...est_openai_chat_completion_client_azure.py | 13 +++++++ .../test_openai_embedding_client_azure.py | 13 +++++++ 7 files changed, 91 insertions(+), 55 deletions(-) diff --git a/python/packages/openai/agent_framework_openai/_chat_client.py b/python/packages/openai/agent_framework_openai/_chat_client.py index 6e4f6eb579..8d7171089c 100644 --- a/python/packages/openai/agent_framework_openai/_chat_client.py +++ b/python/packages/openai/agent_framework_openai/_chat_client.py @@ -306,9 +306,9 @@ def __init__( ) -> None: """Initialize a raw OpenAI Responses client with Azure routing. - This overload describes the Azure shape. Explicit Azure inputs force Azure routing, - and missing Azure values fall back to ``AZURE_OPENAI_*`` values from ``env_file_path`` - or the process environment. + This overload describes the Azure shape. Passing ``azure_endpoint`` or + ``credential`` forces Azure routing, and missing Azure values fall back to + ``AZURE_OPENAI_*`` values from ``env_file_path`` or the process environment. """ ... @@ -351,8 +351,9 @@ def __init__( to pass the full ``.../openai/v1`` base URL directly. azure_endpoint: Azure resource endpoint. When not provided explicitly, Azure routing falls back to ``AZURE_OPENAI_ENDPOINT``. - api_version: Azure API version. 
When not provided explicitly, Azure routing falls - back to ``AZURE_OPENAI_API_VERSION`` and then the Responses default. + api_version: Azure API version to use once Azure routing is selected. When + not provided explicitly, Azure routing falls back to + ``AZURE_OPENAI_API_VERSION`` and then the Responses default. default_headers: Additional HTTP headers. async_client: Pre-configured client. Passing ``AsyncAzureOpenAI`` keeps the client on Azure; passing ``AsyncOpenAI`` keeps the client on OpenAI and bypasses env lookup. @@ -366,7 +367,7 @@ def __init__( Notes: Environment resolution and routing precedence are: - 1. Explicit Azure inputs (``azure_endpoint``, ``api_version``, or ``credential``) + 1. Explicit Azure inputs (``azure_endpoint`` or ``credential``) 2. Explicit OpenAI API key or ``OPENAI_API_KEY`` 3. Azure environment fallback @@ -2463,9 +2464,9 @@ def __init__( ) -> None: """Initialize an OpenAI Responses client with Azure routing. - This overload describes the Azure shape. Explicit Azure inputs force Azure routing, - and missing Azure values fall back to ``AZURE_OPENAI_*`` values from ``env_file_path`` - or the process environment. + This overload describes the Azure shape. Passing ``azure_endpoint`` or + ``credential`` forces Azure routing, and missing Azure values fall back to + ``AZURE_OPENAI_*`` values from ``env_file_path`` or the process environment. """ ... @@ -2508,8 +2509,9 @@ def __init__( to pass the full ``.../openai/v1`` base URL directly. azure_endpoint: Azure resource endpoint. When not provided explicitly, Azure routing falls back to ``AZURE_OPENAI_ENDPOINT``. - api_version: Azure API version. When not provided explicitly, Azure routing falls - back to ``AZURE_OPENAI_API_VERSION`` and then the Responses default. + api_version: Azure API version to use once Azure routing is selected. When + not provided explicitly, Azure routing falls back to + ``AZURE_OPENAI_API_VERSION`` and then the Responses default. 
default_headers: Default HTTP headers that are merged into each request. async_client: Pre-configured client. Passing ``AsyncAzureOpenAI`` keeps the client on Azure; passing ``AsyncOpenAI`` keeps the client on OpenAI and bypasses env lookup. @@ -2525,7 +2527,7 @@ def __init__( Notes: Environment resolution and routing precedence are: - 1. Explicit Azure inputs (``azure_endpoint``, ``api_version``, or ``credential``) + 1. Explicit Azure inputs (``azure_endpoint`` or ``credential``) 2. Explicit OpenAI API key or ``OPENAI_API_KEY`` 3. Azure environment fallback diff --git a/python/packages/openai/agent_framework_openai/_chat_completion_client.py b/python/packages/openai/agent_framework_openai/_chat_completion_client.py index 85f0167c45..2f8738b5e2 100644 --- a/python/packages/openai/agent_framework_openai/_chat_completion_client.py +++ b/python/packages/openai/agent_framework_openai/_chat_completion_client.py @@ -221,9 +221,9 @@ def __init__( ) -> None: """Initialize a raw OpenAI Chat Completions client with Azure routing. - This overload describes the Azure shape. Explicit Azure inputs force Azure routing, - and missing Azure values fall back to ``AZURE_OPENAI_*`` values from ``env_file_path`` - or the process environment. + This overload describes the Azure shape. Passing ``azure_endpoint`` or + ``credential`` forces Azure routing, and missing Azure values fall back to + ``AZURE_OPENAI_*`` values from ``env_file_path`` or the process environment. """ ... @@ -266,8 +266,9 @@ def __init__( to pass the full ``.../openai/v1`` base URL directly. azure_endpoint: Azure resource endpoint. When not provided explicitly, Azure routing falls back to ``AZURE_OPENAI_ENDPOINT``. - api_version: Azure API version. When not provided explicitly, Azure routing falls - back to ``AZURE_OPENAI_API_VERSION`` and then the Chat Completions default. + api_version: Azure API version to use once Azure routing is selected. 
When + not provided explicitly, Azure routing falls back to + ``AZURE_OPENAI_API_VERSION`` and then the Chat Completions default. default_headers: Additional HTTP headers. async_client: Pre-configured client. Passing ``AsyncAzureOpenAI`` keeps the client on Azure; passing ``AsyncOpenAI`` keeps the client on OpenAI and bypasses env lookup. @@ -281,7 +282,7 @@ def __init__( Notes: Environment resolution and routing precedence are: - 1. Explicit Azure inputs (``azure_endpoint``, ``api_version``, or ``credential``) + 1. Explicit Azure inputs (``azure_endpoint`` or ``credential``) 2. Explicit OpenAI API key or ``OPENAI_API_KEY`` 3. Azure environment fallback @@ -1043,8 +1044,9 @@ def __init__( to pass the full ``.../openai/v1`` base URL directly. azure_endpoint: Azure resource endpoint. When not provided explicitly, Azure routing falls back to ``AZURE_OPENAI_ENDPOINT``. - api_version: Azure API version. When not provided explicitly, Azure routing falls - back to ``AZURE_OPENAI_API_VERSION`` and then the Chat Completions default. + api_version: Azure API version to use once Azure routing is selected. When + not provided explicitly, Azure routing falls back to + ``AZURE_OPENAI_API_VERSION`` and then the Chat Completions default. middleware: Optional sequence of ChatAndFunctionMiddlewareTypes to apply to requests. function_invocation_configuration: Optional configuration for function invocation support. env_file_path: Optional ``.env`` file that is checked before process environment @@ -1055,7 +1057,7 @@ def __init__( Notes: Environment resolution and routing precedence are: - 1. Explicit Azure inputs (``azure_endpoint``, ``api_version``, or ``credential``) + 1. Explicit Azure inputs (``azure_endpoint`` or ``credential``) 2. Explicit OpenAI API key or ``OPENAI_API_KEY`` 3. 
Azure environment fallback diff --git a/python/packages/openai/agent_framework_openai/_embedding_client.py b/python/packages/openai/agent_framework_openai/_embedding_client.py index 7611397e69..3f09dbcf74 100644 --- a/python/packages/openai/agent_framework_openai/_embedding_client.py +++ b/python/packages/openai/agent_framework_openai/_embedding_client.py @@ -114,8 +114,10 @@ def __init__( """Initialize a raw OpenAI embedding client with Azure routing. Use this overload when you want Azure OpenAI embeddings. Passing - ``azure_endpoint``, ``api_version``, or ``credential`` is an explicit Azure - signal and forces Azure routing even when ``OPENAI_API_KEY`` is also present. + ``azure_endpoint`` or ``credential`` is an explicit Azure signal and forces + Azure routing even when ``OPENAI_API_KEY`` is also present. ``api_version`` + configures Azure requests after Azure routing is selected, but it does not + select Azure on its own. The constructor reads the deployment name from the explicit ``model`` argument first and then from ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME``, falling back to ``AZURE_OPENAI_DEPLOYMENT_NAME``. @@ -168,8 +170,9 @@ def __init__( to pass the full ``.../openai/v1`` base URL directly. azure_endpoint: Azure resource endpoint. When not provided explicitly, Azure routing falls back to ``AZURE_OPENAI_ENDPOINT``. - api_version: Azure API version. When not provided explicitly, Azure routing falls - back to ``AZURE_OPENAI_API_VERSION`` and then the embedding default. + api_version: Azure API version to use once Azure routing is selected. When + not provided explicitly, Azure routing falls back to + ``AZURE_OPENAI_API_VERSION`` and then the embedding default. default_headers: Additional HTTP headers. async_client: Pre-configured client. Passing ``AsyncAzureOpenAI`` keeps the client on Azure; passing ``AsyncOpenAI`` keeps the client on OpenAI. @@ -182,7 +185,7 @@ def __init__( Notes: Environment resolution and routing precedence are: - 1. 
Explicit Azure inputs (``azure_endpoint``, ``api_version``, or ``credential``) + 1. Explicit Azure inputs (``azure_endpoint`` or ``credential``) 2. Explicit OpenAI API key or ``OPENAI_API_KEY`` 3. Azure environment fallback @@ -364,8 +367,10 @@ def __init__( """Initialize an OpenAI embedding client with Azure routing. Use this overload when you want Azure OpenAI embeddings. Passing - ``azure_endpoint``, ``api_version``, or ``credential`` is an explicit Azure - signal and forces Azure routing even when ``OPENAI_API_KEY`` is also present. + ``azure_endpoint`` or ``credential`` is an explicit Azure signal and forces + Azure routing even when ``OPENAI_API_KEY`` is also present. ``api_version`` + configures Azure requests after Azure routing is selected, but it does not + select Azure on its own. The constructor reads the deployment name from the explicit ``model`` argument first and then from ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME``, falling back to ``AZURE_OPENAI_DEPLOYMENT_NAME``. @@ -419,8 +424,9 @@ def __init__( to pass the full ``.../openai/v1`` base URL directly. azure_endpoint: Azure resource endpoint. When not provided explicitly, Azure routing falls back to ``AZURE_OPENAI_ENDPOINT``. - api_version: Azure API version. When not provided explicitly, Azure routing falls - back to ``AZURE_OPENAI_API_VERSION`` and then the embedding default. + api_version: Azure API version to use once Azure routing is selected. When + not provided explicitly, Azure routing falls back to + ``AZURE_OPENAI_API_VERSION`` and then the embedding default. otel_provider_name: Override the OpenTelemetry provider name. env_file_path: Optional ``.env`` file that is checked before process environment variables. The same file is used for both ``OPENAI_*`` and ``AZURE_OPENAI_*`` @@ -430,7 +436,7 @@ def __init__( Notes: Environment resolution and routing precedence are: - 1. Explicit Azure inputs (``azure_endpoint``, ``api_version``, or ``credential``) + 1. 
Explicit Azure inputs (``azure_endpoint`` or ``credential``) 2. Explicit OpenAI API key or ``OPENAI_API_KEY`` 3. Azure environment fallback diff --git a/python/packages/openai/agent_framework_openai/_shared.py b/python/packages/openai/agent_framework_openai/_shared.py index f2a51c579d..30430d496d 100644 --- a/python/packages/openai/agent_framework_openai/_shared.py +++ b/python/packages/openai/agent_framework_openai/_shared.py @@ -173,7 +173,7 @@ def load_openai_service_settings( The generic OpenAI clients primarily read from ``OPENAI_*`` variables. Azure-specific environment variables are used only when an explicit Azure signal is present - (``azure_endpoint``, ``api_version``, or ``credential``) or when no explicit + (``azure_endpoint`` or ``credential``) or when no explicit OpenAI API key is available. """ # Merge APP_INFO into the headers @@ -186,7 +186,8 @@ def load_openai_service_settings( api_key_str = api_key if not callable(api_key) else None azure_client = isinstance(client, AsyncAzureOpenAI) - use_azure = azure_client or endpoint is not None or api_version is not None or credential is not None + use_azure = azure_client or endpoint is not None or credential is not None + checked_openai = False if not use_azure: openai_settings_kwargs: dict[str, Any] = { "api_key": api_key_str, @@ -221,27 +222,7 @@ def load_openai_service_settings( if base_url := openai_settings.get("base_url"): client_args["base_url"] = base_url return openai_settings, AsyncOpenAI(**client_args), False # type: ignore[return-value] - if ( - endpoint is None - and credential is None - and _get_env_setting( - "AZURE_OPENAI_ENDPOINT", - env_file_path=env_file_path, - env_file_encoding=env_file_encoding, - ) - is None - and _get_env_setting( - "AZURE_OPENAI_BASE_URL", - env_file_path=env_file_path, - env_file_encoding=env_file_encoding, - ) - is None - ): - raise SettingNotFoundError( - "OpenAI credentials are required. Provide the 'api_key' parameter or set 'OPENAI_API_KEY'. 
" - "To use Azure OpenAI instead, pass 'azure_endpoint' or set 'AZURE_OPENAI_ENDPOINT' or " - "'AZURE_OPENAI_BASE_URL'." - ) + checked_openai = True azure_settings = load_settings( AzureOpenAISettings, @@ -278,7 +259,13 @@ def load_openai_service_settings( client_args["azure_deployment"] = model else: deployment_env_guidance = ", ".join(f"'{env_var}'" for env_var in azure_deployment_env_vars) - raise ValueError( + if checked_openai: + raise SettingNotFoundError( + "OpenAI credentials are required. Provide the 'api_key' parameter or set 'OPENAI_API_KEY'. " + "To use Azure OpenAI instead, pass 'azure_endpoint' or set 'AZURE_OPENAI_ENDPOINT' or " + "'AZURE_OPENAI_BASE_URL'." + ) + raise SettingNotFoundError( "Azure OpenAI client requires a deployment name, which can be provided via the 'model' parameter, " f"the {deployment_env_guidance} environment variable, or the '{openai_model_env_var}' " "environment variable." diff --git a/python/packages/openai/tests/openai/test_openai_chat_client_azure.py b/python/packages/openai/tests/openai/test_openai_chat_client_azure.py index e134b2c72d..39276e4839 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_chat_client_azure.py @@ -122,6 +122,19 @@ def test_openai_api_key_wins_over_azure_env(monkeypatch, azure_openai_unit_test_ assert client.azure_endpoint is None +def test_api_version_alone_does_not_override_openai_api_key( + monkeypatch, azure_openai_unit_test_env: dict[str, str] +) -> None: + monkeypatch.setenv("OPENAI_API_KEY", "test-dummy-key") + monkeypatch.setenv("OPENAI_MODEL", "gpt-5") + + client = OpenAIChatClient(api_version="2024-10-21") + + assert client.model == "gpt-5" + assert not isinstance(client.client, AsyncAzureOpenAI) + assert client.azure_endpoint is None + + def test_explicit_credential_wins_over_openai_api_key(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: monkeypatch.setenv("OPENAI_API_KEY", 
"test-dummy-key") monkeypatch.setenv("OPENAI_MODEL", "gpt-5") diff --git a/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py b/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py index 9f73343a42..16fab5141a 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py @@ -102,6 +102,19 @@ def test_openai_api_key_wins_over_azure_env(monkeypatch, azure_openai_unit_test_ assert client.azure_endpoint is None +def test_api_version_alone_does_not_override_openai_api_key( + monkeypatch, azure_openai_unit_test_env: dict[str, str] +) -> None: + monkeypatch.setenv("OPENAI_API_KEY", "test-dummy-key") + monkeypatch.setenv("OPENAI_MODEL", "gpt-5") + + client = OpenAIChatCompletionClient(api_version="2024-10-21") + + assert client.model == "gpt-5" + assert not isinstance(client.client, AsyncAzureOpenAI) + assert client.azure_endpoint is None + + def test_explicit_credential_wins_over_openai_api_key(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: monkeypatch.setenv("OPENAI_API_KEY", "test-dummy-key") monkeypatch.setenv("OPENAI_MODEL", "gpt-5") diff --git a/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py b/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py index 45c7771149..0a2e9082d7 100644 --- a/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py @@ -85,6 +85,19 @@ def test_openai_api_key_wins_over_azure_env(monkeypatch, azure_openai_unit_test_ assert client.azure_endpoint is None +def test_api_version_alone_does_not_override_openai_api_key( + monkeypatch, azure_openai_unit_test_env: dict[str, str] +) -> None: + monkeypatch.setenv("OPENAI_API_KEY", "test-dummy-key") + monkeypatch.setenv("OPENAI_EMBEDDING_MODEL", "text-embedding-3-small") + 
+ client = OpenAIEmbeddingClient(api_version="2024-10-21") + + assert client.model == "text-embedding-3-small" + assert not isinstance(client.client, AsyncAzureOpenAI) + assert client.azure_endpoint is None + + def test_explicit_credential_wins_over_openai_api_key(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: monkeypatch.setenv("OPENAI_API_KEY", "test-dummy-key") monkeypatch.setenv("OPENAI_EMBEDDING_MODEL", "text-embedding-3-small") From d82edc7152e0c77d7ef5103b861cc4d5e285c733 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 26 Mar 2026 16:21:09 +0100 Subject: [PATCH 11/30] Python: refine OpenAI model fallback order Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../agent_framework_openai/_chat_client.py | 26 ++-- .../_chat_completion_client.py | 26 ++-- .../_embedding_client.py | 54 +++---- .../openai/agent_framework_openai/_shared.py | 135 +++++++++++------- .../packages/openai/tests/openai/conftest.py | 4 + .../tests/openai/test_openai_chat_client.py | 8 ++ .../openai/test_openai_chat_client_azure.py | 40 +++++- .../test_openai_chat_completion_client.py | 8 ++ ...est_openai_chat_completion_client_azure.py | 40 +++++- .../openai/test_openai_embedding_client.py | 9 +- .../test_openai_embedding_client_azure.py | 25 ++++ 11 files changed, 269 insertions(+), 106 deletions(-) diff --git a/python/packages/openai/agent_framework_openai/_chat_client.py b/python/packages/openai/agent_framework_openai/_chat_client.py index 8d7171089c..04a6a7edc9 100644 --- a/python/packages/openai/agent_framework_openai/_chat_client.py +++ b/python/packages/openai/agent_framework_openai/_chat_client.py @@ -334,8 +334,9 @@ def __init__( Keyword Args: model: Model identifier to use for the request. When not provided, the constructor - reads ``OPENAI_MODEL`` for OpenAI routing or ``AZURE_OPENAI_DEPLOYMENT_NAME`` - for Azure routing. 
+ reads ``OPENAI_RESPONSES_MODEL`` and then ``OPENAI_MODEL`` for OpenAI + routing, or ``AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME`` and then + ``AZURE_OPENAI_DEPLOYMENT_NAME`` for Azure routing. model_id: Deprecated alias for ``model``. api_key: API key override. For OpenAI routing this maps to ``OPENAI_API_KEY``. For Azure routing this can be used instead of ``AZURE_OPENAI_API_KEY`` for key @@ -371,9 +372,10 @@ def __init__( 2. Explicit OpenAI API key or ``OPENAI_API_KEY`` 3. Azure environment fallback - OpenAI routing reads ``OPENAI_API_KEY``, ``OPENAI_MODEL``, ``OPENAI_ORG_ID``, and - ``OPENAI_BASE_URL``. Azure routing reads ``AZURE_OPENAI_ENDPOINT``, - ``AZURE_OPENAI_BASE_URL``, ``AZURE_OPENAI_API_KEY``, + OpenAI routing reads ``OPENAI_API_KEY``, ``OPENAI_RESPONSES_MODEL``, + ``OPENAI_MODEL``, ``OPENAI_ORG_ID``, and ``OPENAI_BASE_URL``. Azure routing + reads ``AZURE_OPENAI_ENDPOINT``, ``AZURE_OPENAI_BASE_URL``, + ``AZURE_OPENAI_API_KEY``, ``AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME``, ``AZURE_OPENAI_DEPLOYMENT_NAME``, and ``AZURE_OPENAI_API_VERSION``. """ if model_id is not None and model is None: @@ -395,6 +397,8 @@ def __init__( client=async_client, env_file_path=env_file_path, env_file_encoding=env_file_encoding, + openai_model_fields=("responses_model", "model"), + azure_deployment_fields=("responses_deployment_name", "deployment_name"), ) self.client = client @@ -2493,8 +2497,9 @@ def __init__( Keyword Args: model: Model identifier to use for the request. When not provided, the constructor - reads ``OPENAI_MODEL`` for OpenAI routing or ``AZURE_OPENAI_DEPLOYMENT_NAME`` - for Azure routing. + reads ``OPENAI_RESPONSES_MODEL`` and then ``OPENAI_MODEL`` for OpenAI + routing, or ``AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME`` and then + ``AZURE_OPENAI_DEPLOYMENT_NAME`` for Azure routing. api_key: API key override. For OpenAI routing this maps to ``OPENAI_API_KEY``. For Azure routing this can be used instead of ``AZURE_OPENAI_API_KEY`` for key auth. 
A callable token provider is also accepted for backwards compatibility, @@ -2531,9 +2536,10 @@ def __init__( 2. Explicit OpenAI API key or ``OPENAI_API_KEY`` 3. Azure environment fallback - OpenAI routing reads ``OPENAI_API_KEY``, ``OPENAI_MODEL``, ``OPENAI_ORG_ID``, and - ``OPENAI_BASE_URL``. Azure routing reads ``AZURE_OPENAI_ENDPOINT``, - ``AZURE_OPENAI_BASE_URL``, ``AZURE_OPENAI_API_KEY``, + OpenAI routing reads ``OPENAI_API_KEY``, ``OPENAI_RESPONSES_MODEL``, + ``OPENAI_MODEL``, ``OPENAI_ORG_ID``, and ``OPENAI_BASE_URL``. Azure routing + reads ``AZURE_OPENAI_ENDPOINT``, ``AZURE_OPENAI_BASE_URL``, + ``AZURE_OPENAI_API_KEY``, ``AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME``, ``AZURE_OPENAI_DEPLOYMENT_NAME``, and ``AZURE_OPENAI_API_VERSION``. Examples: diff --git a/python/packages/openai/agent_framework_openai/_chat_completion_client.py b/python/packages/openai/agent_framework_openai/_chat_completion_client.py index 2f8738b5e2..ee78892e54 100644 --- a/python/packages/openai/agent_framework_openai/_chat_completion_client.py +++ b/python/packages/openai/agent_framework_openai/_chat_completion_client.py @@ -249,8 +249,9 @@ def __init__( Keyword Args: model: Model identifier to use for the request. When not provided, the constructor - reads ``OPENAI_MODEL`` for OpenAI routing or ``AZURE_OPENAI_DEPLOYMENT_NAME`` - for Azure routing. + reads ``OPENAI_CHAT_MODEL`` and then ``OPENAI_MODEL`` for OpenAI routing, + or ``AZURE_OPENAI_CHAT_DEPLOYMENT_NAME`` and then + ``AZURE_OPENAI_DEPLOYMENT_NAME`` for Azure routing. model_id: Deprecated alias for ``model``. api_key: API key override. For OpenAI routing this maps to ``OPENAI_API_KEY``. For Azure routing this can be used instead of ``AZURE_OPENAI_API_KEY`` for key @@ -286,9 +287,10 @@ def __init__( 2. Explicit OpenAI API key or ``OPENAI_API_KEY`` 3. Azure environment fallback - OpenAI routing reads ``OPENAI_API_KEY``, ``OPENAI_MODEL``, ``OPENAI_ORG_ID``, and - ``OPENAI_BASE_URL``. 
Azure routing reads ``AZURE_OPENAI_ENDPOINT``, - ``AZURE_OPENAI_BASE_URL``, ``AZURE_OPENAI_API_KEY``, + OpenAI routing reads ``OPENAI_API_KEY``, ``OPENAI_CHAT_MODEL``, + ``OPENAI_MODEL``, ``OPENAI_ORG_ID``, and ``OPENAI_BASE_URL``. Azure routing + reads ``AZURE_OPENAI_ENDPOINT``, ``AZURE_OPENAI_BASE_URL``, + ``AZURE_OPENAI_API_KEY``, ``AZURE_OPENAI_CHAT_DEPLOYMENT_NAME``, ``AZURE_OPENAI_DEPLOYMENT_NAME``, and ``AZURE_OPENAI_API_VERSION``. """ if model_id is not None and model is None: @@ -310,6 +312,8 @@ def __init__( client=async_client, env_file_path=env_file_path, env_file_encoding=env_file_encoding, + openai_model_fields=("chat_model", "model"), + azure_deployment_fields=("chat_deployment_name", "deployment_name"), ) self.client = client @@ -1024,8 +1028,9 @@ def __init__( Keyword Args: model: Model identifier to use for the request. When not provided, the constructor - reads ``OPENAI_MODEL`` for OpenAI routing or ``AZURE_OPENAI_DEPLOYMENT_NAME`` - for Azure routing. + reads ``OPENAI_CHAT_MODEL`` and then ``OPENAI_MODEL`` for OpenAI routing, + or ``AZURE_OPENAI_CHAT_DEPLOYMENT_NAME`` and then + ``AZURE_OPENAI_DEPLOYMENT_NAME`` for Azure routing. api_key: API key override. For OpenAI routing this maps to ``OPENAI_API_KEY``. For Azure routing this can be used instead of ``AZURE_OPENAI_API_KEY`` for key auth. A callable token provider is also accepted for backwards compatibility, @@ -1061,9 +1066,10 @@ def __init__( 2. Explicit OpenAI API key or ``OPENAI_API_KEY`` 3. Azure environment fallback - OpenAI routing reads ``OPENAI_API_KEY``, ``OPENAI_MODEL``, ``OPENAI_ORG_ID``, and - ``OPENAI_BASE_URL``. Azure routing reads ``AZURE_OPENAI_ENDPOINT``, - ``AZURE_OPENAI_BASE_URL``, ``AZURE_OPENAI_API_KEY``, + OpenAI routing reads ``OPENAI_API_KEY``, ``OPENAI_CHAT_MODEL``, + ``OPENAI_MODEL``, ``OPENAI_ORG_ID``, and ``OPENAI_BASE_URL``. 
Azure routing + reads ``AZURE_OPENAI_ENDPOINT``, ``AZURE_OPENAI_BASE_URL``, + ``AZURE_OPENAI_API_KEY``, ``AZURE_OPENAI_CHAT_DEPLOYMENT_NAME``, ``AZURE_OPENAI_DEPLOYMENT_NAME``, and ``AZURE_OPENAI_API_VERSION``. Examples: diff --git a/python/packages/openai/agent_framework_openai/_embedding_client.py b/python/packages/openai/agent_framework_openai/_embedding_client.py index 3f09dbcf74..63968877c3 100644 --- a/python/packages/openai/agent_framework_openai/_embedding_client.py +++ b/python/packages/openai/agent_framework_openai/_embedding_client.py @@ -86,7 +86,7 @@ def __init__( Use this overload when you want the generic OpenAI embeddings endpoint. The constructor reads ``model`` from the explicit argument first and then from - ``OPENAI_EMBEDDING_MODEL``. Authentication and endpoint settings come from + ``OPENAI_EMBEDDING_MODEL``, falling back to ``OPENAI_MODEL``. Authentication and endpoint settings come from the explicit ``api_key``, ``org_id``, and ``base_url`` arguments first and then from ``OPENAI_API_KEY``, ``OPENAI_ORG_ID``, and ``OPENAI_BASE_URL`` in ``env_file_path`` or the process environment. @@ -120,7 +120,8 @@ def __init__( select Azure on its own. The constructor reads the deployment name from the explicit ``model`` argument first and then from ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME``, - falling back to ``AZURE_OPENAI_DEPLOYMENT_NAME``. + falling back to ``AZURE_OPENAI_DEPLOYMENT_NAME``, ``OPENAI_EMBEDDING_MODEL``, + and then ``OPENAI_MODEL``. Authentication and endpoint settings come from the explicit Azure arguments first and then from ``AZURE_OPENAI_ENDPOINT``, ``AZURE_OPENAI_BASE_URL``, @@ -152,9 +153,11 @@ def __init__( Keyword Args: model: Embedding model or Azure OpenAI deployment name. When not provided, the - constructor reads ``OPENAI_EMBEDDING_MODEL`` for OpenAI routing. For Azure - routing it first checks ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME`` and then - falls back to ``AZURE_OPENAI_DEPLOYMENT_NAME``. 
+ constructor reads ``OPENAI_EMBEDDING_MODEL`` and then ``OPENAI_MODEL`` + for OpenAI routing. For Azure routing it first checks + ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME``, then + ``AZURE_OPENAI_DEPLOYMENT_NAME``, then ``OPENAI_EMBEDDING_MODEL``, and + finally ``OPENAI_MODEL``. model_id: Deprecated alias for ``model``. api_key: API key override. For OpenAI routing this maps to ``OPENAI_API_KEY``. For Azure routing this can be used instead of ``AZURE_OPENAI_API_KEY`` for key @@ -190,10 +193,11 @@ def __init__( 3. Azure environment fallback OpenAI routing reads ``OPENAI_API_KEY``, ``OPENAI_EMBEDDING_MODEL``, - ``OPENAI_ORG_ID``, and ``OPENAI_BASE_URL``. Azure routing reads - ``AZURE_OPENAI_ENDPOINT``, ``AZURE_OPENAI_BASE_URL``, ``AZURE_OPENAI_API_KEY``, - ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME``, ``AZURE_OPENAI_DEPLOYMENT_NAME``, - and ``AZURE_OPENAI_API_VERSION``. + ``OPENAI_MODEL``, ``OPENAI_ORG_ID``, and ``OPENAI_BASE_URL``. Azure routing + reads ``AZURE_OPENAI_ENDPOINT``, ``AZURE_OPENAI_BASE_URL``, + ``AZURE_OPENAI_API_KEY``, ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME``, + ``AZURE_OPENAI_DEPLOYMENT_NAME``, ``OPENAI_EMBEDDING_MODEL``, + ``OPENAI_MODEL``, and ``AZURE_OPENAI_API_VERSION``. 
""" if model_id is not None and model is None: import warnings @@ -214,16 +218,12 @@ def __init__( client=async_client, env_file_path=env_file_path, env_file_encoding=env_file_encoding, - openai_model_field="embedding_model", - openai_model_env_var="OPENAI_EMBEDDING_MODEL", - azure_deployment_env_vars=( - "AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME", - "AZURE_OPENAI_DEPLOYMENT_NAME", - ), + openai_model_fields=("embedding_model", "model"), + azure_deployment_fields=("embedding_deployment_name", "deployment_name"), ) self.client = client - resolved_model = settings.get("embedding_model") or settings.get("deployment_name") + resolved_model = settings.get("model") or settings.get("deployment_name") self.model: str | None = resolved_model.strip() if isinstance(resolved_model, str) and resolved_model else None # Store configuration for serialization @@ -338,7 +338,7 @@ def __init__( Use this overload when you want the generic OpenAI embeddings endpoint. The constructor reads ``model`` from the explicit argument first and then from - ``OPENAI_EMBEDDING_MODEL``. Authentication and endpoint settings come from + ``OPENAI_EMBEDDING_MODEL``, falling back to ``OPENAI_MODEL``. Authentication and endpoint settings come from the explicit ``api_key``, ``org_id``, and ``base_url`` arguments first and then from ``OPENAI_API_KEY``, ``OPENAI_ORG_ID``, and ``OPENAI_BASE_URL`` in ``env_file_path`` or the process environment. @@ -373,7 +373,8 @@ def __init__( select Azure on its own. The constructor reads the deployment name from the explicit ``model`` argument first and then from ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME``, - falling back to ``AZURE_OPENAI_DEPLOYMENT_NAME``. + falling back to ``AZURE_OPENAI_DEPLOYMENT_NAME``, ``OPENAI_EMBEDDING_MODEL``, + and then ``OPENAI_MODEL``. 
Authentication and endpoint settings come from the explicit Azure arguments first and then from ``AZURE_OPENAI_ENDPOINT``, ``AZURE_OPENAI_BASE_URL``, @@ -404,9 +405,11 @@ def __init__( Keyword Args: model: Embedding model or Azure OpenAI deployment name. When not provided, the - constructor reads ``OPENAI_EMBEDDING_MODEL`` for OpenAI routing. For Azure - routing it first checks ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME`` and then - falls back to ``AZURE_OPENAI_DEPLOYMENT_NAME``. + constructor reads ``OPENAI_EMBEDDING_MODEL`` and then ``OPENAI_MODEL`` + for OpenAI routing. For Azure routing it first checks + ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME``, then + ``AZURE_OPENAI_DEPLOYMENT_NAME``, then ``OPENAI_EMBEDDING_MODEL``, and + finally ``OPENAI_MODEL``. api_key: API key override. For OpenAI routing this maps to ``OPENAI_API_KEY``. For Azure routing this can be used instead of ``AZURE_OPENAI_API_KEY`` for key auth. A callable token provider is also accepted for backwards compatibility, @@ -441,10 +444,11 @@ def __init__( 3. Azure environment fallback OpenAI routing reads ``OPENAI_API_KEY``, ``OPENAI_EMBEDDING_MODEL``, - ``OPENAI_ORG_ID``, and ``OPENAI_BASE_URL``. Azure routing reads - ``AZURE_OPENAI_ENDPOINT``, ``AZURE_OPENAI_BASE_URL``, ``AZURE_OPENAI_API_KEY``, - ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME``, ``AZURE_OPENAI_DEPLOYMENT_NAME``, - and ``AZURE_OPENAI_API_VERSION``. + ``OPENAI_MODEL``, ``OPENAI_ORG_ID``, and ``OPENAI_BASE_URL``. Azure routing + reads ``AZURE_OPENAI_ENDPOINT``, ``AZURE_OPENAI_BASE_URL``, + ``AZURE_OPENAI_API_KEY``, ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME``, + ``AZURE_OPENAI_DEPLOYMENT_NAME``, ``OPENAI_EMBEDDING_MODEL``, + ``OPENAI_MODEL``, and ``AZURE_OPENAI_API_VERSION``. Examples: .. 
code-block:: python diff --git a/python/packages/openai/agent_framework_openai/_shared.py b/python/packages/openai/agent_framework_openai/_shared.py index 30430d496d..d92a5afda9 100644 --- a/python/packages/openai/agent_framework_openai/_shared.py +++ b/python/packages/openai/agent_framework_openai/_shared.py @@ -3,7 +3,6 @@ from __future__ import annotations import logging -import os import sys from collections.abc import Awaitable, Callable, Mapping, MutableMapping, Sequence from copy import copy @@ -15,7 +14,6 @@ from agent_framework._telemetry import APP_INFO, USER_AGENT_KEY, prepend_agent_framework_to_user_agent from agent_framework._tools import FunctionTool from agent_framework.exceptions import SettingNotFoundError -from dotenv import get_key from openai import AsyncAzureOpenAI, AsyncOpenAI, AsyncStream, _legacy_response # type: ignore from openai.types import Completion from openai.types.audio import Transcription @@ -99,6 +97,10 @@ class OpenAISettings(TypedDict, total=False): Can be set via environment variable OPENAI_MODEL. embedding_model: The OpenAI embedding model to use, for example, text-embedding-3-small. Can be set via environment variable OPENAI_EMBEDDING_MODEL. + chat_model: The OpenAI chat-completions model to prefer before OPENAI_MODEL. + Can be set via environment variable OPENAI_CHAT_MODEL. + responses_model: The OpenAI responses model to prefer before OPENAI_MODEL. + Can be set via environment variable OPENAI_RESPONSES_MODEL. Examples: .. 
code-block:: python @@ -122,6 +124,8 @@ class OpenAISettings(TypedDict, total=False): org_id: str | None model: str | None embedding_model: str | None + chat_model: str | None + responses_model: str | None class AzureOpenAISettings(TypedDict, total=False): @@ -131,24 +135,47 @@ class AzureOpenAISettings(TypedDict, total=False): base_url: str | None api_key: SecretString | None deployment_name: str | None + embedding_deployment_name: str | None + chat_deployment_name: str | None + responses_deployment_name: str | None api_version: str | None -OpenAIModelSettingName = Literal["model", "embedding_model"] +OpenAIModelSettingName = Literal["model", "embedding_model", "chat_model", "responses_model"] +AzureDeploymentSettingName = Literal[ + "deployment_name", "embedding_deployment_name", "chat_deployment_name", "responses_deployment_name" +] +OPENAI_MODEL_ENV_VARS: dict[OpenAIModelSettingName, str] = { + "model": "OPENAI_MODEL", + "embedding_model": "OPENAI_EMBEDDING_MODEL", + "chat_model": "OPENAI_CHAT_MODEL", + "responses_model": "OPENAI_RESPONSES_MODEL", +} -def _get_env_setting( - env_var_name: str, - *, - env_file_path: str | None, - env_file_encoding: str | None, +AZURE_DEPLOYMENT_ENV_VARS: dict[AzureDeploymentSettingName, str] = { + "deployment_name": "AZURE_OPENAI_DEPLOYMENT_NAME", + "embedding_deployment_name": "AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME", + "chat_deployment_name": "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME", + "responses_deployment_name": "AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME", +} + + +def _resolve_named_setting( + settings: Mapping[str, Any], + fields: Sequence[OpenAIModelSettingName | AzureDeploymentSettingName], ) -> str | None: - """Read a setting from an optional ``.env`` file first, then the process environment.""" - if env_file_path: - dotenv_value = get_key(env_file_path, env_var_name, encoding=env_file_encoding) # type: ignore[reportArgumentType, arg-type] - if dotenv_value: - return dotenv_value - return os.getenv(env_var_name) + """Return the 
first populated value from ``fields``.""" + for field in fields: + value = settings.get(field) + if isinstance(value, str) and value: + return value + return None + + +def _join_env_names(env_names: Sequence[str]) -> str: + """Format env var names for user-facing error messages.""" + return ", ".join(f"'{env_name}'" for env_name in env_names) def load_openai_service_settings( @@ -165,9 +192,8 @@ def load_openai_service_settings( client: AsyncOpenAI | None = None, env_file_path: str | None, env_file_encoding: str | None, - openai_model_field: OpenAIModelSettingName = "model", - openai_model_env_var: str = "OPENAI_MODEL", - azure_deployment_env_vars: Sequence[str] = ("AZURE_OPENAI_DEPLOYMENT_NAME",), + openai_model_fields: Sequence[OpenAIModelSettingName] = ("model",), + azure_deployment_fields: Sequence[AzureDeploymentSettingName] = ("deployment_name",), ) -> tuple[dict[str, Any], AsyncOpenAI, bool]: """Load OpenAI settings, including Azure OpenAI aliases. @@ -184,32 +210,36 @@ def load_openai_service_settings( api_key_callable = api_key if callable(api_key) else None api_key_str = api_key if not callable(api_key) else None - azure_client = isinstance(client, AsyncAzureOpenAI) use_azure = azure_client or endpoint is not None or credential is not None checked_openai = False + openai_settings_kwargs: dict[str, Any] = { + "api_key": api_key_str, + "org_id": org_id, + "base_url": base_url, + "env_file_path": env_file_path, + "env_file_encoding": env_file_encoding, + } + if model is not None: + openai_settings_kwargs[openai_model_fields[0]] = model + openai_settings: OpenAISettings | None = None if not use_azure: - openai_settings_kwargs: dict[str, Any] = { - "api_key": api_key_str, - "org_id": org_id, - "base_url": base_url, - "env_file_path": env_file_path, - "env_file_encoding": env_file_encoding, - } - openai_settings_kwargs[openai_model_field] = model openai_settings = load_settings( OpenAISettings, env_prefix="OPENAI_", **openai_settings_kwargs, ) + if 
resolved_model := _resolve_named_setting(openai_settings, openai_model_fields): + openai_settings["model"] = resolved_model if client: return openai_settings, client, False # type: ignore[return-value] if openai_settings.get("api_key") is not None or api_key_callable is not None: - resolved_model = openai_settings.get(openai_model_field) + resolved_model = _resolve_named_setting(openai_settings, openai_model_fields) if not resolved_model: raise SettingNotFoundError( "Model must be specified via the 'model' parameter or the " - f"'{openai_model_env_var}' environment variable." + f"{_join_env_names([OPENAI_MODEL_ENV_VARS[field] for field in openai_model_fields])} " + "environment variable." ) client_args: dict[str, Any] = { @@ -223,7 +253,6 @@ def load_openai_service_settings( client_args["base_url"] = base_url return openai_settings, AsyncOpenAI(**client_args), False # type: ignore[return-value] checked_openai = True - azure_settings = load_settings( AzureOpenAISettings, env_prefix="AZURE_OPENAI_", @@ -231,34 +260,34 @@ def load_openai_service_settings( api_key=api_key_str, endpoint=endpoint, base_url=base_url, - deployment_name=model, api_version=api_version or default_azure_api_version, env_file_path=env_file_path, env_file_encoding=env_file_encoding, ) + if model is not None: + azure_settings[azure_deployment_fields[0]] = model client_args = {} - if model is None: - for azure_deployment_env_var in azure_deployment_env_vars: - if deployment_name := _get_env_setting( - azure_deployment_env_var, - env_file_path=env_file_path, - env_file_encoding=env_file_encoding, - ): - azure_settings["deployment_name"] = deployment_name - break - if ("deployment_name" not in azure_settings or not azure_settings["deployment_name"]) and ( - openai_model := _get_env_setting( - openai_model_env_var, - env_file_path=env_file_path, - env_file_encoding=env_file_encoding, - ) - ): - # load the OpenAI model env var as a fallback for Azure routing - azure_settings["deployment_name"] = 
openai_model - if model := azure_settings.get("deployment_name"): - client_args["azure_deployment"] = model + resolved_azure_deployment = _resolve_named_setting(azure_settings, azure_deployment_fields) + if resolved_azure_deployment is None and client: + azure_deployment = getattr(client, "_azure_deployment", None) + if isinstance(azure_deployment, str) and azure_deployment: + resolved_azure_deployment = azure_deployment + if resolved_azure_deployment is None: + if openai_settings is None: + openai_settings = load_settings( + OpenAISettings, + env_prefix="OPENAI_", + **openai_settings_kwargs, + ) + resolved_azure_deployment = _resolve_named_setting(openai_settings, openai_model_fields) + if resolved_azure_deployment: + azure_settings["deployment_name"] = resolved_azure_deployment + client_args["azure_deployment"] = resolved_azure_deployment else: - deployment_env_guidance = ", ".join(f"'{env_var}'" for env_var in azure_deployment_env_vars) + deployment_env_guidance = _join_env_names([ + AZURE_DEPLOYMENT_ENV_VARS[field] for field in azure_deployment_fields + ]) + openai_model_guidance = _join_env_names([OPENAI_MODEL_ENV_VARS[field] for field in openai_model_fields]) if checked_openai: raise SettingNotFoundError( "OpenAI credentials are required. Provide the 'api_key' parameter or set 'OPENAI_API_KEY'. " @@ -267,7 +296,7 @@ def load_openai_service_settings( ) raise SettingNotFoundError( "Azure OpenAI client requires a deployment name, which can be provided via the 'model' parameter, " - f"the {deployment_env_guidance} environment variable, or the '{openai_model_env_var}' " + f"the {deployment_env_guidance} environment variable, or the {openai_model_guidance} " "environment variable." 
) if client: @@ -286,7 +315,7 @@ def load_openai_service_settings( if credential: client_args["azure_ad_token_provider"] = _resolve_azure_credential_to_token_provider(credential) if "api_key" not in client_args and "azure_ad_token_provider" not in client_args: - raise ValueError( + raise SettingNotFoundError( "Azure OpenAI client requires either an API key or an Azure AD token provider." " This can be provided either as a callable api_key or via the credential parameter." ) diff --git a/python/packages/openai/tests/openai/conftest.py b/python/packages/openai/tests/openai/conftest.py index 0a578fac8e..34ea878a19 100644 --- a/python/packages/openai/tests/openai/conftest.py +++ b/python/packages/openai/tests/openai/conftest.py @@ -43,6 +43,8 @@ def openai_unit_test_env(monkeypatch, exclude_list, override_env_param_dict): # "OPENAI_ORG_ID", "OPENAI_MODEL", "OPENAI_EMBEDDING_MODEL", + "OPENAI_CHAT_MODEL", + "OPENAI_RESPONSES_MODEL", "OPENAI_TEXT_MODEL_ID", "OPENAI_TEXT_TO_IMAGE_MODEL_ID", "OPENAI_AUDIO_TO_TEXT_MODEL_ID", @@ -100,6 +102,8 @@ def azure_openai_unit_test_env(monkeypatch, exclude_list, override_env_param_dic "OPENAI_ORG_ID", "OPENAI_MODEL", "OPENAI_EMBEDDING_MODEL", + "OPENAI_CHAT_MODEL", + "OPENAI_RESPONSES_MODEL", "OPENAI_TEXT_MODEL_ID", "OPENAI_TEXT_TO_IMAGE_MODEL_ID", "OPENAI_AUDIO_TO_TEXT_MODEL_ID", diff --git a/python/packages/openai/tests/openai/test_openai_chat_client.py b/python/packages/openai/tests/openai/test_openai_chat_client.py index 15c64d7e89..cfba5b57a4 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_client.py +++ b/python/packages/openai/tests/openai/test_openai_chat_client.py @@ -110,6 +110,14 @@ def test_init(openai_unit_test_env: dict[str, str]) -> None: assert isinstance(openai_responses_client, SupportsChatGetResponse) +def test_init_prefers_openai_responses_model(monkeypatch, openai_unit_test_env: dict[str, str]) -> None: + monkeypatch.setenv("OPENAI_RESPONSES_MODEL", "test_responses_model_id") + + 
openai_responses_client = OpenAIChatClient() + + assert openai_responses_client.model == "test_responses_model_id" + + def test_init_validation_fail() -> None: # Test successful initialization with pytest.raises(ValueError): diff --git a/python/packages/openai/tests/openai/test_openai_chat_client_azure.py b/python/packages/openai/tests/openai/test_openai_chat_client_azure.py index 39276e4839..e1c34d41c5 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_chat_client_azure.py @@ -106,7 +106,7 @@ def test_init_with_azure_endpoint(azure_openai_unit_test_env: dict[str, str]) -> def test_init_auto_detects_azure_env(azure_openai_unit_test_env: dict[str, str]) -> None: client = OpenAIChatClient() - assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_DEPLOYMENT_NAME"] + assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME"] assert isinstance(client.client, AsyncAzureOpenAI) assert client.azure_endpoint == azure_openai_unit_test_env["AZURE_OPENAI_ENDPOINT"] @@ -141,11 +141,47 @@ def test_explicit_credential_wins_over_openai_api_key(monkeypatch, azure_openai_ client = OpenAIChatClient(credential=lambda: "token") - assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_DEPLOYMENT_NAME"] + assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME"] assert isinstance(client.client, AsyncAzureOpenAI) assert client.azure_endpoint == azure_openai_unit_test_env["AZURE_OPENAI_ENDPOINT"] +def test_init_falls_back_to_generic_azure_deployment_env( + monkeypatch, azure_openai_unit_test_env: dict[str, str] +) -> None: + monkeypatch.delenv("AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME", raising=False) + + client = OpenAIChatClient() + + assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_DEPLOYMENT_NAME"] + assert isinstance(client.client, AsyncAzureOpenAI) + + +def 
test_init_falls_back_to_openai_responses_model_for_azure_env( + monkeypatch, azure_openai_unit_test_env: dict[str, str] +) -> None: + monkeypatch.delenv("AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME", raising=False) + monkeypatch.delenv("AZURE_OPENAI_DEPLOYMENT_NAME", raising=False) + monkeypatch.setenv("OPENAI_RESPONSES_MODEL", "test_responses_model") + + client = OpenAIChatClient() + + assert client.model == "test_responses_model" + assert isinstance(client.client, AsyncAzureOpenAI) + + +def test_init_falls_back_to_openai_model_for_azure_env(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: + monkeypatch.delenv("AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME", raising=False) + monkeypatch.delenv("AZURE_OPENAI_DEPLOYMENT_NAME", raising=False) + monkeypatch.delenv("OPENAI_RESPONSES_MODEL", raising=False) + monkeypatch.setenv("OPENAI_MODEL", "gpt-5") + + client = OpenAIChatClient() + + assert client.model == "gpt-5" + assert isinstance(client.client, AsyncAzureOpenAI) + + def test_init_with_credential_wraps_async_token_credential( monkeypatch, azure_openai_unit_test_env: dict[str, str] ) -> None: diff --git a/python/packages/openai/tests/openai/test_openai_chat_completion_client.py b/python/packages/openai/tests/openai/test_openai_chat_completion_client.py index d48ab7d476..1a2d333c47 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_completion_client.py +++ b/python/packages/openai/tests/openai/test_openai_chat_completion_client.py @@ -37,6 +37,14 @@ def test_init(openai_unit_test_env: dict[str, str]) -> None: assert isinstance(open_ai_chat_completion, SupportsChatGetResponse) +def test_init_prefers_openai_chat_model(monkeypatch, openai_unit_test_env: dict[str, str]) -> None: + monkeypatch.setenv("OPENAI_CHAT_MODEL", "test_chat_model_id") + + open_ai_chat_completion = OpenAIChatCompletionClient() + + assert open_ai_chat_completion.model == "test_chat_model_id" + + def test_init_validation_fail() -> None: # Test successful initialization with 
pytest.raises(ValueError): diff --git a/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py b/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py index 16fab5141a..36e0c52cd8 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py @@ -86,7 +86,7 @@ def test_init_with_azure_endpoint(azure_openai_unit_test_env: dict[str, str]) -> def test_init_auto_detects_azure_env(azure_openai_unit_test_env: dict[str, str]) -> None: client = OpenAIChatCompletionClient() - assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_DEPLOYMENT_NAME"] + assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"] assert isinstance(client.client, AsyncAzureOpenAI) assert client.azure_endpoint == azure_openai_unit_test_env["AZURE_OPENAI_ENDPOINT"] @@ -121,11 +121,47 @@ def test_explicit_credential_wins_over_openai_api_key(monkeypatch, azure_openai_ client = OpenAIChatCompletionClient(credential=lambda: "token") - assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_DEPLOYMENT_NAME"] + assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"] assert isinstance(client.client, AsyncAzureOpenAI) assert client.azure_endpoint == azure_openai_unit_test_env["AZURE_OPENAI_ENDPOINT"] +def test_init_falls_back_to_generic_azure_deployment_env( + monkeypatch, azure_openai_unit_test_env: dict[str, str] +) -> None: + monkeypatch.delenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME", raising=False) + + client = OpenAIChatCompletionClient() + + assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_DEPLOYMENT_NAME"] + assert isinstance(client.client, AsyncAzureOpenAI) + + +def test_init_falls_back_to_openai_chat_model_for_azure_env( + monkeypatch, azure_openai_unit_test_env: dict[str, str] +) -> None: + monkeypatch.delenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME", 
raising=False) + monkeypatch.delenv("AZURE_OPENAI_DEPLOYMENT_NAME", raising=False) + monkeypatch.setenv("OPENAI_CHAT_MODEL", "test_chat_model") + + client = OpenAIChatCompletionClient() + + assert client.model == "test_chat_model" + assert isinstance(client.client, AsyncAzureOpenAI) + + +def test_init_falls_back_to_openai_model_for_azure_env(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: + monkeypatch.delenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME", raising=False) + monkeypatch.delenv("AZURE_OPENAI_DEPLOYMENT_NAME", raising=False) + monkeypatch.delenv("OPENAI_CHAT_MODEL", raising=False) + monkeypatch.setenv("OPENAI_MODEL", "gpt-5") + + client = OpenAIChatCompletionClient() + + assert client.model == "gpt-5" + assert isinstance(client.client, AsyncAzureOpenAI) + + def test_init_with_credential_wraps_async_token_credential( monkeypatch, azure_openai_unit_test_env: dict[str, str] ) -> None: diff --git a/python/packages/openai/tests/openai/test_openai_embedding_client.py b/python/packages/openai/tests/openai/test_openai_embedding_client.py index 5715c488d2..4ef39697d6 100644 --- a/python/packages/openai/tests/openai/test_openai_embedding_client.py +++ b/python/packages/openai/tests/openai/test_openai_embedding_client.py @@ -65,14 +65,15 @@ async def get_api_key() -> str: def test_openai_construction_without_openai_or_azure_config_raises_clear_error( openai_unit_test_env: dict[str, str], ) -> None: - with pytest.raises(SettingNotFoundError, match="OPENAI_API_KEY"): + with pytest.raises(SettingNotFoundError): OpenAIEmbeddingClient(model="text-embedding-3-small") @pytest.mark.parametrize("exclude_list", [["OPENAI_EMBEDDING_MODEL"]], indirect=True) -def test_openai_construction_missing_model_raises(openai_unit_test_env: dict[str, str]) -> None: - with pytest.raises(SettingNotFoundError, match="OPENAI_EMBEDDING_MODEL"): - OpenAIEmbeddingClient() +def test_openai_construction_falls_back_to_openai_model(openai_unit_test_env: dict[str, str]) -> None: + client = 
OpenAIEmbeddingClient() + + assert client.model == openai_unit_test_env["OPENAI_MODEL"] async def test_openai_get_embeddings(openai_unit_test_env: dict[str, str]) -> None: diff --git a/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py b/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py index 0a2e9082d7..00fdea5e6b 100644 --- a/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py @@ -74,6 +74,31 @@ def test_init_falls_back_to_generic_azure_deployment_env( assert isinstance(client.client, AsyncAzureOpenAI) +def test_init_falls_back_to_openai_embedding_model_for_azure_env( + monkeypatch, azure_openai_unit_test_env: dict[str, str] +) -> None: + monkeypatch.delenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME", raising=False) + monkeypatch.delenv("AZURE_OPENAI_DEPLOYMENT_NAME", raising=False) + monkeypatch.setenv("OPENAI_EMBEDDING_MODEL", "text-embedding-3-small") + + client = OpenAIEmbeddingClient() + + assert client.model == "text-embedding-3-small" + assert isinstance(client.client, AsyncAzureOpenAI) + + +def test_init_falls_back_to_openai_model_for_azure_env(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: + monkeypatch.delenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME", raising=False) + monkeypatch.delenv("AZURE_OPENAI_DEPLOYMENT_NAME", raising=False) + monkeypatch.delenv("OPENAI_EMBEDDING_MODEL", raising=False) + monkeypatch.setenv("OPENAI_MODEL", "gpt-5") + + client = OpenAIEmbeddingClient() + + assert client.model == "gpt-5" + assert isinstance(client.client, AsyncAzureOpenAI) + + def test_openai_api_key_wins_over_azure_env(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: monkeypatch.setenv("OPENAI_API_KEY", "test-dummy-key") monkeypatch.setenv("OPENAI_EMBEDDING_MODEL", "text-embedding-3-small") From fa015221f26c4608510493d6c8ff57c6419d7a1d Mon Sep 17 00:00:00 2001 From: eavanvalkenburg 
Date: Thu, 26 Mar 2026 16:32:25 +0100 Subject: [PATCH 12/30] Python: narrow Azure deployment docs Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../agent_framework_openai/_chat_client.py | 116 +++++++++++--- .../_chat_completion_client.py | 99 ++++++++++-- .../_embedding_client.py | 150 ++++++++++-------- .../openai/agent_framework_openai/_shared.py | 19 +-- .../openai/test_openai_chat_client_azure.py | 19 ++- ...est_openai_chat_completion_client_azure.py | 19 ++- .../test_openai_embedding_client_azure.py | 19 ++- 7 files changed, 300 insertions(+), 141 deletions(-) diff --git a/python/packages/openai/agent_framework_openai/_chat_client.py b/python/packages/openai/agent_framework_openai/_chat_client.py index 04a6a7edc9..bbec08390b 100644 --- a/python/packages/openai/agent_framework_openai/_chat_client.py +++ b/python/packages/openai/agent_framework_openai/_chat_client.py @@ -281,10 +281,23 @@ def __init__( env_file_path: str | None = None, env_file_encoding: str | None = None, ) -> None: - """Initialize a raw OpenAI Responses client with OpenAI-only routing. + """Initialize a raw OpenAI Chat client. - This overload describes the OpenAI shape. Explicit keyword arguments are used first, - then ``OPENAI_*`` values from ``env_file_path`` or the process environment. + Keyword Args: + model: Model identifier to use for the request. When not provided, the constructor + reads ``OPENAI_RESPONSES_MODEL`` and then ``OPENAI_MODEL``. + api_key: API key. When not provided explicitly, the constructor reads + ``OPENAI_API_KEY``. A callable API key is also supported. + org_id: OpenAI organization ID. When not provided explicitly, the constructor reads + ``OPENAI_ORG_ID``. + base_url: Base URL override. When not provided explicitly, the constructor reads + ``OPENAI_BASE_URL``. + default_headers: Additional HTTP headers. + async_client: Pre-configured OpenAI client. + instruction_role: Role for instruction messages (for example ``"system"``). 
+ env_file_path: Optional ``.env`` file that is checked before the process environment + for ``OPENAI_*`` values. + env_file_encoding: Encoding for the ``.env`` file. """ ... @@ -304,11 +317,30 @@ def __init__( env_file_path: str | None = None, env_file_encoding: str | None = None, ) -> None: - """Initialize a raw OpenAI Responses client with Azure routing. + """Initialize a raw OpenAI Chat client. - This overload describes the Azure shape. Passing ``azure_endpoint`` or - ``credential`` forces Azure routing, and missing Azure values fall back to - ``AZURE_OPENAI_*`` values from ``env_file_path`` or the process environment. + Keyword Args: + model: Model identifier to use for the request. When not provided, the constructor + reads ``AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME`` and then + ``AZURE_OPENAI_DEPLOYMENT_NAME``. + azure_endpoint: Azure resource endpoint. When not provided explicitly, the constructor + reads ``AZURE_OPENAI_ENDPOINT``. + credential: Azure credential or token provider for Entra auth. + api_version: Azure API version. When not provided explicitly, the constructor reads + ``AZURE_OPENAI_API_VERSION`` and then uses the Responses default. + api_key: API key. For Azure this can be used instead of ``AZURE_OPENAI_API_KEY`` for key + auth. A callable token provider is also accepted, + but ``credential`` is the preferred Azure auth surface. + base_url: Base URL override. When not provided explicitly, the constructor reads + ``AZURE_OPENAI_BASE_URL``. Use this instead of ``azure_endpoint`` when you want + to pass the full ``.../openai/v1`` base URL directly. + default_headers: Additional HTTP headers. + async_client: Pre-configured client. Passing ``AsyncAzureOpenAI`` keeps the client on + Azure; passing ``AsyncOpenAI`` keeps the client on OpenAI and bypasses env lookup. + instruction_role: Role for instruction messages (for example ``"system"``). 
+ env_file_path: Optional ``.env`` file that is checked before process environment + variables for ``AZURE_OPENAI_*`` values. + env_file_encoding: Encoding for the ``.env`` file. """ ... @@ -330,27 +362,27 @@ def __init__( env_file_encoding: str | None = None, **kwargs: Any, ) -> None: - """Initialize a raw OpenAI Responses client. + """Initialize a raw OpenAI Chat client. Keyword Args: model: Model identifier to use for the request. When not provided, the constructor - reads ``OPENAI_RESPONSES_MODEL`` and then ``OPENAI_MODEL`` for OpenAI - routing, or ``AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME`` and then - ``AZURE_OPENAI_DEPLOYMENT_NAME`` for Azure routing. + reads ``OPENAI_RESPONSES_MODEL`` and then ``OPENAI_MODEL`` for OpenAI, + or ``AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME`` and then + ``AZURE_OPENAI_DEPLOYMENT_NAME`` for Azure. model_id: Deprecated alias for ``model``. - api_key: API key override. For OpenAI routing this maps to ``OPENAI_API_KEY``. - For Azure routing this can be used instead of ``AZURE_OPENAI_API_KEY`` for key + api_key: API key override. For OpenAI this maps to ``OPENAI_API_KEY``. + For Azure this can be used instead of ``AZURE_OPENAI_API_KEY`` for key auth. A callable token provider is also accepted for backwards compatibility, but ``credential`` is the preferred Azure auth surface. credential: Azure credential or token provider for Azure OpenAI auth. Passing this is an explicit Azure signal, even when ``OPENAI_API_KEY`` is also configured. Credential objects require the optional ``azure-identity`` package. - org_id: OpenAI organization ID. Used only for OpenAI routing and resolved from + org_id: OpenAI organization ID. Used only for OpenAI and resolved from ``OPENAI_ORG_ID`` when not provided. - base_url: Base URL override. For OpenAI routing this maps to ``OPENAI_BASE_URL``. - For Azure routing this may be used instead of ``azure_endpoint`` when you want + base_url: Base URL override. For OpenAI this maps to ``OPENAI_BASE_URL``. 
+ For Azure this may be used instead of ``azure_endpoint`` when you want to pass the full ``.../openai/v1`` base URL directly. - azure_endpoint: Azure resource endpoint. When not provided explicitly, Azure routing + azure_endpoint: Azure resource endpoint. When not provided explicitly, Azure falls back to ``AZURE_OPENAI_ENDPOINT``. api_version: Azure API version to use once Azure routing is selected. When not provided explicitly, Azure routing falls back to @@ -2441,10 +2473,25 @@ def __init__( middleware: Sequence[ChatAndFunctionMiddlewareTypes] | None = None, function_invocation_configuration: FunctionInvocationConfiguration | None = None, ) -> None: - """Initialize an OpenAI Responses client with OpenAI-only routing. + """Initialize an OpenAI Responses client. - This overload describes the OpenAI shape. Explicit keyword arguments are used first, - then ``OPENAI_*`` values from ``env_file_path`` or the process environment. + Keyword Args: + model: Model identifier to use for the request. When not provided, the constructor + reads ``OPENAI_RESPONSES_MODEL`` and then ``OPENAI_MODEL``. + api_key: API key. When not provided explicitly, the constructor reads + ``OPENAI_API_KEY``. A callable API key is also supported. + org_id: OpenAI organization ID. When not provided explicitly, the constructor reads + ``OPENAI_ORG_ID``. + base_url: Base URL override. When not provided explicitly, the constructor reads + ``OPENAI_BASE_URL``. + default_headers: Additional HTTP headers. + async_client: Pre-configured OpenAI client. + instruction_role: Role for instruction messages (for example ``"system"``). + env_file_path: Optional ``.env`` file that is checked before the process environment + for ``OPENAI_*`` values. + env_file_encoding: Encoding for the ``.env`` file. + middleware: Optional middleware to apply to the client. + function_invocation_configuration: Optional function invocation configuration override. """ ... 
@@ -2466,11 +2513,32 @@ def __init__( middleware: Sequence[ChatAndFunctionMiddlewareTypes] | None = None, function_invocation_configuration: FunctionInvocationConfiguration | None = None, ) -> None: - """Initialize an OpenAI Responses client with Azure routing. + """Initialize an OpenAI Responses client. - This overload describes the Azure shape. Passing ``azure_endpoint`` or - ``credential`` forces Azure routing, and missing Azure values fall back to - ``AZURE_OPENAI_*`` values from ``env_file_path`` or the process environment. + Keyword Args: + model: Model identifier to use for the request. When not provided, the constructor + reads ``AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME`` and then + ``AZURE_OPENAI_DEPLOYMENT_NAME``. + azure_endpoint: Azure resource endpoint. When not provided explicitly, the constructor + reads ``AZURE_OPENAI_ENDPOINT``. + credential: Azure credential or token provider for Entra auth. + api_version: Azure API version. When not provided explicitly, the constructor reads + ``AZURE_OPENAI_API_VERSION`` and then uses the Responses default. + api_key: API key. For Azure this can be used instead of ``AZURE_OPENAI_API_KEY`` for key + auth. A callable token provider is also accepted, but ``credential`` is the preferred + Azure auth surface. + base_url: Base URL override. When not provided explicitly, the constructor reads + ``AZURE_OPENAI_BASE_URL``. Use this instead of ``azure_endpoint`` when you want + to pass the full ``.../openai/v1`` base URL directly. + default_headers: Additional HTTP headers. + async_client: Pre-configured client. Passing ``AsyncAzureOpenAI`` keeps the client on + Azure; passing ``AsyncOpenAI`` keeps the client on OpenAI and bypasses env lookup. + instruction_role: Role for instruction messages (for example ``"system"``). + env_file_path: Optional ``.env`` file that is checked before process environment + variables for ``AZURE_OPENAI_*`` values. + env_file_encoding: Encoding for the ``.env`` file. 
+ middleware: Optional middleware to apply to the client. + function_invocation_configuration: Optional function invocation configuration override. """ ... diff --git a/python/packages/openai/agent_framework_openai/_chat_completion_client.py b/python/packages/openai/agent_framework_openai/_chat_completion_client.py index ee78892e54..4bddc1eea1 100644 --- a/python/packages/openai/agent_framework_openai/_chat_completion_client.py +++ b/python/packages/openai/agent_framework_openai/_chat_completion_client.py @@ -196,10 +196,23 @@ def __init__( env_file_path: str | None = None, env_file_encoding: str | None = None, ) -> None: - """Initialize a raw OpenAI Chat Completions client with OpenAI-only routing. + """Initialize a raw OpenAI Chat Completions client. - This overload describes the OpenAI shape. Explicit keyword arguments are used first, - then ``OPENAI_*`` values from ``env_file_path`` or the process environment. + Keyword Args: + model: Model identifier to use for the request. When not provided, the constructor + reads ``OPENAI_CHAT_MODEL`` and then ``OPENAI_MODEL``. + api_key: API key. When not provided explicitly, the constructor reads + ``OPENAI_API_KEY``. A callable API key is also supported. + org_id: OpenAI organization ID. When not provided explicitly, the constructor reads + ``OPENAI_ORG_ID``. + base_url: Base URL override. When not provided explicitly, the constructor reads + ``OPENAI_BASE_URL``. + default_headers: Additional HTTP headers. + async_client: Pre-configured OpenAI client. + instruction_role: Role for instruction messages (for example ``"system"``). + env_file_path: Optional ``.env`` file that is checked before the process environment + for ``OPENAI_*`` values. + env_file_encoding: Encoding for the ``.env`` file. """ ... @@ -219,11 +232,30 @@ def __init__( env_file_path: str | None = None, env_file_encoding: str | None = None, ) -> None: - """Initialize a raw OpenAI Chat Completions client with Azure routing. 
+ """Initialize a raw OpenAI Chat Completions client. - This overload describes the Azure shape. Passing ``azure_endpoint`` or - ``credential`` forces Azure routing, and missing Azure values fall back to - ``AZURE_OPENAI_*`` values from ``env_file_path`` or the process environment. + Keyword Args: + model: Model identifier to use for the request. When not provided, the constructor + reads ``AZURE_OPENAI_CHAT_DEPLOYMENT_NAME`` and then + ``AZURE_OPENAI_DEPLOYMENT_NAME``. + azure_endpoint: Azure resource endpoint. When not provided explicitly, the constructor + reads ``AZURE_OPENAI_ENDPOINT``. + credential: Azure credential or token provider for Entra auth. + api_version: Azure API version. When not provided explicitly, the constructor reads + ``AZURE_OPENAI_API_VERSION`` and then uses the Chat Completions default. + api_key: API key. For Azure this can be used instead of ``AZURE_OPENAI_API_KEY`` for key + auth. A callable token provider is also accepted, but ``credential`` is the preferred + Azure auth surface. + base_url: Base URL override. When not provided explicitly, the constructor reads + ``AZURE_OPENAI_BASE_URL``. Use this instead of ``azure_endpoint`` when you want + to pass the full ``.../openai/v1`` base URL directly. + default_headers: Additional HTTP headers. + async_client: Pre-configured client. Passing ``AsyncAzureOpenAI`` keeps the client on + Azure; passing ``AsyncOpenAI`` keeps the client on OpenAI and bypasses env lookup. + instruction_role: Role for instruction messages (for example ``"system"``). + env_file_path: Optional ``.env`` file that is checked before process environment + variables for ``AZURE_OPENAI_*`` values. + env_file_encoding: Encoding for the ``.env`` file. """ ... @@ -985,7 +1017,28 @@ def __init__( env_file_encoding: str | None = None, middleware: Sequence[ChatAndFunctionMiddlewareTypes] | None = None, function_invocation_configuration: FunctionInvocationConfiguration | None = None, - ) -> None: ... 
+ ) -> None: + """Initialize an OpenAI Chat Completions client. + + Keyword Args: + model: Model identifier to use for the request. When not provided, the constructor + reads ``OPENAI_CHAT_MODEL`` and then ``OPENAI_MODEL``. + api_key: API key. When not provided explicitly, the constructor reads + ``OPENAI_API_KEY``. A callable API key is also supported. + org_id: OpenAI organization ID. When not provided explicitly, the constructor reads + ``OPENAI_ORG_ID``. + default_headers: Additional HTTP headers. + async_client: Pre-configured OpenAI client. + instruction_role: Role for instruction messages (for example ``"system"``). + base_url: Base URL override. When not provided explicitly, the constructor reads + ``OPENAI_BASE_URL``. + env_file_path: Optional ``.env`` file that is checked before the process environment + for ``OPENAI_*`` values. + env_file_encoding: Encoding for the ``.env`` file. + middleware: Optional sequence of ChatAndFunctionMiddlewareTypes to apply to requests. + function_invocation_configuration: Optional configuration for function invocation support. + """ + ... @overload def __init__( @@ -1004,7 +1057,35 @@ def __init__( env_file_encoding: str | None = None, middleware: Sequence[ChatAndFunctionMiddlewareTypes] | None = None, function_invocation_configuration: FunctionInvocationConfiguration | None = None, - ) -> None: ... + ) -> None: + """Initialize an OpenAI Chat Completions client. + + Keyword Args: + model: Model identifier to use for the request. When not provided, the constructor + reads ``AZURE_OPENAI_CHAT_DEPLOYMENT_NAME`` and then + ``AZURE_OPENAI_DEPLOYMENT_NAME``. + azure_endpoint: Azure resource endpoint. When not provided explicitly, the constructor + reads ``AZURE_OPENAI_ENDPOINT``. + credential: Azure credential or token provider for Entra auth. + api_version: Azure API version. When not provided explicitly, the constructor reads + ``AZURE_OPENAI_API_VERSION`` and then uses the Chat Completions default. + api_key: API key. 
For Azure this can be used instead of ``AZURE_OPENAI_API_KEY`` for key + auth. A callable token provider is also accepted, but ``credential`` is the preferred + Azure auth surface. + base_url: Base URL override. When not provided explicitly, the constructor reads + ``AZURE_OPENAI_BASE_URL``. Use this instead of ``azure_endpoint`` when you want + to pass the full ``.../openai/v1`` base URL directly. + default_headers: Additional HTTP headers. + async_client: Pre-configured client. Passing ``AsyncAzureOpenAI`` keeps the client on + Azure; passing ``AsyncOpenAI`` keeps the client on OpenAI and bypasses env lookup. + instruction_role: Role for instruction messages (for example ``"system"``). + env_file_path: Optional ``.env`` file that is checked before process environment + variables for ``AZURE_OPENAI_*`` values. + env_file_encoding: Encoding for the ``.env`` file. + middleware: Optional sequence of ChatAndFunctionMiddlewareTypes to apply to requests. + function_invocation_configuration: Optional configuration for function invocation support. + """ + ... def __init__( self, diff --git a/python/packages/openai/agent_framework_openai/_embedding_client.py b/python/packages/openai/agent_framework_openai/_embedding_client.py index 63968877c3..76d0628b99 100644 --- a/python/packages/openai/agent_framework_openai/_embedding_client.py +++ b/python/packages/openai/agent_framework_openai/_embedding_client.py @@ -82,17 +82,22 @@ def __init__( env_file_path: str | None = None, env_file_encoding: str | None = None, ) -> None: - """Initialize a raw OpenAI embedding client with OpenAI-only routing. - - Use this overload when you want the generic OpenAI embeddings endpoint. The - constructor reads ``model`` from the explicit argument first and then from - ``OPENAI_EMBEDDING_MODEL``, falling back to ``OPENAI_MODEL``. 
Authentication and endpoint settings come from - the explicit ``api_key``, ``org_id``, and ``base_url`` arguments first and - then from ``OPENAI_API_KEY``, ``OPENAI_ORG_ID``, and ``OPENAI_BASE_URL`` in - ``env_file_path`` or the process environment. + """Initialize a raw OpenAI embedding client. - Azure-specific environment variables are ignored for this overload unless an - explicit Azure signal is provided via the Azure overload shape. + Keyword Args: + model: Embedding model identifier. When not provided, the constructor reads + ``OPENAI_EMBEDDING_MODEL`` and then ``OPENAI_MODEL``. + api_key: API key. When not provided explicitly, the constructor reads + ``OPENAI_API_KEY``. A callable API key is also supported. + org_id: OpenAI organization ID. When not provided explicitly, the constructor reads + ``OPENAI_ORG_ID``. + base_url: Base URL override. When not provided explicitly, the constructor reads + ``OPENAI_BASE_URL``. + default_headers: Additional HTTP headers. + async_client: Pre-configured OpenAI client. + env_file_path: Optional ``.env`` file that is checked before the process environment + for ``OPENAI_*`` values. + env_file_encoding: Encoding for the ``.env`` file. """ ... @@ -111,24 +116,29 @@ def __init__( env_file_path: str | None = None, env_file_encoding: str | None = None, ) -> None: - """Initialize a raw OpenAI embedding client with Azure routing. - - Use this overload when you want Azure OpenAI embeddings. Passing - ``azure_endpoint`` or ``credential`` is an explicit Azure signal and forces - Azure routing even when ``OPENAI_API_KEY`` is also present. ``api_version`` - configures Azure requests after Azure routing is selected, but it does not - select Azure on its own. - The constructor reads the deployment name from the explicit ``model`` - argument first and then from ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME``, - falling back to ``AZURE_OPENAI_DEPLOYMENT_NAME``, ``OPENAI_EMBEDDING_MODEL``, - and then ``OPENAI_MODEL``. 
- - Authentication and endpoint settings come from the explicit Azure arguments - first and then from ``AZURE_OPENAI_ENDPOINT``, ``AZURE_OPENAI_BASE_URL``, - ``AZURE_OPENAI_API_KEY``, and ``AZURE_OPENAI_API_VERSION`` in - ``env_file_path`` or the process environment. ``credential`` is the - preferred Azure auth surface; ``api_key`` remains supported for Azure key - auth and callable token providers for compatibility. + """Initialize a raw OpenAI embedding client. + + Keyword Args: + model: Embedding deployment name. When not provided, the constructor reads + ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME`` and then + ``AZURE_OPENAI_DEPLOYMENT_NAME``. + azure_endpoint: Azure resource endpoint. When not provided explicitly, the constructor + reads ``AZURE_OPENAI_ENDPOINT``. + credential: Azure credential or token provider for Entra auth. + api_version: Azure API version. When not provided explicitly, the constructor reads + ``AZURE_OPENAI_API_VERSION`` and then uses the embedding default. + api_key: API key. For Azure this can be used instead of ``AZURE_OPENAI_API_KEY`` for key + auth. A callable token provider is also accepted, but ``credential`` is the preferred + Azure auth surface. + base_url: Base URL override. When not provided explicitly, the constructor reads + ``AZURE_OPENAI_BASE_URL``. Use this instead of ``azure_endpoint`` when you want + to pass the full ``.../openai/v1`` base URL directly. + default_headers: Additional HTTP headers. + async_client: Pre-configured client. Passing ``AsyncAzureOpenAI`` keeps the client on + Azure; passing ``AsyncOpenAI`` keeps the client on OpenAI. + env_file_path: Optional ``.env`` file that is checked before process environment + variables for ``AZURE_OPENAI_*`` values. + env_file_encoding: Encoding for the ``.env`` file. """ ... @@ -155,9 +165,8 @@ def __init__( model: Embedding model or Azure OpenAI deployment name. 
When not provided, the constructor reads ``OPENAI_EMBEDDING_MODEL`` and then ``OPENAI_MODEL`` for OpenAI routing. For Azure routing it first checks - ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME``, then - ``AZURE_OPENAI_DEPLOYMENT_NAME``, then ``OPENAI_EMBEDDING_MODEL``, and - finally ``OPENAI_MODEL``. + ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME`` and then + ``AZURE_OPENAI_DEPLOYMENT_NAME``. model_id: Deprecated alias for ``model``. api_key: API key override. For OpenAI routing this maps to ``OPENAI_API_KEY``. For Azure routing this can be used instead of ``AZURE_OPENAI_API_KEY`` for key @@ -196,8 +205,7 @@ def __init__( ``OPENAI_MODEL``, ``OPENAI_ORG_ID``, and ``OPENAI_BASE_URL``. Azure routing reads ``AZURE_OPENAI_ENDPOINT``, ``AZURE_OPENAI_BASE_URL``, ``AZURE_OPENAI_API_KEY``, ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME``, - ``AZURE_OPENAI_DEPLOYMENT_NAME``, ``OPENAI_EMBEDDING_MODEL``, - ``OPENAI_MODEL``, and ``AZURE_OPENAI_API_VERSION``. + ``AZURE_OPENAI_DEPLOYMENT_NAME``, and ``AZURE_OPENAI_API_VERSION``. """ if model_id is not None and model is None: import warnings @@ -334,17 +342,23 @@ def __init__( env_file_path: str | None = None, env_file_encoding: str | None = None, ) -> None: - """Initialize an OpenAI embedding client with OpenAI-only routing. - - Use this overload when you want the generic OpenAI embeddings endpoint. The - constructor reads ``model`` from the explicit argument first and then from - ``OPENAI_EMBEDDING_MODEL``, falling back to ``OPENAI_MODEL``. Authentication and endpoint settings come from - the explicit ``api_key``, ``org_id``, and ``base_url`` arguments first and - then from ``OPENAI_API_KEY``, ``OPENAI_ORG_ID``, and ``OPENAI_BASE_URL`` in - ``env_file_path`` or the process environment. + """Initialize an OpenAI embedding client. - Azure-specific environment variables are ignored for this overload unless an - explicit Azure signal is provided via the Azure overload shape. + Keyword Args: + model: Embedding model identifier. 
When not provided, the constructor reads + ``OPENAI_EMBEDDING_MODEL`` and then ``OPENAI_MODEL``. + api_key: API key. When not provided explicitly, the constructor reads + ``OPENAI_API_KEY``. A callable API key is also supported. + org_id: OpenAI organization ID. When not provided explicitly, the constructor reads + ``OPENAI_ORG_ID``. + default_headers: Additional HTTP headers. + async_client: Pre-configured OpenAI client. + base_url: Base URL override. When not provided explicitly, the constructor reads + ``OPENAI_BASE_URL``. + otel_provider_name: Optional telemetry provider name override. + env_file_path: Optional ``.env`` file that is checked before the process environment + for ``OPENAI_*`` values. + env_file_encoding: Encoding for the ``.env`` file. """ ... @@ -364,24 +378,30 @@ def __init__( env_file_path: str | None = None, env_file_encoding: str | None = None, ) -> None: - """Initialize an OpenAI embedding client with Azure routing. - - Use this overload when you want Azure OpenAI embeddings. Passing - ``azure_endpoint`` or ``credential`` is an explicit Azure signal and forces - Azure routing even when ``OPENAI_API_KEY`` is also present. ``api_version`` - configures Azure requests after Azure routing is selected, but it does not - select Azure on its own. - The constructor reads the deployment name from the explicit ``model`` - argument first and then from ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME``, - falling back to ``AZURE_OPENAI_DEPLOYMENT_NAME``, ``OPENAI_EMBEDDING_MODEL``, - and then ``OPENAI_MODEL``. - - Authentication and endpoint settings come from the explicit Azure arguments - first and then from ``AZURE_OPENAI_ENDPOINT``, ``AZURE_OPENAI_BASE_URL``, - ``AZURE_OPENAI_API_KEY``, and ``AZURE_OPENAI_API_VERSION`` in - ``env_file_path`` or the process environment. ``credential`` is the - preferred Azure auth surface; ``api_key`` remains supported for Azure key - auth and callable token providers for compatibility. 
+ """Initialize an OpenAI embedding client. + + Keyword Args: + model: Embedding deployment name. When not provided, the constructor reads + ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME`` and then + ``AZURE_OPENAI_DEPLOYMENT_NAME``. + azure_endpoint: Azure resource endpoint. When not provided explicitly, the constructor + reads ``AZURE_OPENAI_ENDPOINT``. + credential: Azure credential or token provider for Entra auth. + api_version: Azure API version. When not provided explicitly, the constructor reads + ``AZURE_OPENAI_API_VERSION`` and then uses the embedding default. + api_key: API key. For Azure this can be used instead of ``AZURE_OPENAI_API_KEY`` for key + auth. A callable token provider is also accepted, but ``credential`` is the preferred + Azure auth surface. + base_url: Base URL override. When not provided explicitly, the constructor reads + ``AZURE_OPENAI_BASE_URL``. Use this instead of ``azure_endpoint`` when you want + to pass the full ``.../openai/v1`` base URL directly. + default_headers: Additional HTTP headers. + async_client: Pre-configured client. Passing ``AsyncAzureOpenAI`` keeps the client on + Azure; passing ``AsyncOpenAI`` keeps the client on OpenAI. + otel_provider_name: Optional telemetry provider name override. + env_file_path: Optional ``.env`` file that is checked before process environment + variables for ``AZURE_OPENAI_*`` values. + env_file_encoding: Encoding for the ``.env`` file. """ ... @@ -407,9 +427,8 @@ def __init__( model: Embedding model or Azure OpenAI deployment name. When not provided, the constructor reads ``OPENAI_EMBEDDING_MODEL`` and then ``OPENAI_MODEL`` for OpenAI routing. For Azure routing it first checks - ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME``, then - ``AZURE_OPENAI_DEPLOYMENT_NAME``, then ``OPENAI_EMBEDDING_MODEL``, and - finally ``OPENAI_MODEL``. + ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME`` and then + ``AZURE_OPENAI_DEPLOYMENT_NAME``. api_key: API key override. For OpenAI routing this maps to ``OPENAI_API_KEY``. 
For Azure routing this can be used instead of ``AZURE_OPENAI_API_KEY`` for key auth. A callable token provider is also accepted for backwards compatibility, @@ -447,8 +466,7 @@ def __init__( ``OPENAI_MODEL``, ``OPENAI_ORG_ID``, and ``OPENAI_BASE_URL``. Azure routing reads ``AZURE_OPENAI_ENDPOINT``, ``AZURE_OPENAI_BASE_URL``, ``AZURE_OPENAI_API_KEY``, ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME``, - ``AZURE_OPENAI_DEPLOYMENT_NAME``, ``OPENAI_EMBEDDING_MODEL``, - ``OPENAI_MODEL``, and ``AZURE_OPENAI_API_VERSION``. + ``AZURE_OPENAI_DEPLOYMENT_NAME``, and ``AZURE_OPENAI_API_VERSION``. Examples: .. code-block:: python diff --git a/python/packages/openai/agent_framework_openai/_shared.py b/python/packages/openai/agent_framework_openai/_shared.py index d92a5afda9..f9c1c6e6ae 100644 --- a/python/packages/openai/agent_framework_openai/_shared.py +++ b/python/packages/openai/agent_framework_openai/_shared.py @@ -272,14 +272,6 @@ def load_openai_service_settings( azure_deployment = getattr(client, "_azure_deployment", None) if isinstance(azure_deployment, str) and azure_deployment: resolved_azure_deployment = azure_deployment - if resolved_azure_deployment is None: - if openai_settings is None: - openai_settings = load_settings( - OpenAISettings, - env_prefix="OPENAI_", - **openai_settings_kwargs, - ) - resolved_azure_deployment = _resolve_named_setting(openai_settings, openai_model_fields) if resolved_azure_deployment: azure_settings["deployment_name"] = resolved_azure_deployment client_args["azure_deployment"] = resolved_azure_deployment @@ -287,8 +279,12 @@ def load_openai_service_settings( deployment_env_guidance = _join_env_names([ AZURE_DEPLOYMENT_ENV_VARS[field] for field in azure_deployment_fields ]) - openai_model_guidance = _join_env_names([OPENAI_MODEL_ENV_VARS[field] for field in openai_model_fields]) - if checked_openai: + has_azure_configuration = ( + client is not None + or azure_settings.get("endpoint") is not None + or azure_settings.get("base_url") is not None 
+ ) + if checked_openai and not has_azure_configuration: raise SettingNotFoundError( "OpenAI credentials are required. Provide the 'api_key' parameter or set 'OPENAI_API_KEY'. " "To use Azure OpenAI instead, pass 'azure_endpoint' or set 'AZURE_OPENAI_ENDPOINT' or " @@ -296,8 +292,7 @@ def load_openai_service_settings( ) raise SettingNotFoundError( "Azure OpenAI client requires a deployment name, which can be provided via the 'model' parameter, " - f"the {deployment_env_guidance} environment variable, or the {openai_model_guidance} " - "environment variable." + f"or the {deployment_env_guidance} environment variable." ) if client: return azure_settings, client, True # type: ignore[return-value] diff --git a/python/packages/openai/tests/openai/test_openai_chat_client_azure.py b/python/packages/openai/tests/openai/test_openai_chat_client_azure.py index e1c34d41c5..21b37e8684 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_chat_client_azure.py @@ -10,6 +10,7 @@ import pytest from agent_framework import Agent, AgentResponse, ChatResponse, Content, Message, SupportsChatGetResponse, tool +from agent_framework.exceptions import SettingNotFoundError from azure.core.credentials_async import AsyncTokenCredential from azure.identity.aio import AzureCliCredential from openai import AsyncAzureOpenAI @@ -157,29 +158,27 @@ def test_init_falls_back_to_generic_azure_deployment_env( assert isinstance(client.client, AsyncAzureOpenAI) -def test_init_falls_back_to_openai_responses_model_for_azure_env( +def test_init_does_not_fall_back_to_openai_responses_model_for_azure_env( monkeypatch, azure_openai_unit_test_env: dict[str, str] ) -> None: monkeypatch.delenv("AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME", raising=False) monkeypatch.delenv("AZURE_OPENAI_DEPLOYMENT_NAME", raising=False) monkeypatch.setenv("OPENAI_RESPONSES_MODEL", "test_responses_model") - client = OpenAIChatClient() - - assert client.model 
== "test_responses_model" - assert isinstance(client.client, AsyncAzureOpenAI) + with pytest.raises(SettingNotFoundError, match="Azure OpenAI client requires a deployment name"): + OpenAIChatClient() -def test_init_falls_back_to_openai_model_for_azure_env(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: +def test_init_does_not_fall_back_to_openai_model_for_azure_env( + monkeypatch, azure_openai_unit_test_env: dict[str, str] +) -> None: monkeypatch.delenv("AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME", raising=False) monkeypatch.delenv("AZURE_OPENAI_DEPLOYMENT_NAME", raising=False) monkeypatch.delenv("OPENAI_RESPONSES_MODEL", raising=False) monkeypatch.setenv("OPENAI_MODEL", "gpt-5") - client = OpenAIChatClient() - - assert client.model == "gpt-5" - assert isinstance(client.client, AsyncAzureOpenAI) + with pytest.raises(SettingNotFoundError, match="Azure OpenAI client requires a deployment name"): + OpenAIChatClient() def test_init_with_credential_wraps_async_token_credential( diff --git a/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py b/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py index 36e0c52cd8..3ae3b80602 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py @@ -17,6 +17,7 @@ SupportsChatGetResponse, tool, ) +from agent_framework.exceptions import SettingNotFoundError from azure.core.credentials_async import AsyncTokenCredential from azure.identity.aio import AzureCliCredential from openai import AsyncAzureOpenAI @@ -137,29 +138,27 @@ def test_init_falls_back_to_generic_azure_deployment_env( assert isinstance(client.client, AsyncAzureOpenAI) -def test_init_falls_back_to_openai_chat_model_for_azure_env( +def test_init_does_not_fall_back_to_openai_chat_model_for_azure_env( monkeypatch, azure_openai_unit_test_env: dict[str, str] ) -> None: 
monkeypatch.delenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME", raising=False) monkeypatch.delenv("AZURE_OPENAI_DEPLOYMENT_NAME", raising=False) monkeypatch.setenv("OPENAI_CHAT_MODEL", "test_chat_model") - client = OpenAIChatCompletionClient() - - assert client.model == "test_chat_model" - assert isinstance(client.client, AsyncAzureOpenAI) + with pytest.raises(SettingNotFoundError, match="Azure OpenAI client requires a deployment name"): + OpenAIChatCompletionClient() -def test_init_falls_back_to_openai_model_for_azure_env(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: +def test_init_does_not_fall_back_to_openai_model_for_azure_env( + monkeypatch, azure_openai_unit_test_env: dict[str, str] +) -> None: monkeypatch.delenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME", raising=False) monkeypatch.delenv("AZURE_OPENAI_DEPLOYMENT_NAME", raising=False) monkeypatch.delenv("OPENAI_CHAT_MODEL", raising=False) monkeypatch.setenv("OPENAI_MODEL", "gpt-5") - client = OpenAIChatCompletionClient() - - assert client.model == "gpt-5" - assert isinstance(client.client, AsyncAzureOpenAI) + with pytest.raises(SettingNotFoundError, match="Azure OpenAI client requires a deployment name"): + OpenAIChatCompletionClient() def test_init_with_credential_wraps_async_token_credential( diff --git a/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py b/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py index 00fdea5e6b..be832231df 100644 --- a/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py @@ -6,6 +6,7 @@ from unittest.mock import MagicMock, patch import pytest +from agent_framework.exceptions import SettingNotFoundError from azure.core.credentials_async import AsyncTokenCredential from azure.identity.aio import AzureCliCredential from openai import AsyncAzureOpenAI @@ -74,29 +75,27 @@ def test_init_falls_back_to_generic_azure_deployment_env( 
assert isinstance(client.client, AsyncAzureOpenAI) -def test_init_falls_back_to_openai_embedding_model_for_azure_env( +def test_init_does_not_fall_back_to_openai_embedding_model_for_azure_env( monkeypatch, azure_openai_unit_test_env: dict[str, str] ) -> None: monkeypatch.delenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME", raising=False) monkeypatch.delenv("AZURE_OPENAI_DEPLOYMENT_NAME", raising=False) monkeypatch.setenv("OPENAI_EMBEDDING_MODEL", "text-embedding-3-small") - client = OpenAIEmbeddingClient() + with pytest.raises(SettingNotFoundError, match="Azure OpenAI client requires a deployment name"): + OpenAIEmbeddingClient() - assert client.model == "text-embedding-3-small" - assert isinstance(client.client, AsyncAzureOpenAI) - -def test_init_falls_back_to_openai_model_for_azure_env(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: +def test_init_does_not_fall_back_to_openai_model_for_azure_env( + monkeypatch, azure_openai_unit_test_env: dict[str, str] +) -> None: monkeypatch.delenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME", raising=False) monkeypatch.delenv("AZURE_OPENAI_DEPLOYMENT_NAME", raising=False) monkeypatch.delenv("OPENAI_EMBEDDING_MODEL", raising=False) monkeypatch.setenv("OPENAI_MODEL", "gpt-5") - client = OpenAIEmbeddingClient() - - assert client.model == "gpt-5" - assert isinstance(client.client, AsyncAzureOpenAI) + with pytest.raises(SettingNotFoundError, match="Azure OpenAI client requires a deployment name"): + OpenAIEmbeddingClient() def test_openai_api_key_wins_over_azure_env(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: From 44fd8799c95c3e25884925594c1ac217b14a15da Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 26 Mar 2026 16:33:24 +0100 Subject: [PATCH 13/30] Python: remove embedding routing wording Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../_embedding_client.py | 62 +++++++++---------- 1 file changed, 30 insertions(+), 32 deletions(-) diff --git 
a/python/packages/openai/agent_framework_openai/_embedding_client.py b/python/packages/openai/agent_framework_openai/_embedding_client.py index 76d0628b99..9cb37ad4df 100644 --- a/python/packages/openai/agent_framework_openai/_embedding_client.py +++ b/python/packages/openai/agent_framework_openai/_embedding_client.py @@ -164,26 +164,25 @@ def __init__( Keyword Args: model: Embedding model or Azure OpenAI deployment name. When not provided, the constructor reads ``OPENAI_EMBEDDING_MODEL`` and then ``OPENAI_MODEL`` - for OpenAI routing. For Azure routing it first checks - ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME`` and then - ``AZURE_OPENAI_DEPLOYMENT_NAME``. + for OpenAI. For Azure it first checks ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME`` + and then ``AZURE_OPENAI_DEPLOYMENT_NAME``. model_id: Deprecated alias for ``model``. - api_key: API key override. For OpenAI routing this maps to ``OPENAI_API_KEY``. - For Azure routing this can be used instead of ``AZURE_OPENAI_API_KEY`` for key - auth. A callable token provider is also accepted for backwards compatibility, + api_key: API key override. For OpenAI this maps to ``OPENAI_API_KEY``. + For Azure this can be used instead of ``AZURE_OPENAI_API_KEY`` for key auth. + A callable token provider is also accepted for backwards compatibility, but ``credential`` is the preferred Azure auth surface. credential: Azure credential or token provider for Azure OpenAI auth. Passing this is an explicit Azure signal, even when ``OPENAI_API_KEY`` is also configured. Credential objects require the optional ``azure-identity`` package. - org_id: OpenAI organization ID. Used only for OpenAI routing and resolved from + org_id: OpenAI organization ID. Used only for OpenAI and resolved from ``OPENAI_ORG_ID`` when not provided. - base_url: Base URL override. For OpenAI routing this maps to ``OPENAI_BASE_URL``. - For Azure routing this may be used instead of ``azure_endpoint`` when you want + base_url: Base URL override. 
For OpenAI this maps to ``OPENAI_BASE_URL``. + For Azure this may be used instead of ``azure_endpoint`` when you want to pass the full ``.../openai/v1`` base URL directly. - azure_endpoint: Azure resource endpoint. When not provided explicitly, Azure routing + azure_endpoint: Azure resource endpoint. When not provided explicitly, Azure falls back to ``AZURE_OPENAI_ENDPOINT``. - api_version: Azure API version to use once Azure routing is selected. When - not provided explicitly, Azure routing falls back to + api_version: Azure API version to use for Azure requests. When not provided explicitly, + Azure falls back to ``AZURE_OPENAI_API_VERSION`` and then the embedding default. default_headers: Additional HTTP headers. async_client: Pre-configured client. Passing ``AsyncAzureOpenAI`` keeps the client on @@ -195,15 +194,15 @@ def __init__( kwargs: Additional keyword arguments forwarded to ``BaseEmbeddingClient``. Notes: - Environment resolution and routing precedence are: + Environment resolution precedence is: 1. Explicit Azure inputs (``azure_endpoint`` or ``credential``) 2. Explicit OpenAI API key or ``OPENAI_API_KEY`` 3. Azure environment fallback - OpenAI routing reads ``OPENAI_API_KEY``, ``OPENAI_EMBEDDING_MODEL``, - ``OPENAI_MODEL``, ``OPENAI_ORG_ID``, and ``OPENAI_BASE_URL``. Azure routing - reads ``AZURE_OPENAI_ENDPOINT``, ``AZURE_OPENAI_BASE_URL``, + OpenAI reads ``OPENAI_API_KEY``, ``OPENAI_EMBEDDING_MODEL``, + ``OPENAI_MODEL``, ``OPENAI_ORG_ID``, and ``OPENAI_BASE_URL``. Azure reads + ``AZURE_OPENAI_ENDPOINT``, ``AZURE_OPENAI_BASE_URL``, ``AZURE_OPENAI_API_KEY``, ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME``, ``AZURE_OPENAI_DEPLOYMENT_NAME``, and ``AZURE_OPENAI_API_VERSION``. """ @@ -426,28 +425,27 @@ def __init__( Keyword Args: model: Embedding model or Azure OpenAI deployment name. When not provided, the constructor reads ``OPENAI_EMBEDDING_MODEL`` and then ``OPENAI_MODEL`` - for OpenAI routing. 
For Azure routing it first checks - ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME`` and then - ``AZURE_OPENAI_DEPLOYMENT_NAME``. - api_key: API key override. For OpenAI routing this maps to ``OPENAI_API_KEY``. - For Azure routing this can be used instead of ``AZURE_OPENAI_API_KEY`` for key - auth. A callable token provider is also accepted for backwards compatibility, + for OpenAI. For Azure it first checks ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME`` + and then ``AZURE_OPENAI_DEPLOYMENT_NAME``. + api_key: API key override. For OpenAI this maps to ``OPENAI_API_KEY``. + For Azure this can be used instead of ``AZURE_OPENAI_API_KEY`` for key auth. + A callable token provider is also accepted for backwards compatibility, but ``credential`` is the preferred Azure auth surface. credential: Azure credential or token provider for Azure OpenAI auth. Passing this is an explicit Azure signal, even when ``OPENAI_API_KEY`` is also configured. Credential objects require the optional ``azure-identity`` package. - org_id: OpenAI organization ID. Used only for OpenAI routing and resolved from + org_id: OpenAI organization ID. Used only for OpenAI and resolved from ``OPENAI_ORG_ID`` when not provided. default_headers: Additional HTTP headers. async_client: Pre-configured client. Passing ``AsyncAzureOpenAI`` keeps the client on Azure; passing ``AsyncOpenAI`` keeps the client on OpenAI. - base_url: Base URL override. For OpenAI routing this maps to ``OPENAI_BASE_URL``. - For Azure routing this may be used instead of ``azure_endpoint`` when you want + base_url: Base URL override. For OpenAI this maps to ``OPENAI_BASE_URL``. + For Azure this may be used instead of ``azure_endpoint`` when you want to pass the full ``.../openai/v1`` base URL directly. - azure_endpoint: Azure resource endpoint. When not provided explicitly, Azure routing + azure_endpoint: Azure resource endpoint. When not provided explicitly, Azure falls back to ``AZURE_OPENAI_ENDPOINT``. 
- api_version: Azure API version to use once Azure routing is selected. When - not provided explicitly, Azure routing falls back to + api_version: Azure API version to use for Azure requests. When not provided explicitly, + Azure falls back to ``AZURE_OPENAI_API_VERSION`` and then the embedding default. otel_provider_name: Override the OpenTelemetry provider name. env_file_path: Optional ``.env`` file that is checked before process environment @@ -456,15 +454,15 @@ def __init__( env_file_encoding: Encoding for the ``.env`` file. Notes: - Environment resolution and routing precedence are: + Environment resolution precedence is: 1. Explicit Azure inputs (``azure_endpoint`` or ``credential``) 2. Explicit OpenAI API key or ``OPENAI_API_KEY`` 3. Azure environment fallback - OpenAI routing reads ``OPENAI_API_KEY``, ``OPENAI_EMBEDDING_MODEL``, - ``OPENAI_MODEL``, ``OPENAI_ORG_ID``, and ``OPENAI_BASE_URL``. Azure routing - reads ``AZURE_OPENAI_ENDPOINT``, ``AZURE_OPENAI_BASE_URL``, + OpenAI reads ``OPENAI_API_KEY``, ``OPENAI_EMBEDDING_MODEL``, + ``OPENAI_MODEL``, ``OPENAI_ORG_ID``, and ``OPENAI_BASE_URL``. Azure reads + ``AZURE_OPENAI_ENDPOINT``, ``AZURE_OPENAI_BASE_URL``, ``AZURE_OPENAI_API_KEY``, ``AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME``, ``AZURE_OPENAI_DEPLOYMENT_NAME``, and ``AZURE_OPENAI_API_VERSION``. 
From 4fb0df0bbd7dbedd05723220b0c6fa3e6d6367a1 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 26 Mar 2026 17:09:07 +0100 Subject: [PATCH 14/30] Python: run embedding Azure integration tests Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .github/workflows/python-integration-tests.yml | 1 + .github/workflows/python-merge-tests.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/workflows/python-integration-tests.yml b/.github/workflows/python-integration-tests.yml index 1b1c8066c6..d809bead5a 100644 --- a/.github/workflows/python-integration-tests.yml +++ b/.github/workflows/python-integration-tests.yml @@ -126,6 +126,7 @@ jobs: uv run pytest --import-mode=importlib packages/openai/tests/openai/test_openai_chat_completion_client_azure.py packages/openai/tests/openai/test_openai_chat_client_azure.py + packages/openai/tests/openai/test_openai_embedding_client_azure.py packages/azure-ai/tests/azure_openai -m integration -n logical --dist worksteal diff --git a/.github/workflows/python-merge-tests.yml b/.github/workflows/python-merge-tests.yml index a46beb40cb..70474e1fe3 100644 --- a/.github/workflows/python-merge-tests.yml +++ b/.github/workflows/python-merge-tests.yml @@ -223,6 +223,7 @@ jobs: uv run pytest --import-mode=importlib packages/openai/tests/openai/test_openai_chat_completion_client_azure.py packages/openai/tests/openai/test_openai_chat_client_azure.py + packages/openai/tests/openai/test_openai_embedding_client_azure.py packages/azure-ai/tests/azure_openai -m integration -n logical --dist worksteal From d452ba37981398715cb413fd1bef517f677f985b Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 26 Mar 2026 17:50:31 +0100 Subject: [PATCH 15/30] changed variable name --- python/packages/openai/agent_framework_openai/_shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/packages/openai/agent_framework_openai/_shared.py b/python/packages/openai/agent_framework_openai/_shared.py 
index f9c1c6e6ae..9f23bdfca8 100644 --- a/python/packages/openai/agent_framework_openai/_shared.py +++ b/python/packages/openai/agent_framework_openai/_shared.py @@ -199,7 +199,7 @@ def load_openai_service_settings( The generic OpenAI clients primarily read from ``OPENAI_*`` variables. Azure-specific environment variables are used only when an explicit Azure signal is present - (``azure_endpoint`` or ``credential``) or when no explicit + (``endpoint`` or ``credential``) or when no explicit OpenAI API key is available. """ # Merge APP_INFO into the headers From 97b2bdd5c5809c7da698d238a25798ee0d1dba2e Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 26 Mar 2026 17:54:43 +0100 Subject: [PATCH 16/30] Python: expand OpenAI package README Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- python/packages/openai/README.md | 101 ++++++++++++++++++++++++++++--- 1 file changed, 94 insertions(+), 7 deletions(-) diff --git a/python/packages/openai/README.md b/python/packages/openai/README.md index d9cf09dde9..a2d0b0cddd 100644 --- a/python/packages/openai/README.md +++ b/python/packages/openai/README.md @@ -1,21 +1,108 @@ # agent-framework-openai -OpenAI integration for Microsoft Agent Framework. Provides chat clients for the OpenAI Responses API and Chat Completions API. +OpenAI integration for Microsoft Agent Framework. + +This package provides: + +- `OpenAIChatClient` for the OpenAI Responses API +- `OpenAIChatCompletionClient` for the Chat Completions API +- `OpenAIEmbeddingClient` for embeddings ## Installation ```bash -pip install agent-framework-openai +pip install agent-framework-openai --pre +``` + +## Which chat client should I use? + +Use `OpenAIChatClient` for new work unless you specifically need the Chat Completions API. + +- `OpenAIChatClient` uses the Responses API and is the preferred general-purpose chat client. 
+- `OpenAIChatCompletionClient` uses the Chat Completions API and is mainly for compatibility with + existing Chat Completions-based integrations. + +The deprecated `OpenAIResponsesClient` alias points to `OpenAIChatClient`. + +## Environment variables + +### OpenAI + +These variables are used when the client is configured for OpenAI: + +| Variable | Purpose | +| --- | --- | +| `OPENAI_API_KEY` | OpenAI API key | +| `OPENAI_ORG_ID` | OpenAI organization ID | +| `OPENAI_BASE_URL` | Custom OpenAI-compatible base URL | +| `OPENAI_MODEL` | Generic fallback model | +| `OPENAI_RESPONSES_MODEL` | Preferred model for `OpenAIChatClient` | +| `OPENAI_CHAT_MODEL` | Preferred model for `OpenAIChatCompletionClient` | +| `OPENAI_EMBEDDING_MODEL` | Preferred model for `OpenAIEmbeddingClient` | + +Model lookup order: + +- `OpenAIChatClient`: `OPENAI_RESPONSES_MODEL` -> `OPENAI_MODEL` +- `OpenAIChatCompletionClient`: `OPENAI_CHAT_MODEL` -> `OPENAI_MODEL` +- `OpenAIEmbeddingClient`: `OPENAI_EMBEDDING_MODEL` -> `OPENAI_MODEL` + +### Azure OpenAI + +These variables are used when the client is configured for Azure OpenAI: + +| Variable | Purpose | +| --- | --- | +| `AZURE_OPENAI_ENDPOINT` | Azure OpenAI resource endpoint | +| `AZURE_OPENAI_BASE_URL` | Full Azure OpenAI base URL (`.../openai/v1`) | +| `AZURE_OPENAI_API_KEY` | Azure OpenAI API key | +| `AZURE_OPENAI_API_VERSION` | Azure OpenAI API version | +| `AZURE_OPENAI_DEPLOYMENT_NAME` | Generic fallback deployment | +| `AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME` | Preferred deployment for `OpenAIChatClient` | +| `AZURE_OPENAI_CHAT_DEPLOYMENT_NAME` | Preferred deployment for `OpenAIChatCompletionClient` | +| `AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME` | Preferred deployment for `OpenAIEmbeddingClient` | + +Deployment lookup order: + +- `OpenAIChatClient`: `AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME` -> `AZURE_OPENAI_DEPLOYMENT_NAME` +- `OpenAIChatCompletionClient`: `AZURE_OPENAI_CHAT_DEPLOYMENT_NAME` -> `AZURE_OPENAI_DEPLOYMENT_NAME` +- 
`OpenAIEmbeddingClient`: `AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME` -> `AZURE_OPENAI_DEPLOYMENT_NAME` + +When both OpenAI and Azure environment variables are present, the generic clients prefer OpenAI +when `OPENAI_API_KEY` is configured. To use Azure explicitly, pass `azure_endpoint` or +`credential`. + +## OpenAI example + +```python +from agent_framework.openai import OpenAIChatClient + +client = OpenAIChatClient(model="gpt-4.1") ``` -## Usage +## Azure OpenAI example ```python +from azure.identity.aio import AzureCliCredential + from agent_framework.openai import OpenAIChatClient -client = OpenAIChatClient(model_id="gpt-4o") + +async with AzureCliCredential() as credential: + client = OpenAIChatClient( + model="my-responses-deployment", + azure_endpoint="https://my-resource.openai.azure.com", + credential=credential, + ) ``` -When both OpenAI and Azure environment variables are present, the generic OpenAI clients prefer -OpenAI whenever `OPENAI_API_KEY` is configured. To force Azure routing, pass an explicit Azure input -such as `credential`, `azure_endpoint`, or `api_version`. +## ChatClient vs ChatCompletionClient + +Use `OpenAIChatClient` when you want the Responses API as your default chat surface. 
+ +Use `OpenAIChatCompletionClient` when you specifically need the Chat Completions API: + +```python +from agent_framework.openai import OpenAIChatCompletionClient + +client = OpenAIChatCompletionClient(model="gpt-4o-mini") +``` From 77a17ee8006f3863c53053054f6b0bd7a3f0f662 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 26 Mar 2026 17:56:00 +0100 Subject: [PATCH 17/30] clarified readme --- python/packages/openai/README.md | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/python/packages/openai/README.md b/python/packages/openai/README.md index a2d0b0cddd..e04a1f947a 100644 --- a/python/packages/openai/README.md +++ b/python/packages/openai/README.md @@ -86,13 +86,11 @@ from azure.identity.aio import AzureCliCredential from agent_framework.openai import OpenAIChatClient - -async with AzureCliCredential() as credential: - client = OpenAIChatClient( - model="my-responses-deployment", - azure_endpoint="https://my-resource.openai.azure.com", - credential=credential, - ) +client = OpenAIChatClient( + model="my-responses-deployment", + azure_endpoint="https://my-resource.openai.azure.com", + credential=AzureCliCredential(), +) ``` ## ChatClient vs ChatCompletionClient From 828c43b33e21b03472f618f528585f1f0ed79a13 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 26 Mar 2026 18:50:21 +0100 Subject: [PATCH 18/30] Python: fix Azure OpenAI integration setup Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .github/workflows/python-integration-tests.yml | 1 + .github/workflows/python-merge-tests.yml | 1 + .../tests/azure_openai/test_azure_embedding_client.py | 5 +---- .../tests/openai/test_openai_embedding_client_azure.py | 5 +---- 4 files changed, 4 insertions(+), 8 deletions(-) diff --git a/.github/workflows/python-integration-tests.yml b/.github/workflows/python-integration-tests.yml index d809bead5a..f372985ec7 100644 --- a/.github/workflows/python-integration-tests.yml +++ 
b/.github/workflows/python-integration-tests.yml @@ -101,6 +101,7 @@ jobs: AZURE_OPENAI_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }} AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__EMBEDDINGDEPLOYMENTNAME }} AZURE_OPENAI_ENDPOINT: ${{ vars.AZUREOPENAI__ENDPOINT }} + AZURE_OPENAI_API_VERSION: ${{ vars.AZUREOPENAI__APIVERSION || '2024-12-01-preview' }} defaults: run: working-directory: python diff --git a/.github/workflows/python-merge-tests.yml b/.github/workflows/python-merge-tests.yml index 70474e1fe3..7f2e84635c 100644 --- a/.github/workflows/python-merge-tests.yml +++ b/.github/workflows/python-merge-tests.yml @@ -200,6 +200,7 @@ jobs: AZURE_OPENAI_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }} AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__EMBEDDINGDEPLOYMENTNAME }} AZURE_OPENAI_ENDPOINT: ${{ vars.AZUREOPENAI__ENDPOINT }} + AZURE_OPENAI_API_VERSION: ${{ vars.AZUREOPENAI__APIVERSION || '2024-12-01-preview' }} defaults: run: working-directory: python diff --git a/python/packages/azure-ai/tests/azure_openai/test_azure_embedding_client.py b/python/packages/azure-ai/tests/azure_openai/test_azure_embedding_client.py index b27bc9fcd9..6e633e2f1f 100644 --- a/python/packages/azure-ai/tests/azure_openai/test_azure_embedding_client.py +++ b/python/packages/azure-ai/tests/azure_openai/test_azure_embedding_client.py @@ -108,10 +108,7 @@ def test_azure_otel_provider_name(azure_embedding_unit_test_env: None) -> None: skip_if_azure_openai_integration_tests_disabled = pytest.mark.skipif( os.getenv("AZURE_OPENAI_ENDPOINT", "") in ("", "https://test-endpoint.com") - or ( - os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME", "") == "" - and os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME", "") == "" - ), + or os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME", "") == "", reason="No Azure OpenAI endpoint or embedding deployment provided; skipping integration tests.", ) diff --git 
a/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py b/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py index be832231df..ccec9caef8 100644 --- a/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py @@ -17,10 +17,7 @@ skip_if_azure_openai_integration_tests_disabled = pytest.mark.skipif( os.getenv("AZURE_OPENAI_ENDPOINT", "") in ("", "https://test-endpoint.openai.azure.com") - or ( - os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME", "") == "" - and os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME", "") == "" - ), + or os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME", "") == "", reason="No real Azure OpenAI endpoint or embedding deployment provided; skipping integration tests.", ) From 63adedb157988856733ef18c483bb62a197d0b99 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 26 Mar 2026 18:51:46 +0100 Subject: [PATCH 19/30] Python: correct Azure integration env mapping Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .github/workflows/python-integration-tests.yml | 2 +- .github/workflows/python-merge-tests.yml | 2 +- .../tests/azure_openai/test_azure_embedding_client.py | 5 ++++- .../tests/openai/test_openai_embedding_client_azure.py | 5 ++++- 4 files changed, 10 insertions(+), 4 deletions(-) diff --git a/.github/workflows/python-integration-tests.yml b/.github/workflows/python-integration-tests.yml index f372985ec7..9d4306e660 100644 --- a/.github/workflows/python-integration-tests.yml +++ b/.github/workflows/python-integration-tests.yml @@ -96,7 +96,7 @@ jobs: environment: integration timeout-minutes: 60 env: - AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }} + AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__CHATDEPLOYMENTNAME }} AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }} AZURE_OPENAI_DEPLOYMENT_NAME: 
${{ vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }} AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__EMBEDDINGDEPLOYMENTNAME }} diff --git a/.github/workflows/python-merge-tests.yml b/.github/workflows/python-merge-tests.yml index 7f2e84635c..95963da6a7 100644 --- a/.github/workflows/python-merge-tests.yml +++ b/.github/workflows/python-merge-tests.yml @@ -195,7 +195,7 @@ jobs: runs-on: ubuntu-latest environment: integration env: - AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }} + AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__CHATDEPLOYMENTNAME }} AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }} AZURE_OPENAI_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }} AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__EMBEDDINGDEPLOYMENTNAME }} diff --git a/python/packages/azure-ai/tests/azure_openai/test_azure_embedding_client.py b/python/packages/azure-ai/tests/azure_openai/test_azure_embedding_client.py index 6e633e2f1f..b27bc9fcd9 100644 --- a/python/packages/azure-ai/tests/azure_openai/test_azure_embedding_client.py +++ b/python/packages/azure-ai/tests/azure_openai/test_azure_embedding_client.py @@ -108,7 +108,10 @@ def test_azure_otel_provider_name(azure_embedding_unit_test_env: None) -> None: skip_if_azure_openai_integration_tests_disabled = pytest.mark.skipif( os.getenv("AZURE_OPENAI_ENDPOINT", "") in ("", "https://test-endpoint.com") - or os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME", "") == "", + or ( + os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME", "") == "" + and os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME", "") == "" + ), reason="No Azure OpenAI endpoint or embedding deployment provided; skipping integration tests.", ) diff --git a/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py b/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py index ccec9caef8..be832231df 100644 --- 
a/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py @@ -17,7 +17,10 @@ skip_if_azure_openai_integration_tests_disabled = pytest.mark.skipif( os.getenv("AZURE_OPENAI_ENDPOINT", "") in ("", "https://test-endpoint.openai.azure.com") - or os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME", "") == "", + or ( + os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME", "") == "" + and os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME", "") == "" + ), reason="No real Azure OpenAI endpoint or embedding deployment provided; skipping integration tests.", ) From 57f80092a803f9a767aea6c4ad0650f45eb4e8b5 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 26 Mar 2026 20:10:53 +0100 Subject: [PATCH 20/30] updated code to fix int tests --- .github/workflows/python-integration-tests.yml | 6 +++--- .github/workflows/python-merge-tests.yml | 6 +++--- .../packages/openai/agent_framework_openai/_chat_client.py | 1 + .../agent_framework_openai/_chat_completion_client.py | 2 +- python/packages/openai/agent_framework_openai/_shared.py | 6 +++++- .../openai/tests/openai/test_openai_chat_client_azure.py | 7 ------- 6 files changed, 13 insertions(+), 15 deletions(-) diff --git a/.github/workflows/python-integration-tests.yml b/.github/workflows/python-integration-tests.yml index 9d4306e660..86fe83c261 100644 --- a/.github/workflows/python-integration-tests.yml +++ b/.github/workflows/python-integration-tests.yml @@ -99,7 +99,7 @@ jobs: AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__CHATDEPLOYMENTNAME }} AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }} AZURE_OPENAI_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }} - AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__EMBEDDINGDEPLOYMENTNAME }} + AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME: ${{ vars.AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME }} AZURE_OPENAI_ENDPOINT: ${{ 
vars.AZUREOPENAI__ENDPOINT }} AZURE_OPENAI_API_VERSION: ${{ vars.AZUREOPENAI__APIVERSION || '2024-12-01-preview' }} defaults: @@ -251,8 +251,8 @@ jobs: --retries 2 --retry-delay 5 # Azure AI integration tests - python-tests-azure-ai: - name: Python Integration Tests - Azure AI + python-tests-foundry: + name: Python Integration Tests - Foundry runs-on: ubuntu-latest environment: integration timeout-minutes: 60 diff --git a/.github/workflows/python-merge-tests.yml b/.github/workflows/python-merge-tests.yml index 95963da6a7..6ca88e7a59 100644 --- a/.github/workflows/python-merge-tests.yml +++ b/.github/workflows/python-merge-tests.yml @@ -198,7 +198,7 @@ jobs: AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__CHATDEPLOYMENTNAME }} AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }} AZURE_OPENAI_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }} - AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__EMBEDDINGDEPLOYMENTNAME }} + AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME: ${{ vars.AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME }} AZURE_OPENAI_ENDPOINT: ${{ vars.AZUREOPENAI__ENDPOINT }} AZURE_OPENAI_API_VERSION: ${{ vars.AZUREOPENAI__APIVERSION || '2024-12-01-preview' }} defaults: @@ -389,8 +389,8 @@ jobs: fail-on-empty: false title: Functions integration test results - python-tests-azure-ai: - name: Python Tests - Azure AI + python-tests-foundry: + name: Python Integration Tests - Foundry needs: paths-filter if: > github.event_name != 'pull_request' && diff --git a/python/packages/openai/agent_framework_openai/_chat_client.py b/python/packages/openai/agent_framework_openai/_chat_client.py index bbec08390b..9448142847 100644 --- a/python/packages/openai/agent_framework_openai/_chat_client.py +++ b/python/packages/openai/agent_framework_openai/_chat_client.py @@ -431,6 +431,7 @@ def __init__( env_file_encoding=env_file_encoding, openai_model_fields=("responses_model", "model"), 
azure_deployment_fields=("responses_deployment_name", "deployment_name"), + responses_mode=True, ) self.client = client diff --git a/python/packages/openai/agent_framework_openai/_chat_completion_client.py b/python/packages/openai/agent_framework_openai/_chat_completion_client.py index 4bddc1eea1..514d0a2991 100644 --- a/python/packages/openai/agent_framework_openai/_chat_completion_client.py +++ b/python/packages/openai/agent_framework_openai/_chat_completion_client.py @@ -84,7 +84,7 @@ logger = logging.getLogger("agent_framework.openai") -DEFAULT_AZURE_OPENAI_CHAT_COMPLETION_API_VERSION = "2024-10-21" +DEFAULT_AZURE_OPENAI_CHAT_COMPLETION_API_VERSION = "2024-12-01-preview" ResponseModelBoundT = TypeVar("ResponseModelBoundT", bound=BaseModel) ResponseModelT = TypeVar("ResponseModelT", bound=BaseModel | None, default=None) diff --git a/python/packages/openai/agent_framework_openai/_shared.py b/python/packages/openai/agent_framework_openai/_shared.py index 9f23bdfca8..340d88e8a2 100644 --- a/python/packages/openai/agent_framework_openai/_shared.py +++ b/python/packages/openai/agent_framework_openai/_shared.py @@ -194,6 +194,7 @@ def load_openai_service_settings( env_file_encoding: str | None, openai_model_fields: Sequence[OpenAIModelSettingName] = ("model",), azure_deployment_fields: Sequence[AzureDeploymentSettingName] = ("deployment_name",), + responses_mode: bool = False, ) -> tuple[dict[str, Any], AsyncOpenAI, bool]: """Load OpenAI settings, including Azure OpenAI aliases. 
@@ -298,7 +299,10 @@ def load_openai_service_settings( return azure_settings, client, True # type: ignore[return-value] client_args["default_headers"] = merged_headers if endpoint := azure_settings.get("endpoint"): - client_args["azure_endpoint"] = endpoint + if responses_mode: + client_args["base_url"] = f"{endpoint.rstrip('/')}/openai/v1/" + else: + client_args["azure_endpoint"] = endpoint if base_url := azure_settings.get("base_url"): client_args["base_url"] = base_url if api_key := azure_settings.get("api_key"): diff --git a/python/packages/openai/tests/openai/test_openai_chat_client_azure.py b/python/packages/openai/tests/openai/test_openai_chat_client_azure.py index 21b37e8684..d952c7e92f 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_chat_client_azure.py @@ -31,10 +31,6 @@ ) -def _get_azure_responses_deployment_name() -> str: - return os.getenv("AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME") or os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"] - - class OutputStruct(BaseModel): """A structured output for testing purposes.""" @@ -51,10 +47,7 @@ def _create_azure_openai_chat_client( api_key if api_key is not None else None if credential is not None else os.environ["AZURE_OPENAI_API_KEY"] ) return OpenAIChatClient( - model=_get_azure_responses_deployment_name(), api_key=resolved_api_key, - azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], - api_version=os.getenv("AZURE_OPENAI_API_VERSION"), credential=credential, ) From 922b0107db7e68e8b1fdc6dae79460def7b8ed2d Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 26 Mar 2026 20:54:10 +0100 Subject: [PATCH 21/30] test updates --- .../openai/agent_framework_openai/_shared.py | 19 +++++++++---------- .../openai/test_openai_chat_client_azure.py | 8 ++++---- ...est_openai_chat_completion_client_azure.py | 3 +-- 3 files changed, 14 insertions(+), 16 deletions(-) diff --git a/python/packages/openai/agent_framework_openai/_shared.py 
b/python/packages/openai/agent_framework_openai/_shared.py index 340d88e8a2..feb30cc7d5 100644 --- a/python/packages/openai/agent_framework_openai/_shared.py +++ b/python/packages/openai/agent_framework_openai/_shared.py @@ -214,17 +214,16 @@ def load_openai_service_settings( azure_client = isinstance(client, AsyncAzureOpenAI) use_azure = azure_client or endpoint is not None or credential is not None checked_openai = False - openai_settings_kwargs: dict[str, Any] = { - "api_key": api_key_str, - "org_id": org_id, - "base_url": base_url, - "env_file_path": env_file_path, - "env_file_encoding": env_file_encoding, - } - if model is not None: - openai_settings_kwargs[openai_model_fields[0]] = model - openai_settings: OpenAISettings | None = None if not use_azure: + openai_settings_kwargs: dict[str, Any] = { + "api_key": api_key_str, + "org_id": org_id, + "base_url": base_url, + "env_file_path": env_file_path, + "env_file_encoding": env_file_encoding, + } + if model is not None: + openai_settings_kwargs[openai_model_fields[0]] = model openai_settings = load_settings( OpenAISettings, env_prefix="OPENAI_", diff --git a/python/packages/openai/tests/openai/test_openai_chat_client_azure.py b/python/packages/openai/tests/openai/test_openai_chat_client_azure.py index d952c7e92f..c904d8191d 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_chat_client_azure.py @@ -87,13 +87,13 @@ async def get_weather(location: str) -> str: def test_init_with_azure_endpoint(azure_openai_unit_test_env: dict[str, str]) -> None: - client = _create_azure_openai_chat_client() + client = _create_azure_openai_chat_client(credential=AzureCliCredential()) assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME"] assert isinstance(client, SupportsChatGetResponse) assert isinstance(client.client, AsyncAzureOpenAI) assert client.OTEL_PROVIDER_NAME == "azure.ai.openai" - assert 
client.azure_endpoint == azure_openai_unit_test_env["AZURE_OPENAI_ENDPOINT"] + assert client.azure_endpoint.startswith(azure_openai_unit_test_env["AZURE_OPENAI_ENDPOINT"]) assert client.api_version == azure_openai_unit_test_env["AZURE_OPENAI_API_VERSION"] @@ -195,10 +195,10 @@ async def get_token(self, *scopes: str, **kwargs: object): @pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_API_VERSION"]], indirect=True) def test_init_uses_default_azure_api_version(azure_openai_unit_test_env: dict[str, str]) -> None: - client = _create_azure_openai_chat_client() + client = _create_azure_openai_chat_client(credential=AzureCliCredential()) assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME"] - assert client.api_version == "preview" + assert client.api_version is not None def test_openai_base_url_wins_over_azure_aliases(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: diff --git a/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py b/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py index 3ae3b80602..22787f8092 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py @@ -182,11 +182,10 @@ async def get_token(self, *scopes: str, **kwargs: object): @pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_API_VERSION"]], indirect=True) def test_init_uses_default_azure_api_version(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: - monkeypatch.setenv("OPENAI_API_VERSION", "preview") client = _create_azure_chat_completion_client() assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"] - assert client.api_version == "2024-10-21" + assert client.api_version is not None def test_openai_base_url_wins_over_azure_aliases(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: From 
f6fab2d4991953ce540bb431adff1ef6c60858c8 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 26 Mar 2026 20:56:28 +0100 Subject: [PATCH 22/30] test fix --- .../openai/tests/openai/test_openai_chat_client_azure.py | 1 - 1 file changed, 1 deletion(-) diff --git a/python/packages/openai/tests/openai/test_openai_chat_client_azure.py b/python/packages/openai/tests/openai/test_openai_chat_client_azure.py index c904d8191d..6a62f0b41a 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_chat_client_azure.py @@ -94,7 +94,6 @@ def test_init_with_azure_endpoint(azure_openai_unit_test_env: dict[str, str]) -> assert isinstance(client.client, AsyncAzureOpenAI) assert client.OTEL_PROVIDER_NAME == "azure.ai.openai" assert client.azure_endpoint.startswith(azure_openai_unit_test_env["AZURE_OPENAI_ENDPOINT"]) - assert client.api_version == azure_openai_unit_test_env["AZURE_OPENAI_API_VERSION"] def test_init_auto_detects_azure_env(azure_openai_unit_test_env: dict[str, str]) -> None: From 195dbd03ff9bb26ff733d32a01f511a4ed296bdd Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 26 Mar 2026 22:00:00 +0100 Subject: [PATCH 23/30] fix test setup --- .github/workflows/python-integration-tests.yml | 2 +- .github/workflows/python-merge-tests.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/python-integration-tests.yml b/.github/workflows/python-integration-tests.yml index 86fe83c261..2692a56a29 100644 --- a/.github/workflows/python-integration-tests.yml +++ b/.github/workflows/python-integration-tests.yml @@ -343,7 +343,7 @@ jobs: python-tests-azure-openai, python-tests-misc-integration, python-tests-functions, - python-tests-azure-ai, + python-tests-foundry, python-tests-cosmos ] steps: diff --git a/.github/workflows/python-merge-tests.yml b/.github/workflows/python-merge-tests.yml index 6ca88e7a59..c07f5ee699 100644 --- 
a/.github/workflows/python-merge-tests.yml +++ b/.github/workflows/python-merge-tests.yml @@ -515,7 +515,7 @@ jobs: python-tests-azure-openai, python-tests-misc-integration, python-tests-functions, - python-tests-azure-ai, + python-tests-foundry, python-tests-cosmos, ] steps: From f8e67183fea736a1603ca1029b6cca2cd7cbf793 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Fri, 27 Mar 2026 12:36:44 +0100 Subject: [PATCH 24/30] updates to tests and setup --- .../workflows/python-integration-tests.yml | 32 +- .github/workflows/python-merge-tests.yml | 30 +- python/.env.example | 8 +- python/DEV_SETUP.md | 4 +- python/README.md | 12 +- .../azure_openai/test_azure_chat_client.py | 39 + .../test_azure_embedding_client.py | 35 +- .../test_azure_responses_client.py | 222 +---- .../test_azure_responses_client_foundry.py | 182 +++++ .../azure-ai/tests/test_azure_ai_client.py | 18 +- python/packages/core/README.md | 4 +- .../agent_framework_devui/ui/assets/index.js | 7 +- python/packages/devui/dev.md | 2 +- .../components/layout/deployment-modal.tsx | 4 +- .../agent_framework_foundry/__init__.py | 7 +- .../{_foundry_agent_client.py => _agent.py} | 293 ++++++- ...foundry_chat_client.py => _chat_client.py} | 61 +- .../_entra_id_authentication.py | 67 -- .../agent_framework_foundry/_foundry_agent.py | 287 ------- ...memory_provider.py => _memory_provider.py} | 31 +- .../agent_framework_foundry/_shared.py | 49 -- .../foundry/tests/assets/sample_image.jpg | Bin 0 -> 182161 bytes .../foundry/tests/{ => foundry}/conftest.py | 0 .../tests/foundry/test_foundry_agent.py | 413 ++++++++++ .../tests/foundry/test_foundry_chat_client.py | 763 ++++++++++++++++++ .../foundry/test_foundry_memory_provider.py | 501 ++++++++++++ .../foundry/tests/test_foundry_agent.py | 374 --------- .../tests/test_foundry_memory_provider.py | 507 ------------ .../packages/lab/gaia/samples/openai_agent.py | 4 +- .../agent_framework_openai/_chat_client.py | 2 +- .../tests/openai/test_openai_chat_client.py | 164 
++-- .../openai/test_openai_chat_client_azure.py | 97 ++- ...est_openai_chat_completion_client_azure.py | 100 ++- .../test_openai_embedding_client_azure.py | 31 + .../samples/02-agents/chat_client/README.md | 4 +- python/samples/02-agents/devui/README.md | 2 +- python/samples/02-agents/mcp/README.md | 2 +- python/samples/02-agents/middleware/README.md | 2 +- .../02-agents/observability/.env.example | 4 +- .../05-end-to-end/m365-agent/.env.example | 2 +- .../05-end-to-end/m365-agent/README.md | 2 +- 41 files changed, 2616 insertions(+), 1752 deletions(-) create mode 100644 python/packages/azure-ai/tests/azure_openai/test_azure_responses_client_foundry.py rename python/packages/foundry/agent_framework_foundry/{_foundry_agent_client.py => _agent.py} (57%) rename python/packages/foundry/agent_framework_foundry/{_foundry_chat_client.py => _chat_client.py} (92%) delete mode 100644 python/packages/foundry/agent_framework_foundry/_entra_id_authentication.py delete mode 100644 python/packages/foundry/agent_framework_foundry/_foundry_agent.py rename python/packages/foundry/agent_framework_foundry/{_foundry_memory_provider.py => _memory_provider.py} (94%) delete mode 100644 python/packages/foundry/agent_framework_foundry/_shared.py create mode 100644 python/packages/foundry/tests/assets/sample_image.jpg rename python/packages/foundry/tests/{ => foundry}/conftest.py (100%) create mode 100644 python/packages/foundry/tests/foundry/test_foundry_agent.py create mode 100644 python/packages/foundry/tests/foundry/test_foundry_chat_client.py create mode 100644 python/packages/foundry/tests/foundry/test_foundry_memory_provider.py delete mode 100644 python/packages/foundry/tests/test_foundry_agent.py delete mode 100644 python/packages/foundry/tests/test_foundry_memory_provider.py diff --git a/.github/workflows/python-integration-tests.yml b/.github/workflows/python-integration-tests.yml index 2692a56a29..6066f4bab7 100644 --- a/.github/workflows/python-integration-tests.yml +++ 
b/.github/workflows/python-integration-tests.yml @@ -60,9 +60,8 @@ jobs: environment: integration timeout-minutes: 60 env: - OPENAI_CHAT_MODEL_ID: ${{ vars.OPENAI__CHATMODELID }} - OPENAI_RESPONSES_MODEL_ID: ${{ vars.OPENAI__RESPONSESMODELID }} - OPENAI_EMBEDDINGS_MODEL_ID: ${{ vars.OPENAI_EMBEDDING_MODEL_ID }} + OPENAI_CHAT_MODEL: ${{ vars.OPENAI__CHATMODELID }} + OPENAI_RESPONSES_MODEL: ${{ vars.OPENAI__RESPONSESMODELID }} OPENAI_MODEL: ${{ vars.OPENAI__RESPONSESMODELID }} OPENAI_EMBEDDING_MODEL: ${{ vars.OPENAI_EMBEDDING_MODEL_ID }} OPENAI_API_KEY: ${{ secrets.OPENAI__APIKEY }} @@ -101,7 +100,6 @@ jobs: AZURE_OPENAI_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }} AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME: ${{ vars.AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME }} AZURE_OPENAI_ENDPOINT: ${{ vars.AZUREOPENAI__ENDPOINT }} - AZURE_OPENAI_API_VERSION: ${{ vars.AZUREOPENAI__APIVERSION || '2024-12-01-preview' }} defaults: run: working-directory: python @@ -129,6 +127,7 @@ jobs: packages/openai/tests/openai/test_openai_chat_client_azure.py packages/openai/tests/openai/test_openai_embedding_client_azure.py packages/azure-ai/tests/azure_openai + --ignore=packages/azure-ai/tests/azure_openai/test_azure_responses_client_foundry.py -m integration -n logical --dist worksteal --timeout=120 --session-timeout=900 --timeout_method thread @@ -204,11 +203,11 @@ jobs: timeout-minutes: 60 env: UV_PYTHON: "3.11" - OPENAI_CHAT_MODEL_ID: ${{ vars.OPENAI__CHATMODELID }} - OPENAI_RESPONSES_MODEL_ID: ${{ vars.OPENAI__RESPONSESMODELID }} + OPENAI_CHAT_MODEL: ${{ vars.OPENAI__CHATMODELID }} + OPENAI_RESPONSES_MODEL: ${{ vars.OPENAI__RESPONSESMODELID }} OPENAI_MODEL: ${{ vars.OPENAI__RESPONSESMODELID }} - OPENAI_API_KEY: ${{ secrets.OPENAI__APIKEY }} OPENAI_EMBEDDING_MODEL: ${{ vars.OPENAI_EMBEDDING_MODEL_ID }} + OPENAI_API_KEY: ${{ secrets.OPENAI__APIKEY }} AZURE_OPENAI_ENDPOINT: ${{ vars.AZUREOPENAI__ENDPOINT }} AZURE_OPENAI_DEPLOYMENT_NAME: ${{ 
vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }} FOUNDRY_MODEL: ${{ vars.AZUREAI__DEPLOYMENTNAME }} @@ -250,7 +249,7 @@ jobs: --timeout=360 --session-timeout=900 --timeout_method thread --retries 2 --retry-delay 5 - # Azure AI integration tests + # Foundry integration tests python-tests-foundry: name: Python Integration Tests - Foundry runs-on: ubuntu-latest @@ -259,8 +258,10 @@ jobs: env: AZURE_AI_PROJECT_ENDPOINT: ${{ secrets.AZUREAI__ENDPOINT }} AZURE_AI_MODEL_DEPLOYMENT_NAME: ${{ vars.AZUREAI__DEPLOYMENTNAME }} - FOUNDRY_PROJECT_ENDPOINT: ${{ secrets.AZUREAI__ENDPOINT }} - FOUNDRY_MODEL: ${{ vars.AZUREAI__DEPLOYMENTNAME }} + FOUNDRY_PROJECT_ENDPOINT: ${{ secrets.FOUNDRY_PROJECT_ENDPOINT }} + FOUNDRY_MODEL: ${{ vars.FOUNDRY_MODEL }} + FOUNDRY_AGENT_NAME: ${{ vars.FOUNDRY_AGENT_NAME }} + FOUNDRY_AGENT_VERSION: ${{ vars.FOUNDRY_AGENT_VERSION }} LOCAL_MCP_URL: ${{ vars.LOCAL_MCP__URL }} defaults: run: @@ -284,9 +285,14 @@ jobs: subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }} - name: Test with pytest timeout-minutes: 15 - run: | - uv run --directory packages/azure-ai poe integration-tests -n logical --dist worksteal --timeout=120 --session-timeout=900 --timeout_method thread --retries 2 --retry-delay 5 - uv run --directory packages/foundry poe integration-tests -n logical --dist worksteal --timeout=120 --session-timeout=900 --timeout_method thread --retries 2 --retry-delay 5 + run: > + uv run pytest --import-mode=importlib + packages/azure-ai/tests/azure_openai/test_azure_responses_client_foundry.py + packages/foundry/tests + -m integration + -n logical --dist worksteal + --timeout=120 --session-timeout=900 --timeout_method thread + --retries 2 --retry-delay 5 # Azure Cosmos integration tests python-tests-cosmos: diff --git a/.github/workflows/python-merge-tests.yml b/.github/workflows/python-merge-tests.yml index c07f5ee699..f90e193c26 100644 --- a/.github/workflows/python-merge-tests.yml +++ b/.github/workflows/python-merge-tests.yml @@ -141,9 +141,8 @@ jobs: 
runs-on: ubuntu-latest environment: integration env: - OPENAI_CHAT_MODEL_ID: ${{ vars.OPENAI__CHATMODELID }} - OPENAI_RESPONSES_MODEL_ID: ${{ vars.OPENAI__RESPONSESMODELID }} - OPENAI_EMBEDDINGS_MODEL_ID: ${{ vars.OPENAI_EMBEDDING_MODEL_ID }} + OPENAI_CHAT_MODEL: ${{ vars.OPENAI__CHATMODELID }} + OPENAI_RESPONSES_MODEL: ${{ vars.OPENAI__RESPONSESMODELID }} OPENAI_MODEL: ${{ vars.OPENAI__RESPONSESMODELID }} OPENAI_EMBEDDING_MODEL: ${{ vars.OPENAI_EMBEDDING_MODEL_ID }} OPENAI_API_KEY: ${{ secrets.OPENAI__APIKEY }} @@ -200,7 +199,6 @@ jobs: AZURE_OPENAI_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }} AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME: ${{ vars.AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME }} AZURE_OPENAI_ENDPOINT: ${{ vars.AZUREOPENAI__ENDPOINT }} - AZURE_OPENAI_API_VERSION: ${{ vars.AZUREOPENAI__APIVERSION || '2024-12-01-preview' }} defaults: run: working-directory: python @@ -226,6 +224,7 @@ jobs: packages/openai/tests/openai/test_openai_chat_client_azure.py packages/openai/tests/openai/test_openai_embedding_client_azure.py packages/azure-ai/tests/azure_openai + --ignore=packages/azure-ai/tests/azure_openai/test_azure_responses_client_foundry.py -m integration -n logical --dist worksteal --timeout=120 --session-timeout=900 --timeout_method thread @@ -335,11 +334,11 @@ jobs: environment: integration env: UV_PYTHON: "3.11" - OPENAI_CHAT_MODEL_ID: ${{ vars.OPENAI__CHATMODELID }} - OPENAI_RESPONSES_MODEL_ID: ${{ vars.OPENAI__RESPONSESMODELID }} + OPENAI_CHAT_MODEL: ${{ vars.OPENAI__CHATMODELID }} + OPENAI_RESPONSES_MODEL: ${{ vars.OPENAI__RESPONSESMODELID }} OPENAI_MODEL: ${{ vars.OPENAI__RESPONSESMODELID }} - OPENAI_API_KEY: ${{ secrets.OPENAI__APIKEY }} OPENAI_EMBEDDING_MODEL: ${{ vars.OPENAI_EMBEDDING_MODEL_ID }} + OPENAI_API_KEY: ${{ secrets.OPENAI__APIKEY }} AZURE_OPENAI_ENDPOINT: ${{ vars.AZUREOPENAI__ENDPOINT }} AZURE_OPENAI_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }} FOUNDRY_MODEL: ${{ vars.AZUREAI__DEPLOYMENTNAME }} @@ 
-403,8 +402,10 @@ jobs: env: AZURE_AI_PROJECT_ENDPOINT: ${{ secrets.AZUREAI__ENDPOINT }} AZURE_AI_MODEL_DEPLOYMENT_NAME: ${{ vars.AZUREAI__DEPLOYMENTNAME }} - FOUNDRY_PROJECT_ENDPOINT: ${{ secrets.AZUREAI__ENDPOINT }} - FOUNDRY_MODEL: ${{ vars.AZUREAI__DEPLOYMENTNAME }} + FOUNDRY_PROJECT_ENDPOINT: ${{ secrets.FOUNDRY_PROJECT_ENDPOINT }} + FOUNDRY_MODEL: ${{ vars.FOUNDRY_MODEL }} + FOUNDRY_AGENT_NAME: ${{ vars.FOUNDRY_AGENT_NAME }} + FOUNDRY_AGENT_VERSION: ${{ vars.FOUNDRY_AGENT_VERSION }} LOCAL_MCP_URL: ${{ vars.LOCAL_MCP__URL }} defaults: run: @@ -426,9 +427,14 @@ jobs: subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }} - name: Test with pytest timeout-minutes: 15 - run: | - uv run --directory packages/azure-ai poe integration-tests -n logical --dist worksteal --timeout=120 --session-timeout=900 --timeout_method thread --retries 2 --retry-delay 5 - uv run --directory packages/foundry poe integration-tests -n logical --dist worksteal --timeout=120 --session-timeout=900 --timeout_method thread --retries 2 --retry-delay 5 + run: > + uv run pytest --import-mode=importlib + packages/azure-ai/tests/azure_openai/test_azure_responses_client_foundry.py + packages/foundry/tests + -m integration + -n logical --dist worksteal + --timeout=120 --session-timeout=900 --timeout_method thread + --retries 2 --retry-delay 5 working-directory: ./python - name: Test Azure AI samples timeout-minutes: 10 diff --git a/python/.env.example b/python/.env.example index c09300d775..4e7ba727e5 100644 --- a/python/.env.example +++ b/python/.env.example @@ -1,6 +1,6 @@ # Azure AI -AZURE_AI_PROJECT_ENDPOINT="" -AZURE_AI_MODEL_DEPLOYMENT_NAME="" +FOUNDRY_PROJECT_ENDPOINT="" +FOUNDRY_MODEL="" # Bing connection for web search (optional, used by samples with web search) BING_CONNECTION_ID="" # Azure AI Search (optional, used by AzureAISearchContextProvider samples) @@ -13,8 +13,8 @@ AZURE_SEARCH_KNOWLEDGE_BASE_NAME="" # (different from AZURE_AI_PROJECT_ENDPOINT - Knowledge Base needs OpenAI 
endpoint for model calls) # OpenAI OPENAI_API_KEY="" -OPENAI_CHAT_MODEL_ID="" -OPENAI_RESPONSES_MODEL_ID="" +OPENAI_CHAT_MODEL="" +OPENAI_RESPONSES_MODEL="" # Azure OpenAI AZURE_OPENAI_ENDPOINT="" AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="" diff --git a/python/DEV_SETUP.md b/python/DEV_SETUP.md index d90e29226d..dbddbaac93 100644 --- a/python/DEV_SETUP.md +++ b/python/DEV_SETUP.md @@ -108,10 +108,10 @@ Content of `.env` or `openai.env`: ```env OPENAI_API_KEY="" -OPENAI_CHAT_MODEL_ID="gpt-4o-mini" +OPENAI_MODEL="gpt-4o-mini" ``` -You will then configure the ChatClient class with the keyword argument `env_file_path`: +You will then configure the ChatClient class with the keyword argument `env_file_path` (alternatively you can use `load_dotenv` in your code): ```python from agent_framework.openai import OpenAIChatClient diff --git a/python/README.md b/python/README.md index 32462ee0a2..0a3042992f 100644 --- a/python/README.md +++ b/python/README.md @@ -47,7 +47,7 @@ Set as environment variables, or create a .env file at your project root: ```bash OPENAI_API_KEY=sk-... -OPENAI_CHAT_MODEL_ID=... +OPENAI_MODEL=... ... AZURE_OPENAI_API_KEY=... AZURE_OPENAI_ENDPOINT=... @@ -60,7 +60,7 @@ FOUNDRY_MODEL=... For the generic OpenAI clients (`OpenAIChatClient` and `OpenAIChatCompletionClient`), configuration resolves in this order: -1. Explicit Azure inputs such as `credential`, `azure_endpoint`, or `api_version` +1. Explicit Azure inputs such as `credential` or `azure_endpoint` 2. `OPENAI_API_KEY` / explicit OpenAI API-key parameters 3. Azure environment fallback such as `AZURE_OPENAI_ENDPOINT` and `AZURE_OPENAI_API_KEY` @@ -70,12 +70,12 @@ pass an explicit Azure input such as `credential=AzureCliCredential()`. 
You can also override environment variables by explicitly passing configuration parameters to the chat client constructor: ```python -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.openai import OpenAIChatClient -client = AzureOpenAIChatClient( +client = OpenAIChatClient( api_key='', - endpoint='', - deployment_name='', + azure_endpoint='', + model='', api_version='', ) ``` diff --git a/python/packages/azure-ai/tests/azure_openai/test_azure_chat_client.py b/python/packages/azure-ai/tests/azure_openai/test_azure_chat_client.py index 22e0a20d96..c1485d430d 100644 --- a/python/packages/azure-ai/tests/azure_openai/test_azure_chat_client.py +++ b/python/packages/azure-ai/tests/azure_openai/test_azure_chat_client.py @@ -2,6 +2,8 @@ import json import os +from functools import wraps +from typing import Any from unittest.mock import AsyncMock, MagicMock, patch import openai @@ -33,6 +35,8 @@ from openai.types.chat.chat_completion_chunk import ChoiceDelta as ChunkChoiceDelta from openai.types.chat.chat_completion_message import ChatCompletionMessage +pytestmark = pytest.mark.filterwarnings("ignore:AzureOpenAIChatClient is deprecated\\..*:DeprecationWarning") + # region Service Setup skip_if_azure_integration_tests_disabled = pytest.mark.skipif( @@ -41,6 +45,32 @@ ) +def _with_azure_openai_debug() -> Any: + def decorator(func: Any) -> Any: + @wraps(func) + async def wrapper(*args: Any, **kwargs: Any) -> Any: + try: + return await func(*args, **kwargs) + except Exception as exc: + model = os.getenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME") or os.getenv( + "AZURE_OPENAI_DEPLOYMENT_NAME", "" + ) + api_version = os.getenv("AZURE_OPENAI_API_VERSION", "") + endpoint = os.getenv("AZURE_OPENAI_ENDPOINT", "") + debug_message = f"Azure OpenAI debug: endpoint={endpoint}, model={model}, api_version={api_version}" + if hasattr(exc, "add_note"): + exc.add_note(debug_message) + elif exc.args: + exc.args = (f"{exc.args[0]}\n{debug_message}", *exc.args[1:]) + else: 
+ exc.args = (debug_message,) + raise + + return wrapper + + return decorator + + def test_init(azure_openai_unit_test_env: dict[str, str]) -> None: # Test successful initialization azure_chat_client = AzureOpenAIChatClient() @@ -820,6 +850,7 @@ def get_weather(location: str) -> str: @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_integration_tests_disabled +@_with_azure_openai_debug() async def test_azure_openai_chat_client_response() -> None: """Test Azure OpenAI chat completion responses.""" azure_chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) @@ -851,6 +882,7 @@ async def test_azure_openai_chat_client_response() -> None: @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_integration_tests_disabled +@_with_azure_openai_debug() async def test_azure_openai_chat_client_response_tools() -> None: """Test AzureOpenAI chat completion responses.""" azure_chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) @@ -873,6 +905,7 @@ async def test_azure_openai_chat_client_response_tools() -> None: @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_integration_tests_disabled +@_with_azure_openai_debug() async def test_azure_openai_chat_client_streaming() -> None: """Test Azure OpenAI chat completion responses.""" azure_chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) @@ -909,6 +942,7 @@ async def test_azure_openai_chat_client_streaming() -> None: @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_integration_tests_disabled +@_with_azure_openai_debug() async def test_azure_openai_chat_client_streaming_tools() -> None: """Test AzureOpenAI chat completion responses.""" azure_chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) @@ -937,6 +971,7 @@ async def test_azure_openai_chat_client_streaming_tools() -> None: @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_integration_tests_disabled +@_with_azure_openai_debug() async def 
test_azure_openai_chat_client_agent_basic_run(): """Test Azure OpenAI chat client agent basic run functionality with AzureOpenAIChatClient.""" async with Agent( @@ -954,6 +989,7 @@ async def test_azure_openai_chat_client_agent_basic_run(): @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_integration_tests_disabled +@_with_azure_openai_debug() async def test_azure_openai_chat_client_agent_basic_run_streaming(): """Test Azure OpenAI chat client agent basic streaming functionality with AzureOpenAIChatClient.""" async with Agent( @@ -976,6 +1012,7 @@ async def test_azure_openai_chat_client_agent_basic_run_streaming(): @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_integration_tests_disabled +@_with_azure_openai_debug() async def test_azure_openai_chat_client_agent_session_persistence(): """Test Azure OpenAI chat client agent session persistence across runs with AzureOpenAIChatClient.""" async with Agent( @@ -1002,6 +1039,7 @@ async def test_azure_openai_chat_client_agent_session_persistence(): @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_integration_tests_disabled +@_with_azure_openai_debug() async def test_azure_openai_chat_client_agent_existing_session(): """Test Azure OpenAI chat client agent with existing session to continue conversations across agent instances.""" # First conversation - capture the session @@ -1038,6 +1076,7 @@ async def test_azure_openai_chat_client_agent_existing_session(): @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_integration_tests_disabled +@_with_azure_openai_debug() async def test_azure_chat_client_agent_level_tool_persistence(): """Test that agent-level tools persist across multiple runs with Azure Chat Client.""" diff --git a/python/packages/azure-ai/tests/azure_openai/test_azure_embedding_client.py b/python/packages/azure-ai/tests/azure_openai/test_azure_embedding_client.py index b27bc9fcd9..a172be577f 100644 --- 
a/python/packages/azure-ai/tests/azure_openai/test_azure_embedding_client.py +++ b/python/packages/azure-ai/tests/azure_openai/test_azure_embedding_client.py @@ -3,16 +3,20 @@ from __future__ import annotations import os +from functools import wraps +from typing import Any from unittest.mock import AsyncMock, MagicMock import pytest from agent_framework.azure import AzureOpenAIEmbeddingClient -from agent_framework_openai import OpenAIEmbeddingOptions +from agent_framework.openai import OpenAIEmbeddingOptions from azure.identity.aio import AzureCliCredential from openai.types import CreateEmbeddingResponse from openai.types import Embedding as OpenAIEmbedding from openai.types.create_embedding_response import Usage +pytestmark = pytest.mark.filterwarnings("ignore:AzureOpenAIEmbeddingClient is deprecated\\..*:DeprecationWarning") + def _make_openai_response( embeddings: list[list[float]], @@ -116,6 +120,32 @@ def test_azure_otel_provider_name(azure_embedding_unit_test_env: None) -> None: ) +def _with_azure_openai_debug() -> Any: + def decorator(func: Any) -> Any: + @wraps(func) + async def wrapper(*args: Any, **kwargs: Any) -> Any: + try: + return await func(*args, **kwargs) + except Exception as exc: + model = os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME") or os.getenv( + "AZURE_OPENAI_DEPLOYMENT_NAME", "" + ) + api_version = os.getenv("AZURE_OPENAI_API_VERSION", "") + endpoint = os.getenv("AZURE_OPENAI_ENDPOINT", "") + debug_message = f"Azure OpenAI debug: endpoint={endpoint}, model={model}, api_version={api_version}" + if hasattr(exc, "add_note"): + exc.add_note(debug_message) + elif exc.args: + exc.args = (f"{exc.args[0]}\n{debug_message}", *exc.args[1:]) + else: + exc.args = (debug_message,) + raise + + return wrapper + + return decorator + + def _get_azure_embedding_deployment_name() -> str: return os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME") or os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"] @@ -140,6 +170,7 @@ def _create_azure_openai_embedding_client( 
@pytest.mark.flaky @pytest.mark.integration @skip_if_azure_openai_integration_tests_disabled +@_with_azure_openai_debug() async def test_integration_azure_openai_get_embeddings() -> None: """End-to-end test of Azure OpenAI embedding generation.""" async with AzureCliCredential() as credential: @@ -159,6 +190,7 @@ async def test_integration_azure_openai_get_embeddings() -> None: @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_openai_integration_tests_disabled +@_with_azure_openai_debug() async def test_integration_azure_openai_get_embeddings_multiple() -> None: """Test Azure OpenAI embedding generation for multiple inputs.""" async with AzureCliCredential() as credential: @@ -174,6 +206,7 @@ async def test_integration_azure_openai_get_embeddings_multiple() -> None: @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_openai_integration_tests_disabled +@_with_azure_openai_debug() async def test_integration_azure_openai_get_embeddings_with_dimensions() -> None: """Test Azure OpenAI embedding generation with custom dimensions.""" async with AzureCliCredential() as credential: diff --git a/python/packages/azure-ai/tests/azure_openai/test_azure_responses_client.py b/python/packages/azure-ai/tests/azure_openai/test_azure_responses_client.py index 65e9629b96..92951f820e 100644 --- a/python/packages/azure-ai/tests/azure_openai/test_azure_responses_client.py +++ b/python/packages/azure-ai/tests/azure_openai/test_azure_responses_client.py @@ -3,9 +3,9 @@ import json import logging import os +from functools import wraps from pathlib import Path from typing import Annotated, Any -from unittest.mock import MagicMock import pytest from agent_framework import ( @@ -22,11 +22,40 @@ from pydantic import BaseModel from pytest import param +pytestmark = pytest.mark.filterwarnings("ignore:AzureOpenAIResponsesClient is deprecated\\..*:DeprecationWarning") + skip_if_azure_integration_tests_disabled = pytest.mark.skipif( os.getenv("AZURE_OPENAI_ENDPOINT", "") in ("", 
"https://test-endpoint.com"), reason="No real AZURE_OPENAI_ENDPOINT provided; skipping integration tests.", ) + +def _with_azure_openai_debug() -> Any: + def decorator(func: Any) -> Any: + @wraps(func) + async def wrapper(*args: Any, **kwargs: Any) -> Any: + try: + return await func(*args, **kwargs) + except Exception as exc: + model = os.getenv("AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME") or os.getenv( + "AZURE_OPENAI_DEPLOYMENT_NAME", "" + ) + api_version = os.getenv("AZURE_OPENAI_API_VERSION", "") + endpoint = os.getenv("AZURE_OPENAI_ENDPOINT", "") + debug_message = f"Azure OpenAI debug: endpoint={endpoint}, model={model}, api_version={api_version}" + if hasattr(exc, "add_note"): + exc.add_note(debug_message) + elif exc.args: + exc.args = (f"{exc.args[0]}\n{debug_message}", *exc.args[1:]) + else: + exc.args = (debug_message,) + raise + + return wrapper + + return decorator + + logger = logging.getLogger(__name__) @@ -141,119 +170,6 @@ def test_init_with_empty_model_id(azure_openai_unit_test_env: dict[str, str]) -> AzureOpenAIResponsesClient() -def test_init_with_project_client(azure_openai_unit_test_env: dict[str, str]) -> None: - """Test initialization with an existing AIProjectClient.""" - from unittest.mock import patch - - from openai import AsyncOpenAI - - # Create a mock AIProjectClient that returns a mock AsyncOpenAI client - mock_openai_client = MagicMock(spec=AsyncOpenAI) - mock_openai_client.default_headers = {} - - mock_project_client = MagicMock() - mock_project_client.get_openai_client.return_value = mock_openai_client - - with patch( - "agent_framework_azure_ai._deprecated_azure_openai.AzureOpenAIResponsesClient._create_client_from_project", - return_value=mock_openai_client, - ): - azure_responses_client = AzureOpenAIResponsesClient( - project_client=mock_project_client, - deployment_name="gpt-4o", - ) - - assert azure_responses_client.model == "gpt-4o" - assert azure_responses_client.client is mock_openai_client - assert 
isinstance(azure_responses_client, SupportsChatGetResponse) - - -def test_init_with_project_endpoint(azure_openai_unit_test_env: dict[str, str]) -> None: - """Test initialization with a project endpoint and credential.""" - from unittest.mock import patch - - from openai import AsyncOpenAI - - mock_openai_client = MagicMock(spec=AsyncOpenAI) - mock_openai_client.default_headers = {} - - with patch( - "agent_framework_azure_ai._deprecated_azure_openai.AzureOpenAIResponsesClient._create_client_from_project", - return_value=mock_openai_client, - ): - azure_responses_client = AzureOpenAIResponsesClient( - project_endpoint="https://test-project.services.ai.azure.com", - deployment_name="gpt-4o", - credential=AzureCliCredential(), - ) - - assert azure_responses_client.model == "gpt-4o" - assert azure_responses_client.client is mock_openai_client - assert isinstance(azure_responses_client, SupportsChatGetResponse) - - -def test_create_client_from_project_with_project_client() -> None: - """Test _create_client_from_project with an existing project client.""" - from openai import AsyncOpenAI - - mock_openai_client = MagicMock(spec=AsyncOpenAI) - mock_project_client = MagicMock() - mock_project_client.get_openai_client.return_value = mock_openai_client - - result = AzureOpenAIResponsesClient._create_client_from_project( - project_client=mock_project_client, - project_endpoint=None, - credential=None, - ) - - assert result is mock_openai_client - mock_project_client.get_openai_client.assert_called_once() - - -def test_create_client_from_project_with_endpoint() -> None: - """Test _create_client_from_project with a project endpoint.""" - from unittest.mock import patch - - from openai import AsyncOpenAI - - mock_openai_client = MagicMock(spec=AsyncOpenAI) - mock_credential = MagicMock() - - with patch("agent_framework_azure_ai._deprecated_azure_openai.AIProjectClient") as MockAIProjectClient: - mock_instance = MockAIProjectClient.return_value - 
mock_instance.get_openai_client.return_value = mock_openai_client - - result = AzureOpenAIResponsesClient._create_client_from_project( - project_client=None, - project_endpoint="https://test-project.services.ai.azure.com", - credential=mock_credential, - ) - - assert result is mock_openai_client - MockAIProjectClient.assert_called_once() - mock_instance.get_openai_client.assert_called_once() - - -def test_create_client_from_project_missing_endpoint() -> None: - """Test _create_client_from_project raises error when endpoint is missing.""" - with pytest.raises(ValueError, match="project endpoint is required"): - AzureOpenAIResponsesClient._create_client_from_project( - project_client=None, - project_endpoint=None, - credential=MagicMock(), - ) - - -def test_create_client_from_project_missing_credential() -> None: - """Test _create_client_from_project raises error when credential is missing.""" - with pytest.raises(ValueError, match="credential is required"): - AzureOpenAIResponsesClient._create_client_from_project( - project_client=None, - project_endpoint="https://test-project.services.ai.azure.com", - credential=None, - ) - - def test_serialize(azure_openai_unit_test_env: dict[str, str]) -> None: default_headers = {"X-Unit-Test": "test-guid"} @@ -285,8 +201,6 @@ def test_serialize(azure_openai_unit_test_env: dict[str, str]) -> None: "option_name,option_value,needs_validation", [ # Simple ChatOptions - just verify they don't fail - param("temperature", 0.7, False, id="temperature"), - param("top_p", 0.9, False, id="top_p"), param("max_tokens", 500, False, id="max_tokens"), param("seed", 123, False, id="seed"), param("user", "test-user-id", False, id="user"), @@ -299,7 +213,6 @@ def test_serialize(azure_openai_unit_test_env: dict[str, str]) -> None: # OpenAIResponsesOptions - just verify they don't fail param("safety_identifier", "user-hash-abc123", False, id="safety_identifier"), param("truncation", "auto", False, id="truncation"), - param("top_logprobs", 5, False, 
id="top_logprobs"), param("prompt_cache_key", "test-cache-key", False, id="prompt_cache_key"), param("max_tool_calls", 3, False, id="max_tool_calls"), # Complex options requiring output validation @@ -343,6 +256,7 @@ def test_serialize(azure_openai_unit_test_env: dict[str, str]) -> None: ), ], ) +@_with_azure_openai_debug() async def test_integration_options( option_name: str, option_value: Any, @@ -425,6 +339,7 @@ async def test_integration_options( @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_integration_tests_disabled +@_with_azure_openai_debug() async def test_integration_web_search() -> None: client = AzureOpenAIResponsesClient(credential=AzureCliCredential()) @@ -479,6 +394,7 @@ async def test_integration_web_search() -> None: @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_integration_tests_disabled +@_with_azure_openai_debug() async def test_integration_client_file_search() -> None: """Test Azure responses client with file search tool.""" azure_responses_client = AzureOpenAIResponsesClient(credential=AzureCliCredential()) @@ -509,6 +425,7 @@ async def test_integration_client_file_search() -> None: @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_integration_tests_disabled +@_with_azure_openai_debug() async def test_integration_client_file_search_streaming() -> None: """Test Azure responses client with file search tool and streaming.""" azure_responses_client = AzureOpenAIResponsesClient(credential=AzureCliCredential()) @@ -541,6 +458,7 @@ async def test_integration_client_file_search_streaming() -> None: @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_integration_tests_disabled +@_with_azure_openai_debug() async def test_integration_client_agent_hosted_mcp_tool() -> None: """Integration test for MCP tool with Azure Response Agent using Microsoft Learn MCP.""" client = AzureOpenAIResponsesClient(credential=AzureCliCredential()) @@ -566,6 +484,7 @@ async def test_integration_client_agent_hosted_mcp_tool() -> None: 
@pytest.mark.flaky @pytest.mark.integration @skip_if_azure_integration_tests_disabled +@_with_azure_openai_debug() async def test_integration_client_agent_hosted_code_interpreter_tool(): """Test Azure Responses Client agent with code interpreter tool.""" client = AzureOpenAIResponsesClient(credential=AzureCliCredential()) @@ -591,6 +510,7 @@ async def test_integration_client_agent_hosted_code_interpreter_tool(): @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_integration_tests_disabled +@_with_azure_openai_debug() async def test_integration_client_agent_existing_session(): """Test Azure Responses Client agent with existing session to continue conversations across agent instances.""" # First conversation - capture the session @@ -627,6 +547,7 @@ async def test_integration_client_agent_existing_session(): @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_integration_tests_disabled +@_with_azure_openai_debug() async def test_azure_openai_responses_client_tool_rich_content_image() -> None: """Test that Azure OpenAI Responses client can handle tool results containing images.""" image_path = Path(__file__).parent.parent / "assets" / "sample_image.jpg" @@ -660,70 +581,3 @@ def get_test_image() -> Content: assert len(response.text) > 0 # sample_image.jpg contains a photo of a house; the model should mention it. assert "house" in response.text.lower(), f"Model did not describe the house image. 
Response: {response.text}" - - -# region Integration with Foundry V2 - - -skip_if_azure_ai_integration_tests_disabled = pytest.mark.skipif( - os.getenv("AZURE_AI_PROJECT_ENDPOINT", "") in ("", "https://test-project.cognitiveservices.azure.com/") - or os.getenv("AZURE_AI_MODEL", "") == "", - reason="No real AZURE_AI_PROJECT_ENDPOINT or AZURE_AI_MODEL provided; skipping integration tests.", -) - - -@pytest.mark.flaky -@pytest.mark.integration -@skip_if_azure_ai_integration_tests_disabled -async def test_integration_function_call_roundtrip_preserves_fidelity(): - """Test that function calls roundtrip correctly with full fidelity preserved. - - This verifies the changes where: - 1. raw_representation is preserved when parsing function calls - 2. fc_id and status are included in additional_properties - 3. When re-sending messages, the full object fidelity is preserved - """ - call_count = 0 - - @tool(name="get_weather", approval_mode="never_require") - async def get_weather_tool(location: str) -> str: - """Get weather for a location.""" - nonlocal call_count - call_count += 1 - return f"Weather in {location} is sunny, 72F" - - client = AzureOpenAIResponsesClient( - project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], - deployment_name=os.environ["AZURE_AI_MODEL"], - credential=AzureCliCredential(), - ) - - async with Agent( - client=client, - name="WeatherAgent", - instructions="You help check weather. 
Use get_weather when asked about weather.", - tools=[get_weather_tool], - default_options={"store": False}, # Store messages locally to test fidelity across messages - ) as agent: - session = agent.create_session() - - # First request - should invoke the tool - response1 = await agent.run("What is the weather in Seattle?", session=session) - - assert response1 is not None - assert response1.text is not None - assert call_count >= 1 - - # Verify the response contains expected content - response_text = response1.text.lower() - assert "seattle" in response_text or "sunny" in response_text or "72" in response_text - - # Second request - should work correctly with the preserved conversation - response2 = await agent.run("And how about in Portland?", session=session) - - assert response2 is not None - assert response2.text is not None - assert call_count >= 2 - - -# endregion diff --git a/python/packages/azure-ai/tests/azure_openai/test_azure_responses_client_foundry.py b/python/packages/azure-ai/tests/azure_openai/test_azure_responses_client_foundry.py new file mode 100644 index 0000000000..f44dae280b --- /dev/null +++ b/python/packages/azure-ai/tests/azure_openai/test_azure_responses_client_foundry.py @@ -0,0 +1,182 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import os +import warnings +from unittest.mock import MagicMock + +import pytest +from agent_framework import Agent, SupportsChatGetResponse, tool + +warnings.filterwarnings( + "ignore", + message=r"RawAzureAIClient is deprecated\..*", + category=DeprecationWarning, +) + +from agent_framework.azure import AzureOpenAIResponsesClient # noqa: E402 +from azure.identity import AzureCliCredential # noqa: E402 + +pytestmark = pytest.mark.filterwarnings("ignore:AzureOpenAIResponsesClient is deprecated\\..*:DeprecationWarning") + +skip_if_foundry_integration_tests_disabled = pytest.mark.skipif( + os.getenv("FOUNDRY_PROJECT_ENDPOINT", "") == "" or os.getenv("FOUNDRY_MODEL", "") == "", + reason="No real FOUNDRY_PROJECT_ENDPOINT or FOUNDRY_MODEL provided; skipping integration tests.", +) + + +def test_init_with_project_client(azure_openai_unit_test_env: dict[str, str]) -> None: + """Test initialization with an existing AIProjectClient.""" + from unittest.mock import patch + + from openai import AsyncOpenAI + + # Create a mock AIProjectClient that returns a mock AsyncOpenAI client + mock_openai_client = MagicMock(spec=AsyncOpenAI) + mock_openai_client.default_headers = {} + + mock_project_client = MagicMock() + mock_project_client.get_openai_client.return_value = mock_openai_client + + with patch( + "agent_framework_azure_ai._deprecated_azure_openai.AzureOpenAIResponsesClient._create_client_from_project", + return_value=mock_openai_client, + ): + azure_responses_client = AzureOpenAIResponsesClient( + project_client=mock_project_client, + deployment_name="gpt-4o", + ) + + assert azure_responses_client.model == "gpt-4o" + assert azure_responses_client.client is mock_openai_client + assert isinstance(azure_responses_client, SupportsChatGetResponse) + + +def test_init_with_project_endpoint(azure_openai_unit_test_env: dict[str, str]) -> None: + """Test initialization with a project endpoint and credential.""" + from unittest.mock import patch + + from openai import AsyncOpenAI + 
+ mock_openai_client = MagicMock(spec=AsyncOpenAI) + mock_openai_client.default_headers = {} + + with patch( + "agent_framework_azure_ai._deprecated_azure_openai.AzureOpenAIResponsesClient._create_client_from_project", + return_value=mock_openai_client, + ): + azure_responses_client = AzureOpenAIResponsesClient( + project_endpoint="https://test-project.services.ai.azure.com", + deployment_name="gpt-4o", + credential=AzureCliCredential(), + ) + + assert azure_responses_client.model == "gpt-4o" + assert azure_responses_client.client is mock_openai_client + assert isinstance(azure_responses_client, SupportsChatGetResponse) + + +def test_create_client_from_project_with_project_client() -> None: + """Test _create_client_from_project with an existing project client.""" + from openai import AsyncOpenAI + + mock_openai_client = MagicMock(spec=AsyncOpenAI) + mock_project_client = MagicMock() + mock_project_client.get_openai_client.return_value = mock_openai_client + + result = AzureOpenAIResponsesClient._create_client_from_project( + project_client=mock_project_client, + project_endpoint=None, + credential=None, + ) + + assert result is mock_openai_client + mock_project_client.get_openai_client.assert_called_once() + + +def test_create_client_from_project_with_endpoint() -> None: + """Test _create_client_from_project with a project endpoint.""" + from unittest.mock import patch + + from openai import AsyncOpenAI + + mock_openai_client = MagicMock(spec=AsyncOpenAI) + mock_credential = MagicMock() + + with patch("agent_framework_azure_ai._deprecated_azure_openai.AIProjectClient") as MockAIProjectClient: + mock_instance = MockAIProjectClient.return_value + mock_instance.get_openai_client.return_value = mock_openai_client + + result = AzureOpenAIResponsesClient._create_client_from_project( + project_client=None, + project_endpoint="https://test-project.services.ai.azure.com", + credential=mock_credential, + ) + + assert result is mock_openai_client + 
MockAIProjectClient.assert_called_once() + mock_instance.get_openai_client.assert_called_once() + + +def test_create_client_from_project_missing_endpoint() -> None: + """Test _create_client_from_project raises error when endpoint is missing.""" + with pytest.raises(ValueError, match="project endpoint is required"): + AzureOpenAIResponsesClient._create_client_from_project( + project_client=None, + project_endpoint=None, + credential=MagicMock(), + ) + + +def test_create_client_from_project_missing_credential() -> None: + """Test _create_client_from_project raises error when credential is missing.""" + with pytest.raises(ValueError, match="credential is required"): + AzureOpenAIResponsesClient._create_client_from_project( + project_client=None, + project_endpoint="https://test-project.services.ai.azure.com", + credential=None, + ) + + +@pytest.mark.flaky +@pytest.mark.integration +@skip_if_foundry_integration_tests_disabled +async def test_integration_function_call_roundtrip_preserves_fidelity() -> None: + """Test that function calls roundtrip correctly with full fidelity preserved.""" + call_count = 0 + + @tool(name="get_weather", approval_mode="never_require") + async def get_weather_tool(location: str) -> str: + """Get weather for a location.""" + nonlocal call_count + call_count += 1 + return f"Weather in {location} is sunny, 72F" + + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], + deployment_name=os.environ["FOUNDRY_MODEL"], + credential=AzureCliCredential(), + ) + + async with Agent( + client=client, + name="WeatherAgent", + instructions="You help check weather. 
Use get_weather when asked about weather.", + tools=[get_weather_tool], + default_options={"store": False}, + ) as agent: + session = agent.create_session() + + response1 = await agent.run("What is the weather in Seattle?", session=session) + + assert response1 is not None + assert response1.text is not None + assert call_count >= 1 + + response_text = response1.text.lower() + assert "seattle" in response_text or "sunny" in response_text or "72" in response_text + + response2 = await agent.run("And how about in Portland?", session=session) + + assert response2 is not None + assert response2.text is not None + assert call_count >= 2 diff --git a/python/packages/azure-ai/tests/test_azure_ai_client.py b/python/packages/azure-ai/tests/test_azure_ai_client.py index f0d8cbc430..f3e459d0a4 100644 --- a/python/packages/azure-ai/tests/test_azure_ai_client.py +++ b/python/packages/azure-ai/tests/test_azure_ai_client.py @@ -3,6 +3,7 @@ import json import os import sys +import warnings from collections.abc import AsyncGenerator, AsyncIterator from contextlib import asynccontextmanager from typing import Annotated, Any @@ -41,8 +42,21 @@ from pydantic import BaseModel, ConfigDict, Field from pytest import fixture -from agent_framework_azure_ai import AzureAIClient, AzureAISettings -from agent_framework_azure_ai._shared import from_azure_ai_tools +from agent_framework_azure_ai import AzureAIClient, AzureAISettings # noqa: E402 +from agent_framework_azure_ai._shared import from_azure_ai_tools # noqa: E402 + +warnings.filterwarnings( + "ignore", + message=r"RawAzureAIClient is deprecated\..*", + category=DeprecationWarning, +) +warnings.filterwarnings( + "ignore", + message=r"AzureAIClient is deprecated\..*", + category=DeprecationWarning, +) + +pytestmark = pytest.mark.filterwarnings("ignore:AzureAIClient is deprecated\\..*:DeprecationWarning") @pytest.fixture diff --git a/python/packages/core/README.md b/python/packages/core/README.md index 9d88cc4556..36f0f6e02c 100644 --- 
a/python/packages/core/README.md +++ b/python/packages/core/README.md @@ -29,8 +29,8 @@ Set as environment variables, or create a .env file at your project root: ```bash OPENAI_API_KEY=sk-... -OPENAI_CHAT_MODEL_ID=... -OPENAI_RESPONSES_MODEL_ID=... +OPENAI_CHAT_MODEL=... +OPENAI_RESPONSES_MODEL=... ... AZURE_OPENAI_API_KEY=... AZURE_OPENAI_ENDPOINT=... diff --git a/python/packages/devui/agent_framework_devui/ui/assets/index.js b/python/packages/devui/agent_framework_devui/ui/assets/index.js index 6387ada58a..a71e62397f 100644 --- a/python/packages/devui/agent_framework_devui/ui/assets/index.js +++ b/python/packages/devui/agent_framework_devui/ui/assets/index.js @@ -481,7 +481,7 @@ services: environment: # OpenAI - OPENAI_API_KEY=\${OPENAI_API_KEY} - - OPENAI_CHAT_MODEL_ID=\${OPENAI_CHAT_MODEL_ID:-gpt-4o-mini} + - OPENAI_CHAT_MODEL=\${OPENAI_CHAT_MODEL:-gpt-4o-mini} # Or Azure OpenAI - AZURE_OPENAI_API_KEY=\${AZURE_OPENAI_API_KEY} - AZURE_OPENAI_ENDPOINT=\${AZURE_OPENAI_ENDPOINT} @@ -514,7 +514,10 @@ az acr build --registry myregistry \\ --target-port 8080 \\ --ingress 'external' \\ --registry-server myregistry.azurecr.io \\ - --env-vars OPENAI_API_KEY=secretref:openai-key OPENAI_CHAT_MODEL_ID=gpt-4o-mini`})]}),o.jsxs("div",{className:"border-l-2 border-primary pl-3",children:[o.jsxs("div",{className:"flex items-center gap-2 mb-1",children:[o.jsx("div",{className:"w-5 h-5 rounded-full bg-primary text-primary-foreground flex items-center justify-center text-xs font-bold",children:"5"}),o.jsx("h5",{className:"font-medium text-sm",children:"Get Application URL"})]}),o.jsx("pre",{className:"bg-muted p-2 rounded text-xs overflow-x-auto border mt-2",children:`az containerapp show --name ${r.toLowerCase()}-app \\ + --env-vars OPENAI_API_KEY=secretref:openai-key OPENAI_CHAT_MODEL=gpt-4o-mini`})] + }), o.jsxs("div", { + className: "border-l-2 border-primary pl-3", children: [o.jsxs("div", { className: "flex items-center gap-2 mb-1", children: [o.jsx("div", { className: "w-5 
h-5 rounded-full bg-primary text-primary-foreground flex items-center justify-center text-xs font-bold", children: "5" }), o.jsx("h5", { className: "font-medium text-sm", children: "Get Application URL" })] }), o.jsx("pre", { + className: "bg-muted p-2 rounded text-xs overflow-x-auto border mt-2", children: `az containerapp show --name ${r.toLowerCase()}-app \\ --resource-group myResourceGroup \\ --query properties.configuration.ingress.fqdn`})]})]})]}),o.jsxs("div",{className:"bg-blue-50 dark:bg-blue-950/50 border border-blue-200 dark:border-blue-800 rounded-md p-3",children:[o.jsx("h4",{className:"text-sm font-semibold mb-2",children:"Learn More"}),o.jsx("p",{className:"text-xs text-muted-foreground mb-3",children:"Explore Azure Container Apps documentation for advanced features like scaling, monitoring, and CI/CD integration."}),o.jsx(Le,{size:"sm",variant:"outline",className:"w-full",asChild:!0,children:o.jsxs("a",{href:"https://learn.microsoft.com/azure/container-apps/",target:"_blank",rel:"noopener noreferrer",children:[o.jsx(Hu,{className:"h-3 w-3 mr-1"}),"View Azure Container Apps Documentation"]})})]})]})]})]})})})]})})}function tD({className:e,...n}){return o.jsx("div",{"data-slot":"card",className:We("bg-card text-card-foreground flex flex-col gap-6 rounded border py-6 shadow-sm",e),...n})}function nD({className:e,...n}){return o.jsx("div",{"data-slot":"card-header",className:We("@container/card-header grid auto-rows-min grid-rows-[auto_auto] items-start gap-1.5 px-6 has-data-[slot=card-action]:grid-cols-[1fr_auto] [.border-b]:pb-6",e),...n})}function N2({className:e,...n}){return o.jsx("div",{"data-slot":"card-title",className:We("leading-none font-semibold",e),...n})}function sD({className:e,...n}){return o.jsx("div",{"data-slot":"card-description",className:We("text-muted-foreground text-sm",e),...n})}function rD({className:e,...n}){return o.jsx("div",{"data-slot":"card-content",className:We("px-6",e),...n})}function oD({className:e,...n}){return 
o.jsx("div",{"data-slot":"card-footer",className:We("flex items-center px-6 [.border-t]:pt-6",e),...n})}const Cr=[{id:"foundry-weather-agent",name:"Azure AI Weather Agent",description:"Weather agent using Azure AI Agent (Foundry) with Azure CLI authentication",type:"agent",url:"https://raw.githubusercontent.com/microsoft/agent-framework/main/python/samples/02-agents/devui/foundry_agent/agent.py",tags:["azure-ai","foundry","tools"],author:"Microsoft",difficulty:"beginner",features:["Azure AI Agent integration","Azure CLI authentication","Mock weather tools"],requiredEnvVars:[{name:"AZURE_AI_PROJECT_ENDPOINT",description:"Azure AI Foundry project endpoint URL",required:!0,example:"https://your-project.api.azureml.ms"},{name:"FOUNDRY_MODEL_DEPLOYMENT_NAME",description:"Name of the deployed model in Azure AI Foundry",required:!0,example:"gpt-4o"}]},{id:"weather-agent-azure",name:"Azure OpenAI Weather Agent",description:"Weather agent using Azure OpenAI with API key authentication",type:"agent",url:"https://raw.githubusercontent.com/microsoft/agent-framework/main/python/samples/02-agents/devui/weather_agent_azure/agent.py",tags:["azure","openai","tools"],author:"Microsoft",difficulty:"beginner",features:["Azure OpenAI integration","API key authentication","Function calling","Mock weather tools"],requiredEnvVars:[{name:"AZURE_OPENAI_API_KEY",description:"Azure OpenAI API key",required:!0},{name:"AZURE_OPENAI_CHAT_DEPLOYMENT_NAME",description:"Name of the deployed model in Azure OpenAI",required:!0,example:"gpt-4o"},{name:"AZURE_OPENAI_ENDPOINT",description:"Azure OpenAI endpoint URL",required:!0,example:"https://your-resource.openai.azure.com"}]},{id:"spam-workflow",name:"Spam Detection Workflow",description:"5-step workflow demonstrating email spam detection with branching 
logic",type:"workflow",url:"https://raw.githubusercontent.com/microsoft/agent-framework/main/python/samples/02-agents/devui/spam_workflow/workflow.py",tags:["workflow","branching","multi-step"],author:"Microsoft",difficulty:"beginner",features:["Sequential execution","Conditional branching","Mock spam detection"]},{id:"fanout-workflow",name:"Complex Fan-In/Fan-Out Workflow",description:"Advanced data processing workflow with parallel validation, transformation, and quality assurance stages",type:"workflow",url:"https://raw.githubusercontent.com/microsoft/agent-framework/main/python/samples/02-agents/devui/fanout_workflow/workflow.py",tags:["workflow","fan-out","fan-in","parallel"],author:"Microsoft",difficulty:"advanced",features:["Fan-out pattern","Parallel execution","Complex state management","Multi-stage processing"]}];Cr.filter(e=>e.type==="agent"),Cr.filter(e=>e.type==="workflow"),Cr.filter(e=>e.difficulty==="beginner"),Cr.filter(e=>e.difficulty==="intermediate"),Cr.filter(e=>e.difficulty==="advanced");const aD=e=>{switch(e){case"beginner":return"bg-green-100 text-green-700 border-green-200";case"intermediate":return"bg-yellow-100 text-yellow-700 border-yellow-200";case"advanced":return"bg-red-100 text-red-700 border-red-200";default:return"bg-gray-100 text-gray-700 border-gray-200"}},j2=w.forwardRef(({className:e,...n},r)=>o.jsx("div",{ref:r,role:"alert",className:We("relative w-full rounded-lg border p-4 [&>svg~*]:pl-7 [&>svg+div]:translate-y-[-3px] [&>svg]:absolute [&>svg]:left-4 [&>svg]:top-4 [&>svg]:text-foreground",e),...n}));j2.displayName="Alert";const S2=w.forwardRef(({className:e,...n},r)=>o.jsx("h5",{ref:r,className:We("mb-1 font-medium leading-none tracking-tight",e),...n}));S2.displayName="AlertTitle";const _2=w.forwardRef(({className:e,...n},r)=>o.jsx("div",{ref:r,className:We("text-sm [&_p]:leading-relaxed",e),...n}));_2.displayName="AlertDescription";function 
E2({children:e,copyable:n=!1}){const[r,a]=w.useState(!1),l=()=>{navigator.clipboard.writeText(e),a(!0),setTimeout(()=>a(!1),2e3)};return o.jsxs("div",{className:"relative",children:[o.jsx("pre",{className:"bg-muted p-3 rounded-md text-sm overflow-x-auto font-mono",children:o.jsx("code",{children:e})}),n&&o.jsx(Le,{variant:"ghost",size:"sm",className:"absolute top-2 right-2 h-6 w-6 p-0",onClick:l,children:r?o.jsx(jo,{className:"h-3 w-3"}):o.jsx(uo,{className:"h-3 w-3"})})]})}function iu({number:e,title:n,description:r,code:a,action:l,copyable:c=!1}){return o.jsxs("div",{className:"flex gap-4",children:[o.jsx("div",{className:"flex-shrink-0",children:o.jsx("div",{className:"flex h-8 w-8 items-center justify-center rounded-full bg-primary text-primary-foreground font-semibold",children:e})}),o.jsxs("div",{className:"flex-1 space-y-2",children:[o.jsx("h4",{className:"font-semibold",children:n}),r&&o.jsx("p",{className:"text-sm text-muted-foreground",children:r}),a&&o.jsx(E2,{copyable:c,children:a}),l&&o.jsx("div",{children:l})]})]})}function iD({sample:e,open:n,onOpenChange:r}){const a=e.requiredEnvVars&&e.requiredEnvVars.length>0,l=a?0:-1;return o.jsx(Ir,{open:n,onOpenChange:r,children:o.jsxs(Lr,{className:"max-w-3xl",children:[o.jsxs($r,{className:"px-6 pt-6 pb-2",children:[o.jsxs(Pr,{children:["Setup: ",e.name]}),o.jsxs(OR,{children:["Follow these steps to run this sample ",e.type," locally"]})]}),o.jsx("div",{className:"px-6 pb-6",children:o.jsx(Wn,{className:"h-[500px]",children:o.jsxs("div",{className:"space-y-6 pr-4",children:[o.jsx(iu,{number:1,title:"Download the sample file",action:o.jsx(Le,{asChild:!0,size:"sm",children:o.jsxs("a",{href:e.url,download:`${e.id}.py`,target:"_blank",rel:"noopener noreferrer",children:[o.jsx(Pu,{className:"h-4 w-4 mr-2"}),"Download ",e.id,".py"]})})}),o.jsx(iu,{number:2,title:"Create a project folder",description:"Create a dedicated folder for this sample and move the downloaded file there:",code:`mkdir -p ~/my-agents/${e.id} mv 
~/Downloads/${e.id}.py ~/my-agents/${e.id}/`,copyable:!0}),a&&o.jsx(iu,{number:3,title:"Set up environment variables",description:"Create a .env file in the project folder with these required variables:",code:e.requiredEnvVars.map(c=>`${c.name}=${c.example||"your-value-here"} diff --git a/python/packages/devui/dev.md b/python/packages/devui/dev.md index 5a4166112d..0566e75429 100644 --- a/python/packages/devui/dev.md +++ b/python/packages/devui/dev.md @@ -33,7 +33,7 @@ Then edit `.env` and add your API keys: ```bash # For OpenAI (minimum required) OPENAI_API_KEY="your-api-key-here" -OPENAI_CHAT_MODEL_ID="gpt-4o-mini" +OPENAI_CHAT_MODEL="gpt-4o-mini" # Or for Azure OpenAI AZURE_OPENAI_ENDPOINT="your-endpoint" diff --git a/python/packages/devui/frontend/src/components/layout/deployment-modal.tsx b/python/packages/devui/frontend/src/components/layout/deployment-modal.tsx index 5a90a5350a..dd5ae81180 100644 --- a/python/packages/devui/frontend/src/components/layout/deployment-modal.tsx +++ b/python/packages/devui/frontend/src/components/layout/deployment-modal.tsx @@ -243,7 +243,7 @@ services: environment: # OpenAI - OPENAI_API_KEY=\${OPENAI_API_KEY} - - OPENAI_CHAT_MODEL_ID=\${OPENAI_CHAT_MODEL_ID:-gpt-4o-mini} + - OPENAI_CHAT_MODEL=\${OPENAI_CHAT_MODEL:-gpt-4o-mini} # Or Azure OpenAI - AZURE_OPENAI_API_KEY=\${AZURE_OPENAI_API_KEY} - AZURE_OPENAI_ENDPOINT=\${AZURE_OPENAI_ENDPOINT} @@ -802,7 +802,7 @@ az acr build --registry myregistry \\ --target-port 8080 \\ --ingress 'external' \\ --registry-server myregistry.azurecr.io \\ - --env-vars OPENAI_API_KEY=secretref:openai-key OPENAI_CHAT_MODEL_ID=gpt-4o-mini`} + --env-vars OPENAI_API_KEY=secretref:openai-key OPENAI_CHAT_MODEL=gpt-4o-mini`} diff --git a/python/packages/foundry/agent_framework_foundry/__init__.py b/python/packages/foundry/agent_framework_foundry/__init__.py index 3f7a5d5095..50c500ad4e 100644 --- a/python/packages/foundry/agent_framework_foundry/__init__.py +++ 
b/python/packages/foundry/agent_framework_foundry/__init__.py @@ -2,10 +2,9 @@ import importlib.metadata -from ._foundry_agent import FoundryAgent, RawFoundryAgent -from ._foundry_agent_client import RawFoundryAgentChatClient -from ._foundry_chat_client import FoundryChatClient, FoundryChatOptions, RawFoundryChatClient -from ._foundry_memory_provider import FoundryMemoryProvider +from ._agent import FoundryAgent, RawFoundryAgent, RawFoundryAgentChatClient +from ._chat_client import FoundryChatClient, FoundryChatOptions, RawFoundryChatClient +from ._memory_provider import FoundryMemoryProvider try: __version__ = importlib.metadata.version(__name__) diff --git a/python/packages/foundry/agent_framework_foundry/_foundry_agent_client.py b/python/packages/foundry/agent_framework_foundry/_agent.py similarity index 57% rename from python/packages/foundry/agent_framework_foundry/_foundry_agent_client.py rename to python/packages/foundry/agent_framework_foundry/_agent.py index 0976d5572a..67c6f6070d 100644 --- a/python/packages/foundry/agent_framework_foundry/_foundry_agent_client.py +++ b/python/packages/foundry/agent_framework_foundry/_agent.py @@ -1,30 +1,37 @@ # Copyright (c) Microsoft. All rights reserved. -"""Microsoft Foundry Agent client for connecting to pre-configured agents in Foundry. +"""Microsoft Foundry Agent for connecting to pre-configured agents in Foundry. -This module provides ``RawFoundryAgentClient`` and ``FoundryAgentClient`` for -communicating with PromptAgents and HostedAgents via the Responses API. +This module provides ``RawFoundryAgent`` and ``FoundryAgent`` — Agent subclasses +that connect to existing PromptAgents or HostedAgents in Foundry. Use +``FoundryAgent`` for the recommended experience with full middleware and telemetry. 
""" from __future__ import annotations import logging import sys -from collections.abc import Callable, Mapping, MutableMapping, Sequence +from collections.abc import Awaitable, Callable, Mapping, MutableMapping, Sequence from typing import TYPE_CHECKING, Any, ClassVar, Generic, cast -from agent_framework._middleware import ChatMiddlewareLayer -from agent_framework._settings import load_settings -from agent_framework._telemetry import AGENT_FRAMEWORK_USER_AGENT -from agent_framework._tools import FunctionInvocationConfiguration, FunctionInvocationLayer, FunctionTool -from agent_framework._types import Message -from agent_framework.observability import ChatTelemetryLayer +from agent_framework import ( + AGENT_FRAMEWORK_USER_AGENT, + AgentMiddlewareLayer, + BaseContextProvider, + ChatAndFunctionMiddlewareTypes, + ChatMiddlewareLayer, + FunctionInvocationConfiguration, + FunctionInvocationLayer, + FunctionTool, + Message, + RawAgent, + load_settings, +) +from agent_framework.observability import AgentTelemetryLayer, ChatTelemetryLayer from agent_framework_openai._chat_client import OpenAIChatOptions, RawOpenAIChatClient from azure.ai.projects.aio import AIProjectClient - -from ._entra_id_authentication import AzureCredentialTypes - -logger: logging.Logger = logging.getLogger(__name__) +from azure.core.credentials import TokenCredential +from azure.core.credentials_async import AsyncTokenCredential if sys.version_info >= (3, 13): from typing import TypeVar # type: ignore # pragma: no cover @@ -33,22 +40,25 @@ if sys.version_info >= (3, 12): from typing import override # type: ignore # pragma: no cover else: - from typing_extensions import override # type: ignore # pragma: no cover + from typing_extensions import override # type: ignore[import] # pragma: no cover if sys.version_info >= (3, 11): from typing import TypedDict # type: ignore # pragma: no cover else: from typing_extensions import TypedDict # type: ignore # pragma: no cover if TYPE_CHECKING: - from 
agent_framework import Agent, BaseContextProvider - from agent_framework._middleware import ( - ChatMiddleware, - ChatMiddlewareCallable, - FunctionMiddleware, - FunctionMiddlewareCallable, + from agent_framework import ( + Agent, + BaseContextProvider, + ChatAndFunctionMiddlewareTypes, MiddlewareTypes, + ToolTypes, ) - from agent_framework._tools import ToolTypes + +logger: logging.Logger = logging.getLogger("agent_framework.foundry") + +AzureTokenProvider = Callable[[], str | Awaitable[str]] +AzureCredentialTypes = TokenCredential | AsyncTokenCredential class FoundryAgentSettings(TypedDict, total=False): @@ -203,8 +213,6 @@ def as_agent( **kwargs: Any, ) -> Agent[FoundryAgentOptionsT]: """Create a FoundryAgent that reuses this client's Foundry configuration.""" - from ._foundry_agent import FoundryAgent - function_tools = cast( FunctionTool | Callable[..., Any] | Sequence[FunctionTool | Callable[..., Any]] | None, tools, @@ -359,9 +367,7 @@ def __init__( allow_preview: bool | None = None, env_file_path: str | None = None, env_file_encoding: str | None = None, - middleware: ( - Sequence[ChatMiddleware | ChatMiddlewareCallable | FunctionMiddleware | FunctionMiddlewareCallable] | None - ) = None, + middleware: (Sequence[ChatAndFunctionMiddlewareTypes] | None) = None, function_invocation_configuration: FunctionInvocationConfiguration | None = None, **kwargs: Any, ) -> None: @@ -393,3 +399,236 @@ def __init__( function_invocation_configuration=function_invocation_configuration, **kwargs, ) + + +class RawFoundryAgent( # type: ignore[misc] + RawAgent[FoundryAgentOptionsT], +): + """Raw Microsoft Foundry Agent without agent-level middleware or telemetry. + + Connects to an existing PromptAgent or HostedAgent in Foundry. + For full middleware and telemetry support, use :class:`FoundryAgent`. + + Examples: + .. 
code-block:: python + + from agent_framework.foundry import RawFoundryAgent + from azure.identity import AzureCliCredential + + agent = RawFoundryAgent( + project_endpoint="https://your-project.services.ai.azure.com", + agent_name="my-prompt-agent", + agent_version="1.0", + credential=AzureCliCredential(), + ) + result = await agent.run("Hello!") + """ + + def __init__( + self, + *, + project_endpoint: str | None = None, + agent_name: str | None = None, + agent_version: str | None = None, + credential: AzureCredentialTypes | None = None, + project_client: AIProjectClient | None = None, + allow_preview: bool | None = None, + tools: FunctionTool | Callable[..., Any] | Sequence[FunctionTool | Callable[..., Any]] | None = None, + context_providers: Sequence[BaseContextProvider] | None = None, + client_type: type[RawFoundryAgentChatClient] | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + **kwargs: Any, + ) -> None: + """Initialize a Foundry Agent. + + Keyword Args: + project_endpoint: The Foundry project endpoint URL. + Can also be set via environment variable FOUNDRY_PROJECT_ENDPOINT. + agent_name: The name of the Foundry agent to connect to. + Can also be set via environment variable FOUNDRY_AGENT_NAME. + agent_version: The version of the agent (required for PromptAgents, optional for HostedAgents). + Can also be set via environment variable FOUNDRY_AGENT_VERSION. + credential: Azure credential for authentication. + project_client: An existing AIProjectClient to use. + allow_preview: Enables preview opt-in on internally-created AIProjectClient. + tools: Function tools to provide to the agent. Only ``FunctionTool`` objects are accepted. + context_providers: Optional context providers for injecting dynamic context. + client_type: Custom client class to use (must be a subclass of ``RawFoundryAgentChatClient``). + Defaults to ``_FoundryAgentChatClient`` (full client middleware). + env_file_path: Path to .env file for settings. 
+ env_file_encoding: Encoding for .env file. + kwargs: Additional keyword arguments passed to the Agent base class. + """ + # Create the client + actual_client_type = client_type or _FoundryAgentChatClient + if not issubclass(actual_client_type, RawFoundryAgentChatClient): + raise TypeError( + f"client_type must be a subclass of RawFoundryAgentChatClient, got {actual_client_type.__name__}" + ) + + client = actual_client_type( + project_endpoint=project_endpoint, + agent_name=agent_name, + agent_version=agent_version, + credential=credential, + project_client=project_client, + allow_preview=allow_preview, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + + super().__init__( + client=client, # type: ignore[arg-type] + tools=tools, # type: ignore[arg-type] + context_providers=context_providers, + **kwargs, + ) + + async def configure_azure_monitor( + self, + enable_sensitive_data: bool = False, + **kwargs: Any, + ) -> None: + """Setup observability with Azure Monitor (Microsoft Foundry integration). + + This method configures Azure Monitor for telemetry collection using the + connection string from the Foundry project client (accessed via the internal client). + + Args: + enable_sensitive_data: Enable sensitive data logging (prompts, responses). + Should only be enabled in development/test environments. Default is False. + **kwargs: Additional arguments passed to configure_azure_monitor(). + + Raises: + ImportError: If azure-monitor-opentelemetry-exporter is not installed. + """ + from azure.core.exceptions import ResourceNotFoundError + + client = self.client + if not isinstance(client, RawFoundryAgentChatClient): + raise TypeError("configure_azure_monitor requires a RawFoundryAgentChatClient-based client.") + + try: + conn_string = await client.project_client.telemetry.get_application_insights_connection_string() + except ResourceNotFoundError: + logger.warning( + "No Application Insights connection string found for the Foundry project. 
" + "Please ensure Application Insights is configured in your project, " + "or call configure_otel_providers() manually with custom exporters." + ) + return + + try: + from azure.monitor.opentelemetry import configure_azure_monitor # type: ignore[import] + except ImportError as exc: + raise ImportError( + "azure-monitor-opentelemetry is required for Azure Monitor integration. " + "Install it with: pip install azure-monitor-opentelemetry" + ) from exc + + from agent_framework.observability import create_metric_views, create_resource, enable_instrumentation + + if "resource" not in kwargs: + kwargs["resource"] = create_resource() + + configure_azure_monitor( + connection_string=conn_string, + views=create_metric_views(), + **kwargs, + ) + + enable_instrumentation(enable_sensitive_data=enable_sensitive_data) + + +class FoundryAgent( # type: ignore[misc] + AgentMiddlewareLayer, + AgentTelemetryLayer, + RawFoundryAgent[FoundryAgentOptionsT], +): + """Microsoft Foundry Agent with full middleware and telemetry support. + + Connects to an existing PromptAgent or HostedAgent in Foundry. + This is the recommended class for production use. + + Examples: + .. 
code-block:: python + + from agent_framework.foundry import FoundryAgent + from azure.identity import AzureCliCredential + + # Connect to a PromptAgent + agent = FoundryAgent( + project_endpoint="https://your-project.services.ai.azure.com", + agent_name="my-prompt-agent", + agent_version="1.0", + credential=AzureCliCredential(), + tools=[my_function_tool], + ) + result = await agent.run("Hello!") + + # Connect to a HostedAgent (no version needed) + agent = FoundryAgent( + project_endpoint="https://your-project.services.ai.azure.com", + agent_name="my-hosted-agent", + credential=AzureCliCredential(), + ) + + # Custom client (e.g., raw client without client middleware) + agent = FoundryAgent( + project_endpoint="https://your-project.services.ai.azure.com", + agent_name="my-agent", + credential=AzureCliCredential(), + client_type=RawFoundryAgentChatClient, + ) + """ + + def __init__( + self, + *, + project_endpoint: str | None = None, + agent_name: str | None = None, + agent_version: str | None = None, + credential: AzureCredentialTypes | None = None, + project_client: AIProjectClient | None = None, + allow_preview: bool | None = None, + tools: FunctionTool | Callable[..., Any] | Sequence[FunctionTool | Callable[..., Any]] | None = None, + context_providers: Sequence[BaseContextProvider] | None = None, + middleware: Sequence[MiddlewareTypes] | None = None, + client_type: type[RawFoundryAgentChatClient] | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + **kwargs: Any, + ) -> None: + """Initialize a Foundry Agent with full middleware and telemetry. + + Keyword Args: + project_endpoint: The Foundry project endpoint URL. + agent_name: The name of the Foundry agent to connect to. + agent_version: The version of the agent (for PromptAgents). + credential: Azure credential for authentication. + project_client: An existing AIProjectClient to use. + allow_preview: Enables preview opt-in on internally-created AIProjectClient. 
+ tools: Function tools to provide to the agent. Only ``FunctionTool`` objects are accepted. + context_providers: Optional context providers. + middleware: Optional agent-level middleware. + client_type: Custom client class (must subclass ``RawFoundryAgentChatClient``). + env_file_path: Path to .env file for settings. + env_file_encoding: Encoding for .env file. + kwargs: Additional keyword arguments. + """ + super().__init__( + project_endpoint=project_endpoint, + agent_name=agent_name, + agent_version=agent_version, + credential=credential, + project_client=project_client, + allow_preview=allow_preview, + tools=tools, + context_providers=context_providers, + middleware=middleware, + client_type=client_type, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + **kwargs, + ) diff --git a/python/packages/foundry/agent_framework_foundry/_foundry_chat_client.py b/python/packages/foundry/agent_framework_foundry/_chat_client.py similarity index 92% rename from python/packages/foundry/agent_framework_foundry/_foundry_chat_client.py rename to python/packages/foundry/agent_framework_foundry/_chat_client.py index 8397f29639..51d1b96bb3 100644 --- a/python/packages/foundry/agent_framework_foundry/_foundry_chat_client.py +++ b/python/packages/foundry/agent_framework_foundry/_chat_client.py @@ -4,14 +4,17 @@ import logging import sys -from collections.abc import Sequence +from collections.abc import Awaitable, Callable, Sequence from typing import TYPE_CHECKING, Any, ClassVar, Generic, Literal -from agent_framework._middleware import ChatMiddlewareLayer -from agent_framework._settings import load_settings -from agent_framework._telemetry import AGENT_FRAMEWORK_USER_AGENT -from agent_framework._tools import FunctionInvocationConfiguration, FunctionInvocationLayer -from agent_framework._types import Content +from agent_framework import ( + AGENT_FRAMEWORK_USER_AGENT, + ChatMiddlewareLayer, + Content, + FunctionInvocationConfiguration, + 
FunctionInvocationLayer, + load_settings, +) from agent_framework.observability import ChatTelemetryLayer from agent_framework_openai._chat_client import OpenAIChatOptions, RawOpenAIChatClient from azure.ai.projects.aio import AIProjectClient @@ -25,9 +28,8 @@ ) from azure.ai.projects.models import FileSearchTool as ProjectsFileSearchTool from azure.ai.projects.models import MCPTool as FoundryMCPTool - -from ._entra_id_authentication import AzureCredentialTypes, AzureTokenProvider -from ._shared import resolve_file_ids +from azure.core.credentials import TokenCredential +from azure.core.credentials_async import AsyncTokenCredential if sys.version_info >= (3, 13): from typing import TypeVar # type: ignore # pragma: no cover @@ -43,15 +45,13 @@ from typing_extensions import TypedDict # type: ignore # pragma: no cover if TYPE_CHECKING: - from agent_framework._middleware import ( - ChatMiddleware, - ChatMiddlewareCallable, - FunctionMiddleware, - FunctionMiddlewareCallable, - ) + from agent_framework import ChatAndFunctionMiddlewareTypes logger: logging.Logger = logging.getLogger("agent_framework.foundry") +AzureTokenProvider = Callable[[], str | Awaitable[str]] +AzureCredentialTypes = TokenCredential | AsyncTokenCredential + class FoundrySettings(TypedDict, total=False): """Settings for Microsoft FoundryChatClient resolved from args and environment. @@ -67,6 +67,33 @@ class FoundrySettings(TypedDict, total=False): project_endpoint: str | None +def resolve_file_ids(file_ids: Sequence[str | Content] | None) -> list[str] | None: + """Resolve file IDs from strings or hosted-file Content objects.""" + if not file_ids: + return None + + resolved: list[str] = [] + for item in file_ids: + if isinstance(item, str): + if not item: + raise ValueError("file_ids must not contain empty strings.") + resolved.append(item) + elif isinstance(item, Content): + if item.type != "hosted_file": + raise ValueError( + f"Unsupported Content type {item.type!r} for code interpreter file_ids. 
" + "Only Content.from_hosted_file() is supported." + ) + if item.file_id is None: + raise ValueError( + "Content.from_hosted_file() item is missing a file_id. " + "Ensure the Content object has a valid file_id before using it in file_ids." + ) + resolved.append(item.file_id) + + return resolved if resolved else None + + FoundryChatOptionsT = TypeVar( "FoundryChatOptionsT", bound=TypedDict, # type: ignore[valid-type] @@ -492,9 +519,7 @@ def __init__( env_file_path: str | None = None, env_file_encoding: str | None = None, instruction_role: str | None = None, - middleware: ( - Sequence[ChatMiddleware | ChatMiddlewareCallable | FunctionMiddleware | FunctionMiddlewareCallable] | None - ) = None, + middleware: (Sequence[ChatAndFunctionMiddlewareTypes] | None) = None, function_invocation_configuration: FunctionInvocationConfiguration | None = None, **kwargs: Any, ) -> None: diff --git a/python/packages/foundry/agent_framework_foundry/_entra_id_authentication.py b/python/packages/foundry/agent_framework_foundry/_entra_id_authentication.py deleted file mode 100644 index b1ae8a4739..0000000000 --- a/python/packages/foundry/agent_framework_foundry/_entra_id_authentication.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -from __future__ import annotations - -import logging -from collections.abc import Awaitable, Callable -from typing import Union - -from agent_framework.exceptions import ChatClientInvalidAuthException -from azure.core.credentials import TokenCredential -from azure.core.credentials_async import AsyncTokenCredential - -logger: logging.Logger = logging.getLogger(__name__) - -AzureTokenProvider = Callable[[], Union[str, Awaitable[str]]] -"""A callable that returns a bearer token string, either synchronously or asynchronously.""" - -AzureCredentialTypes = Union[TokenCredential, AsyncTokenCredential] -"""Union of Azure credential types. - -Accepts: -- ``TokenCredential`` — synchronous Azure credential (e.g. 
``DefaultAzureCredential()``) -- ``AsyncTokenCredential`` — asynchronous Azure credential (e.g. ``azure.identity.aio.DefaultAzureCredential()``) -""" - - -def resolve_credential_to_token_provider( - credential: AzureCredentialTypes | AzureTokenProvider, - token_endpoint: str | None, -) -> AzureTokenProvider: - """Convert an Azure credential or token provider into an ``ad_token_provider`` callable. - - If the credential is already a callable token provider, it is returned as-is - (``token_endpoint`` is not required in this case). - If it is a ``TokenCredential`` or ``AsyncTokenCredential``, it is wrapped using - ``azure.identity.get_bearer_token_provider`` (sync or async variant) which - handles token caching and automatic refresh. - - Args: - credential: An Azure credential or token provider callable. - token_endpoint: The token scope/endpoint - (e.g. ``"https://cognitiveservices.azure.com/.default"``). - Required when ``credential`` is a ``TokenCredential`` or ``AsyncTokenCredential``. - - Returns: - A callable that returns a bearer token string (sync or async). - - Raises: - ServiceInvalidAuthError: If the token endpoint is empty when needed for credential wrapping. - """ - # Already a token provider callable (not a credential object) — use directly - if callable(credential) and not isinstance(credential, (TokenCredential, AsyncTokenCredential)): - return credential - - if not token_endpoint: - raise ChatClientInvalidAuthException( - "A token endpoint must be provided either in settings, as an environment variable, or as an argument." 
- ) - - if isinstance(credential, AsyncTokenCredential): - from azure.identity.aio import get_bearer_token_provider as get_async_bearer_token_provider - - return get_async_bearer_token_provider(credential, token_endpoint) - - from azure.identity import get_bearer_token_provider - - return get_bearer_token_provider(credential, token_endpoint) # type: ignore[arg-type] diff --git a/python/packages/foundry/agent_framework_foundry/_foundry_agent.py b/python/packages/foundry/agent_framework_foundry/_foundry_agent.py deleted file mode 100644 index 19c298eaf8..0000000000 --- a/python/packages/foundry/agent_framework_foundry/_foundry_agent.py +++ /dev/null @@ -1,287 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -"""Microsoft Foundry Agent for connecting to pre-configured agents in Foundry. - -This module provides ``RawFoundryAgent`` and ``FoundryAgent`` — Agent subclasses -that connect to existing PromptAgents or HostedAgents in Foundry. Use -``FoundryAgent`` for the recommended experience with full middleware and telemetry. 
-""" - -from __future__ import annotations - -import logging -import sys -from collections.abc import Callable, Sequence -from typing import TYPE_CHECKING, Any - -from agent_framework import ( - AgentMiddlewareLayer, - BaseContextProvider, - RawAgent, -) -from agent_framework.observability import AgentTelemetryLayer -from azure.ai.projects.aio import AIProjectClient - -from ._entra_id_authentication import AzureCredentialTypes -from ._foundry_agent_client import ( - RawFoundryAgentChatClient, - _FoundryAgentChatClient, # pyright: ignore[reportPrivateUsage] -) - -if sys.version_info >= (3, 13): - from typing import TypeVar # type: ignore # pragma: no cover -else: - from typing_extensions import TypeVar # type: ignore # pragma: no cover -if sys.version_info >= (3, 11): - from typing import TypedDict # type: ignore # pragma: no cover -else: - from typing_extensions import TypedDict # type: ignore # pragma: no cover - -if TYPE_CHECKING: - from agent_framework._middleware import MiddlewareTypes - from agent_framework._tools import FunctionTool - from agent_framework_openai._chat_client import OpenAIChatOptions - -logger: logging.Logger = logging.getLogger("agent_framework.foundry") - -FoundryAgentOptionsT = TypeVar( - "FoundryAgentOptionsT", - bound=TypedDict, # type: ignore[valid-type] - default="OpenAIChatOptions", - covariant=True, -) - - -class RawFoundryAgent( # type: ignore[misc] - RawAgent[FoundryAgentOptionsT], -): - """Raw Microsoft Foundry Agent without agent-level middleware or telemetry. - - Connects to an existing PromptAgent or HostedAgent in Foundry. - For full middleware and telemetry support, use :class:`FoundryAgent`. - - Examples: - .. 
code-block:: python - - from agent_framework.foundry import RawFoundryAgent - from azure.identity import AzureCliCredential - - agent = RawFoundryAgent( - project_endpoint="https://your-project.services.ai.azure.com", - agent_name="my-prompt-agent", - agent_version="1.0", - credential=AzureCliCredential(), - ) - result = await agent.run("Hello!") - """ - - def __init__( - self, - *, - project_endpoint: str | None = None, - agent_name: str | None = None, - agent_version: str | None = None, - credential: AzureCredentialTypes | None = None, - project_client: AIProjectClient | None = None, - allow_preview: bool | None = None, - tools: FunctionTool | Callable[..., Any] | Sequence[FunctionTool | Callable[..., Any]] | None = None, - context_providers: Sequence[BaseContextProvider] | None = None, - client_type: type[RawFoundryAgentChatClient] | None = None, - env_file_path: str | None = None, - env_file_encoding: str | None = None, - **kwargs: Any, - ) -> None: - """Initialize a Foundry Agent. - - Keyword Args: - project_endpoint: The Foundry project endpoint URL. - Can also be set via environment variable FOUNDRY_PROJECT_ENDPOINT. - agent_name: The name of the Foundry agent to connect to. - Can also be set via environment variable FOUNDRY_AGENT_NAME. - agent_version: The version of the agent (required for PromptAgents, optional for HostedAgents). - Can also be set via environment variable FOUNDRY_AGENT_VERSION. - credential: Azure credential for authentication. - project_client: An existing AIProjectClient to use. - allow_preview: Enables preview opt-in on internally-created AIProjectClient. - tools: Function tools to provide to the agent. Only ``FunctionTool`` objects are accepted. - context_providers: Optional context providers for injecting dynamic context. - client_type: Custom client class to use (must be a subclass of ``RawFoundryAgentChatClient``). - Defaults to ``_FoundryAgentChatClient`` (full client middleware). - env_file_path: Path to .env file for settings. 
- env_file_encoding: Encoding for .env file. - kwargs: Additional keyword arguments passed to the Agent base class. - """ - # Create the client - actual_client_type = client_type or _FoundryAgentChatClient - if not issubclass(actual_client_type, RawFoundryAgentChatClient): - raise TypeError( - f"client_type must be a subclass of RawFoundryAgentChatClient, got {actual_client_type.__name__}" - ) - - client = actual_client_type( - project_endpoint=project_endpoint, - agent_name=agent_name, - agent_version=agent_version, - credential=credential, - project_client=project_client, - allow_preview=allow_preview, - env_file_path=env_file_path, - env_file_encoding=env_file_encoding, - ) - - super().__init__( - client=client, # type: ignore[arg-type] - tools=tools, # type: ignore[arg-type] - context_providers=context_providers, - **kwargs, - ) - - async def configure_azure_monitor( - self, - enable_sensitive_data: bool = False, - **kwargs: Any, - ) -> None: - """Setup observability with Azure Monitor (Microsoft Foundry integration). - - This method configures Azure Monitor for telemetry collection using the - connection string from the Foundry project client (accessed via the internal client). - - Args: - enable_sensitive_data: Enable sensitive data logging (prompts, responses). - Should only be enabled in development/test environments. Default is False. - **kwargs: Additional arguments passed to configure_azure_monitor(). - - Raises: - ImportError: If azure-monitor-opentelemetry-exporter is not installed. 
- """ - from azure.core.exceptions import ResourceNotFoundError - - from ._foundry_agent_client import RawFoundryAgentChatClient - - client = self.client - if not isinstance(client, RawFoundryAgentChatClient): - raise TypeError("configure_azure_monitor requires a RawFoundryAgentChatClient-based client.") - - try: - conn_string = await client.project_client.telemetry.get_application_insights_connection_string() - except ResourceNotFoundError: - logger.warning( - "No Application Insights connection string found for the Foundry project. " - "Please ensure Application Insights is configured in your project, " - "or call configure_otel_providers() manually with custom exporters." - ) - return - - try: - from azure.monitor.opentelemetry import configure_azure_monitor # type: ignore[import] - except ImportError as exc: - raise ImportError( - "azure-monitor-opentelemetry is required for Azure Monitor integration. " - "Install it with: pip install azure-monitor-opentelemetry" - ) from exc - - from agent_framework.observability import create_metric_views, create_resource, enable_instrumentation - - if "resource" not in kwargs: - kwargs["resource"] = create_resource() - - configure_azure_monitor( - connection_string=conn_string, - views=create_metric_views(), - **kwargs, - ) - - enable_instrumentation(enable_sensitive_data=enable_sensitive_data) - - -class FoundryAgent( # type: ignore[misc] - AgentMiddlewareLayer, - AgentTelemetryLayer, - RawFoundryAgent[FoundryAgentOptionsT], -): - """Microsoft Foundry Agent with full middleware and telemetry support. - - Connects to an existing PromptAgent or HostedAgent in Foundry. - This is the recommended class for production use. - - Examples: - .. 
code-block:: python - - from agent_framework.foundry import FoundryAgent - from azure.identity import AzureCliCredential - - # Connect to a PromptAgent - agent = FoundryAgent( - project_endpoint="https://your-project.services.ai.azure.com", - agent_name="my-prompt-agent", - agent_version="1.0", - credential=AzureCliCredential(), - tools=[my_function_tool], - ) - result = await agent.run("Hello!") - - # Connect to a HostedAgent (no version needed) - agent = FoundryAgent( - project_endpoint="https://your-project.services.ai.azure.com", - agent_name="my-hosted-agent", - credential=AzureCliCredential(), - ) - - # Custom client (e.g., raw client without client middleware) - agent = FoundryAgent( - project_endpoint="https://your-project.services.ai.azure.com", - agent_name="my-agent", - credential=AzureCliCredential(), - client_type=RawFoundryAgentChatClient, - ) - """ - - def __init__( - self, - *, - project_endpoint: str | None = None, - agent_name: str | None = None, - agent_version: str | None = None, - credential: AzureCredentialTypes | None = None, - project_client: AIProjectClient | None = None, - allow_preview: bool | None = None, - tools: FunctionTool | Callable[..., Any] | Sequence[FunctionTool | Callable[..., Any]] | None = None, - context_providers: Sequence[BaseContextProvider] | None = None, - middleware: Sequence[MiddlewareTypes] | None = None, - client_type: type[RawFoundryAgentChatClient] | None = None, - env_file_path: str | None = None, - env_file_encoding: str | None = None, - **kwargs: Any, - ) -> None: - """Initialize a Foundry Agent with full middleware and telemetry. - - Keyword Args: - project_endpoint: The Foundry project endpoint URL. - agent_name: The name of the Foundry agent to connect to. - agent_version: The version of the agent (for PromptAgents). - credential: Azure credential for authentication. - project_client: An existing AIProjectClient to use. - allow_preview: Enables preview opt-in on internally-created AIProjectClient. 
- tools: Function tools to provide to the agent. Only ``FunctionTool`` objects are accepted. - context_providers: Optional context providers. - middleware: Optional agent-level middleware. - client_type: Custom client class (must subclass ``RawFoundryAgentChatClient``). - env_file_path: Path to .env file for settings. - env_file_encoding: Encoding for .env file. - kwargs: Additional keyword arguments. - """ - super().__init__( - project_endpoint=project_endpoint, - agent_name=agent_name, - agent_version=agent_version, - credential=credential, - project_client=project_client, - allow_preview=allow_preview, - tools=tools, - context_providers=context_providers, - middleware=middleware, - client_type=client_type, - env_file_path=env_file_path, - env_file_encoding=env_file_encoding, - **kwargs, - ) diff --git a/python/packages/foundry/agent_framework_foundry/_foundry_memory_provider.py b/python/packages/foundry/agent_framework_foundry/_memory_provider.py similarity index 94% rename from python/packages/foundry/agent_framework_foundry/_foundry_memory_provider.py rename to python/packages/foundry/agent_framework_foundry/_memory_provider.py index 3c24e3380e..36d4a27a43 100644 --- a/python/packages/foundry/agent_framework_foundry/_foundry_memory_provider.py +++ b/python/packages/foundry/agent_framework_foundry/_memory_provider.py @@ -13,25 +13,38 @@ from contextlib import AbstractAsyncContextManager from typing import TYPE_CHECKING, Any, ClassVar -from agent_framework import AGENT_FRAMEWORK_USER_AGENT, Message -from agent_framework._sessions import AgentSession, BaseContextProvider, SessionContext -from agent_framework._settings import load_settings +from agent_framework import ( + AGENT_FRAMEWORK_USER_AGENT, + AgentSession, + BaseContextProvider, + Message, + SessionContext, + load_settings, +) from azure.ai.projects.aio import AIProjectClient +from azure.core.credentials import TokenCredential +from azure.core.credentials_async import AsyncTokenCredential from 
openai.types.responses import ResponseInputItemParam -from ._entra_id_authentication import AzureCredentialTypes -from ._shared import FoundryProjectSettings - if sys.version_info >= (3, 11): - from typing import Self # pragma: no cover + from typing import Self, TypedDict # pragma: no cover else: - from typing_extensions import Self # pragma: no cover + from typing_extensions import Self, TypedDict # pragma: no cover if TYPE_CHECKING: - from agent_framework._agents import SupportsAgentRun + from agent_framework import SupportsAgentRun + logger = logging.getLogger(__name__) +AzureCredentialTypes = TokenCredential | AsyncTokenCredential + + +class FoundryProjectSettings(TypedDict, total=False): + """Foundry project settings loaded from FOUNDRY_ environment variables.""" + + project_endpoint: str | None + class FoundryMemoryProvider(BaseContextProvider): """Foundry Memory context provider using the new BaseContextProvider hooks pattern. diff --git a/python/packages/foundry/agent_framework_foundry/_shared.py b/python/packages/foundry/agent_framework_foundry/_shared.py deleted file mode 100644 index 8eed50f067..0000000000 --- a/python/packages/foundry/agent_framework_foundry/_shared.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
- -from __future__ import annotations - -import logging -import sys -from collections.abc import Sequence - -from agent_framework import Content - -if sys.version_info >= (3, 11): - from typing import TypedDict # pragma: no cover -else: - from typing_extensions import TypedDict # type: ignore # pragma: no cover - -logger = logging.getLogger("agent_framework.foundry") - - -class FoundryProjectSettings(TypedDict, total=False): - """Foundry project settings loaded from FOUNDRY_ environment variables.""" - - project_endpoint: str | None - - -def resolve_file_ids(file_ids: Sequence[str | Content] | None) -> list[str] | None: - """Resolve file IDs from strings or hosted-file Content objects.""" - if not file_ids: - return None - - resolved: list[str] = [] - for item in file_ids: - if isinstance(item, str): - if not item: - raise ValueError("file_ids must not contain empty strings.") - resolved.append(item) - elif isinstance(item, Content): - if item.type != "hosted_file": - raise ValueError( - f"Unsupported Content type {item.type!r} for code interpreter file_ids. " - "Only Content.from_hosted_file() is supported." - ) - if item.file_id is None: - raise ValueError( - "Content.from_hosted_file() item is missing a file_id. " - "Ensure the Content object has a valid file_id before using it in file_ids." 
- ) - resolved.append(item.file_id) - - return resolved if resolved else None diff --git a/python/packages/foundry/tests/assets/sample_image.jpg b/python/packages/foundry/tests/assets/sample_image.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ea6486656fd5b603af043e29b941c99845baea7a GIT binary patch literal 182161 zcmeGF2Ut_j@&F8VVI$bqWf~w2CQs(&2ap9!fedp37!Ok?!%P4^2Is*ziA-)8|wgXu1r|!=|5FJRSmQzraJ4dymd#c!T z)T{8F^ROiv7@P_^4_}pE8cPi^0$%Wxsj6aWR`Jhc>6Yb#Cm&1yvkb8e%kVmYXI!Ok zj6F`44=j$VBla9QUn*swwAk|$aO_X`=1Q66<>YR{mSuuc+=Q<@f3Av~R4Xv6#Z8(O zn40$XhGlwScS%cfU?gKrTB;)qh=G#e8I;QUwicNlVmn z;3)tB;DVD9VK7qvKvE;+O$|(E<)Q@&C&gf-(){cns1ttn57f6Q`v*8|r4H2h;H%G= zPia}85m^=lk^e4I^kbkPMkMUwD8x)|ac;V%5NvS_VkO5Q%s~?8VkdMIVy363XP{?h zU|?ovVq{|HVq<1zo{3iSvg4oE9Vjm z`+p)}@gc-c58*Z;5EKwZ7{)ZH3InUq9*&!mz zW%X#UqEf)&*Sd6DVy!>F|AHnUxNqkk5!1V1gVC=_3Pf&bwC%-y%{%(IiTL50f%wJb zWBHe!G=H2(DX4Dgo3wKeIv#hYu%@+tO4-ofBRJ}EYEf<5z%)AqhlA2mlgmU$OM@U6 zVYB>tYES}yVGbIFL+3e3C3s}@`m>_w-uD5AV|A^$&;+F~TWCoIpc6w>HG)DUZNTW; zA_K9#t3~+NB@q8vgvAbsnTnhzJERSbf02npR`mw61%6rBCpqGE2)fs90~?C+>2)gheB4)Km}^5x zK~C!~`AXy3`{MW;-{J(y-BPbQD>$C3jmsIs?uq`G*ZHHTaTa@{eY=sN)%M|gXUh~3 z#S4NbJGeJk4riurX>Vp(IMO-f(1t6yKI|F%H6rGs>?(^?d2y@}&oc5K1Ls&&3og|~L*pDlTmQ?;w;iSgEr8`}%sW3W+72CC9TyompO z_19bs^TU;fC5Up}J9`VuGM{;*`1z;QwFPx7G``bTp2=j9ikn+4h<(HGGqT7404>zP`wY9bue#v>C%@$d{c>MJCipIsk`H{LjJ&D(mJR@oq@UI-pCmE|QloajGRTI$JN zewK*u8fCyO1djfwe%?PRlX4!rci2c}Zg*>;6IDmgF7+9ij`5TQE^Ap98GjkVOU3DY z-m=-s85(Xg_iA19_2=2%27L>ywHV^MdLW%C4P73_`+7aTYut>kG08W#vTI(r!Rxfk zfI}l6`n1lgxm{(mbshuhng;1Pb+!)@rNeD1(N4NVwA)~5?O0I9G`?tl%c-;5)4iN} zB}I=EbS5;q-lFrA$RC@x50iaGlf|udxK?2Npl zk;$fhwxqGhnIZAbHD9O7u(o4e?_TvJ!w^rrBkI!9LeL4}>^AkcS#t!Fd|gH-dB3=JrwhC5wZ_1(Jc3(q5ju{G=Q=U{Wo*{X za4=gr*y3gGBky=wuXGmQdp$lq^~Xm$Cw7mAvf4&Tbo!LmR!qkiw#AE^H)vlg`*!g{ z1JeYRTkGC4*xmOkhel>^suYz8Hr=;(H_^W`#$Qs=R8m%Pqp)Tn+Ff~A)-Z3|_cv}g z-*}}F>t{vw_@-fR+{kMVzVl$FsE>(fs=dE#Q+a{9KOc25-eYXEXD@w+6EAbSunSS} 
z!3V*sS3-_Bv89=_W;L!`^ITNSJO~8yumL9mMz$yR&jfn>Ch?1(MH#n=|4SJxzlA6{~}Z$ zQL8-1cpVYY$-j%%+jGE*dHBb*#JIS1sCmT>q+Dnn%d}R+F|*FEQxTzO9gW&!iw!#D zuMsl$%Y>ibDBX*Fy!oY{dSqf`{~}~b+vB5YCT{2_dd>X2^E!DsmTb>3Z~xq<2aQC2 zh|kJ;d`H;6%<9e}P)E12(sn=o7UI)tw+MNZ3_idNg{2-#I(hQKmGRf~cj2xmI)j4| zQIpq3?K=;}#2jZT(iZztQa8K1F+94X$4g)lqR+A~X4O1`+lbL zQk;fEIGsSxWUR#MdV5CL5WIW=H8EK2hrZ^@(mvjZ zX~TJR@OOsu`!+_Rc6Y{S98GzZppz^WS`RlrO%pflw~~CTDzl z#ICfDhig(u%^7-9a;YM3EYC0Os%A~CR;S=d=dAwjm%9!Wp4zlYpe*QYvkDu5r=$FA zR}Z=gwf%^z!AW_f&ZoD-*CLtjPP*HgDW`5n8w_8qWm$xhwMLWYGSkrx)v@orTX|x6 zjl^TXNa*=m#vSPMVpULpIMlxP!hv49| zXUk~UfN8|Q@rZ#3MX$rO3va|dE4^)KQRs}j$8=KrW;(BG2h(HBHM+>U`G(K?Sj*N& zzswm$vh#Vde)Jl<9Cxf z%fhs|A4%0kb=Xx9W|A>6O z2-yZ|Y7b2kb26WmgBKyJc{ur^S3)SSRZgERK)ABZF{r zSE;DKR%^po8zx1WlJ@U)$hO^CA9gH4VUMRHhTf#6@{0_)=OZ2^VV#r1*amW98oz3wH)|0 ztAZoqUe%3y!cO6@=vMO$0_8B8TBnfExtdn5ec8h34Wg@A@6uQq}N^UgHC4s z%Ae-7jjuNh-;2|R6|}Re&*M+Sa>h1~2fv=BR_>@#9qlpMIC$4k5i`rMp;-GxW0nxV zI0jRZiiyJ6dWoni!N9+ra3X5$AXw z2V}?A%)(|9eOCMn9#@tL=abGnb@kw7ssE$$^S-jsE#JF!*{?Z{y7MV`VQu1$U^9`* z-rmXDtwo>0+NUFt8Sh2JpGy=>QlEtD7W&U3ZlaJ0@?Uv_Lb+5geJmJTgz{UpijbXx z&OH-Si_j6LxH7@@=en!zrQx+k9~H#HCf%G93+flZi|Kvk)$I0?`ts6@2O6cbURurj z2wN+r*$Pz$_>gC6lH>Rq{9+zRQXO^(!c?H`$ zN4~CXT7;qtoTv@^c)zo9v3r(Yc1cRN89Cs=s76$?Hpcm8=&)eNeR@8%yjh@{d9E2> z6xvhQX3dw1?4;TC4Erja*QGWzM%D|TQ=(*<5;vdJ?N-^8IuV)iW-xQZM8a@wR$)$E z&Nb~#ohLhjP7++SjoL!0gk9vwSAv=3=f<1`2D7p{3zymzbzt!hD?5y=8qLsb<`FeB2pzeyQ)=>g+ z9<=&bIJ7h&9Jhau41s?;t|Rc}qMAb8sg*3pI3JhbDF zZ0k@-pR75@^8V7kzOkU2@<+=H){VZs(|2``j;4kkM`lrG#S34l(oJ)MV>G=^uz8Mz zD$PXAMM$^NF1@rg{@hT9rt5LMP}ByFg4l?dyhTVeqsaLvO`+|>sIJeAU-4aSZRtUI z6Q(^QdEYqrMnWDGw=0)>U+vQjV?kDU&$4>STvb`u`y0|RHa_Osdqbc zEm^>5b3<2ET`Qt~N^)K-!{xTaXDP0H)pzTAo!&%`PAa>WYD&-TLoY(FGnl@H7tUG_ z=SNXZTqTZmM`};(?w_qpwx10tI9wZ*Ti$o*{aBQ;E1yeA;>P)%8Ri%1k4GI%O-gP4 z8qj&Zynrc7h6Tr-6ds)s=9XLLGBqa^rhWIWGqz{noFs36f)7Do@4@|oFL^!aU~Ml= z^TvqFgSS4qdNj)1Yttqq-d9qJV+$Tk?ix>>d%9m$wx(!ws!!pvy^H0LoOTbh48bd+ zzhfwJ@Xm)Ch#U6lR&kIv3WI3(soXrJ()GNWNQb;h+s1gBC)jZ)w9Iulv~Urs%G~9a 
zJ|1j2I3sq%wKxP%z(;(?(`x(Fmb`82oV$K~vR+XzeNs|almExCG;R3|>zt-3d&%2t zyQXYswF8>o$SlPlCnw2|l=q_~Fqi^2BQklLz z^u3+cTc)6{uwz7eq5~$K;V|v(tyIvu5I=5r0~^(fb?A#4$xf?&pMU&_`5+$6H>BCw zV_H8kFBZ0tklZo*ZSYnl&KIuFB@T7)?Tx4VWQG~mR@VtQDP*3a^ z)1uQdPh}GIndj-=d~%oR>}XxM9Qi3dE2dH0apayrtAL{LOLLnxN8-Ht#t&&%dk%e=^b$#8WlkvkYXPkmT|)XkD?42P)<_2~V!g~ZTj!7WaMkAn1rJDPUW zY!pJYG9Zf!MyKC+1&3)}@A+Q1<@9TUPf1J5rOPLDrTt_|!^`+14QDGxM<*5`li=^B zlQ$WX&q-&uKAgYRlvdFcW9|RRtJCX>$(I+t*nrX2aKU_xZCTJFWL6oPq|a4(r@!FX z^`tLgbQpJdz&G_16h3A>ELixt^tueoMiGf08ERvD3Yl&e&horQVV(EgDf69LgbvMm z?I;UsOxRG~y3&DzQyR92xnchd#kTOu_u2`IP-c_;?X2TZ)1|v+Hy(TSb-3%* zcXWo=_Gy)m>X+wwJU)44cY>bR<;6%=E3%7FdYhh27P>KHH(&QAdv=a{5o(pW6;^8C z7+ze&ytnp^ow{_^D`&JraMx&7-b762tgj{L*w5rmyog6%-zwa3L?OfLw9|K_m$pt! z@X$!!JdJeTJns?hdpm5Mthse;Ge33LqWD}dvEZSO0-3n$!3C*yv-9H8f79o{uMb`S3#wvFOQ%OMUP5S8XNL4FkUYbCKsJ)1nP`-s|s0o( z6IGff96LMdx(G$)(8;`0))wKwGJEmXqKgPG=LG9()Pu(&5-L9_ys19PnNyMj|x^hwYR0; z@GyVwuG{{>tZC2TT4d3cxSS$HMdG#K0ld}VHHldlx7f$I*`FpI^nJTn7omQylD8?| zcAbZ=v?{6Hl+JmX)pn3S965$`YP(i(q2uwVrdj=c4>nzMoya{kwr9)72WR<4J36(m zc2_POwtBwrkf8ZcR{2zS{qqcThodX3)j@f^0LvkD$DC}RMJOa>!O9b7R;n9QLm7k@pPzjh8Hp-3+QGMdSsVz=C#y8#_ocq+$hUIl@QwxJr+4%_u+Et z_F%_T#%@2q(mhq_n>MT6WAHL1D_v65()T8=(mUJxY*6t##pduPpPtWe)8FQVqMp>w z37Q9))rQI-i>w?_Xlc-w zX!GONgVi#*k4qB-tmoTDQbs+}Jbt)lEJCL(v)NQzjz=^ld8K3b%+9Ry%DAx$-iD~d z`p#~>CUOVs^772RF_nuJ1E&>e;^I_uX6Z2PvowFcX zbbPY943DljQm&oiTBdpW#M383lck;4^y3u1Jt*4rYEb>iRe{0r(&rtcJ@K!sm-?Xdlre59XKvh^C@l4*t52{oW5b|(7&0LdvC6*=%*VOfR$l1Q^%_78P zUpdWHR_d;w=yt|w+AH;L71c?;hT@3e@tmD!FI;@(U6^_r;;>MIM_vp!?Fv^SxO%H^ z)GPT?{!Sp1I=;Owb9yc!Hd89~#5tJYi`i%Vx=D*IpWZZX-7oymsfV`g#OLFSP*3c9 zRAZ8z=35N=B4pK?Nf*98-SKe+W7?GLZl*6-E$vfOO=`A!H}o_gTXdJj+yLLz_e@5v zROi{Tggx+{8tg}INayLPQ~xmk^buQ2r1fA`=C+qA%7SyIQQy2Ok5~z))ob#j`X#kz zXfxeEbC%s%NO4kS9Ym_@8mM}nfTwc8j_mPgsjUgbRpLAqzew(DxE&$>U7}@rM(l=1 z%Dnc3Xs8p*V3yidmq?x{!@gPLS|haCm3+lo6d`hGcYaLMLgV?52O2lE5Er3@Z1W?= z#p{r>BEDboL6vW0^aa#;)I)0TwjHDOKJy2g0#7~1GL+!*+;%dzFdS#T+_Kx 
zdvlT|5KqtZZPFf}v(_xvyisrSaDVIoY}zL@&37V9`<}hF+lI&*?S(XvN3+6$*7K({ zc5B84=zH6i;xb+aiZZ- z&n45I#%uF#y*F=cDvLZ+^<;l1+b}#`lS7xNp5XKviU{h~yk|>PM~)FpM1!E1w z{l$#>kN<5#ogW?wG5(z9=7-}TUDM?m{N-u-mAQF;A}Szobt+%#|D34jhi`?p=mua> zfh1P|3@Q+-CPbP_m6k(DE2u~<&7}&VWV~hg;TFFZz!u`v@dgaU03kop#JxuV%Fh!U zzz_HNm5u<>n+N*{1^Whg@xy7L?f+}3`QgeCJ*kXx2n7{>xD&+j2TufkxEsW<>bJlT z$3k?=t_l3`jS!PgK%kJBFEI#<%I<*zy*2=nc=*Erpay){O~}C_1B5!B}>~vvNUL2 z>7}oQn9}F(EDxDhm}S-qlB|$wbSaJGk3yP#UinM5P%&~ED>)YGmt|Nz?Qg3v*IAK6a(HGq>GU}k4=K$+TLx@E|13sgp!3 zVQ$4ItYQ?%cqb69&>+Yd>7N*~%E>h{h_j%Gfxd=VA8Y{VtuO!q*@XCE zmx&pdh)Ebyim|tc5cu=|@A^{_1MoKb4mKdGW#y8T|0~9n;DP-GLyN)(+Mqms!Lg!& z7nTinf1okJ$i{TnUSD54Ng(|S`WIpbPv3x09X!rsMN@1`l{Z>J15P>&)*VH}2Lg~G z2pbUi2Xea=^snShdp&f0@xB2R%4J`Yzr%1f0kA-3zCI*fGX(njnG*vESW3fT!~;Y6 z2bOuSZy+$Jf8d$GBli5AoZO1Hk}`sT_ZB)AY{0eJ#A5ZnYZA*f5 ziz*`Pzc(5e_=Z7mi?js0r-z5TqgfheK<-gqB^ z+I~%8vUD{7lhCrlLS&LaFHK=Z8F?9bX?b~hVZh0t5rA=5v(htIW&|-!;pHL*2M5ap zE6Vr=c*x4Ds;bJ$DaZo98$cmV2=NI-?U(i;h>&G03DLt6&;d9*-2e+;0**us zTw+$~|B*n!It+^}z*dDJ*C5 z8=8W_6gcE){bP@kWj1mmC9y9Ci(jTDJMOH|k?|CSWn{vilUT)DpE>OPF_k;UIpB$QV0cbD=GjSkfSUI zZa}L{iX%WexD}NY71ULA74#L9Q~rq@J!mQdxgh-M^HKRALQT zk`YlD70FuvBTECVY1M{*F|?Itwqj=>u4d|s!MTU%0mln1vRj$FoV1*(&B{J5l5(UC zT{K7yl#+5uqa-BRGYz;D0JjD$@*Wj`-Xms70wwWpBC!8S3CR3Qlt0-`LGuP~aj5@9 zz%R69b^g~%#)55z{;XwE$}i2Cgis@GO9cEfNE`ZSk+!bUko^aGsZM0H0obp{(+{_Q zl>@ZQWscvID7eT@9M+%%#gcjevLFi7FNEHH0ayY7i`jt-Bv@brEO7fl15%Rx$@WJA z3K3)u2R)oOmb^)k76O4pF6a4s((hc3Ke-fpFEbwFn?;OyNV6_m-&l0q=pGXSgT_kTHYhPjju+Wv- zP~s>^4Sdlk{4T7IN1&&cyb7hFC=w{>tx0wWih&LE^aYP}xgft{DaZ|Qc>iWV%EG@PD$x`d~;~cdb_LH#7yq%F`h0ZABNqASsCLJ#m59 z9Vk4?heC0`ASsB+W{Sc)`GWG0h7VfutIhf!Nfc}p3ODvqM0k4xeRyjrD6eSer`jo! 
zDfr21(M9=@Oz5BbqD-J*GDP8gbbWmS1HkKJMG2HR3Q{r|@Y!9ZTSZV1S))MqOJk-L zaTF*@LQhnHpAYB)SH)ScVkpUod-q|{fqFPXAW8RDC?AiXmTUc|L<(j@3+v_n+gMLa zK?b;~%d8aYmm~_7Uq%6nNEM`#l#-k(QjQ#gk%zLp!cwR}#w>-30FRJgqEV6}aVe8S zFiHU<5P$(8z<@D`961Cd6aa(q2?)W6Mp+SzcSs?i2caq{Q~{JAB;}otQb-g^zk5&2XD*6 zH*2i_a&&1phsS9tDgetMjrRad7P372TP0m?g@2T)@pH%^M3Ui$3Luc1pr$Zsd`;?Y zmq&aI%cM9`A7y~T6R<0-n~Yh`nJmr<3tAEwsw@*MM=2@DJ=cmF$Pvm^1ELR_{B7|| zHY-Rd8xH8uV7;0+{^$8O%KflRhCm$Me2j*NFFYmno{yD>0n8mvCmR>v?@;ZKNCm6M=4GZysLw-7=r*` z?|*AQWM!z~ek*{1lDwimD8Igvk{qc%DhO4io*Wnws^}>q6y(4V?;m{27O;fJ{H7lS zQdtq9j6ftR1 z<>*SkK(atJ@YeUx6y7hr?Ds9K^%rZJ{?}bX|JW(5mXln$e^f>T)G!bih{ygnb^Bk} z9)Yb*|AR796pvI|G--L2ZvbU^S3BP2J{$DQKi78sF^5$SvQ~Z(Ny>w;G+3a_V+YFP zx96=zvFz!$Ojb-nYgt48D*4yNQU8jHaueDAo917o{$WYizam{-Y4xuN{=E3=H|nM6 zs?Nw-{za)IEt8iU{PMD0HP+P!`R7LU>ng85=DjM4oHs>QOVt0(Pc`Iqd87?>S9_UX zey~|h{^j?CRjeBM>z@4P1RpVfLN9eWKU0i#b-~x9JNWp~B3K#j5Hi-+Marupk z;M4z)bia$xA%ZWg0K&@8jaq+NVn`|KclzJuk-iMTk0SN~;1eg{mqzxpIKR^TE`jv5 zzx2}(<+nU#87L`!=QhHkFnF*uX{BMU?5X_UEY_sVb5-u|8S3H!L$v|}pJe&V>*(s&e zU$}o44WwbfPrsDiC`E;r5`X7e{zP9{o=Tb5a?U7;P0 z($+dcN~G`0KYT?{bX@;3&;MRk#(sflB{2P>>l=Vw)fSisL6Zkdgg}feB?Vb+y_$%Ty>snLSUuod4 zh}W!ZOb^VnF{)%|by4KY7R~q;$;x+49Q`cW<;ID|+tZPkOf2D!HB3`quHFf=!2L6h8 z&AQgq^;a7BE8;clT2t3wY2dGj*R1RRJ?i?ouN&AQMN>RjTw80gm)QXH^yFw%Kbilmjz06q#}d~l#23I7?uoO``U zFr0Ko6=wh_Ab@#Du*VWiI`@xr?-IP71Y^8?z@cVvQknfQ-WU@61i&YPh@`{LsCEJP zIO!ll05=0z6i@U9XIxQ{&e-Gh#-hLhY}BMvutWo~Xiosk0hl?!#!45!8sH=`W)BK@ zF9jS3P8|cZkgl&^2zjBJ5NVMs*e*jw$Os#Z#|8#UTY$~>PyrYrU0-iMuyh)Nmg-F6 zg4X|Ig(w-l6#Sow6mEA5L&kV->@+a;m=TDsPZE(&XIM0yk+)v!zcnB&w z20=U>KXIZrA&BiL1Qj+=)Q6j7FBDt*;jm~Kl0sLne@n1R`L6+r{A5V^t*)*W(px&V zO^9UGXmAW0IF*w?+Nw)P`gbG#uNf(dMNtkZupb53ln88JBV-4x46HQ+kJ|?<#U!ol z1SgYHt^uZy@L#e~NFaerb`1cI?>b>mUWT`rLLg*nw>) zA;?~EV9D+Q4ARNrF9AFTMBxM+`6xh0*UCl+O$-QHBB3Vzp#i60vO%2CMracx0&NB7 zJ1RiRkUF#-(ua0}Ga79mM`$;M1{+&?gA*Ntp##t%=s0v5ItN{X5}+GUGL!~oLAg*7 z^Z2HCpnPyLyCfrf>KpGJ~KjmDV9fyRR-nC3W59L*h?0-9=?7MebqDOy@u 
zZd!3#Wm+RzN7{X~VYFvxuhC}HKBRq3`+;_nj*f04og|$mojDzfE|BgxT|8X|T_s&3 zT_4?ddM0{7dIfp|dMA1x`or{b^!Mm1=o{$==;s*LF>GN_V=!mHFoZC~FeEdSGBhys zG0ZWtGm0~6GTJciV?4xog)xhS6Ak!75`%F)n-ZRZGuVCdIXXRj(VKrvOutu^bvKF#7 zvW~Gaux(+}VcX3X$`;3#$JW3$vW{V$*gD;HsC5zR64#ZiYg;$T&cQCvZqDw_eun)X z`!n`W>*>~ut=C_VS$}kW()!2iKX6cSh;Zm~pgE3kBy&`A^l{R1igOxs?&CbonaNqt z`I&1ymlBs9*M6=o`Qald4~uLQ3pZ!qt5-fG?vK2AP$J~ZDczFfWzemK7b zzZHKNe-i&o{z-vN0)_&90`UTm1x7Y;Z`!_T-=>S3DmD!YatLY(;sh@WRtgRYaS7=N z;f1aUJrVl6nSZmCGuHRP}EfPfM~jC z#}C*3IxMg<99F{4P z8Iu*2b&@?VTPwRDhmga|CChcnub1B`e^|a${;Pt70$L$ap;?hd(Lga$u~_kolB5z= z>AF%of&*cSh(c5$=8?+C0Av<&P+3%YkMdRJHWf}43zah}PgQADcc@0HK2V)eL#hR; z<*JRVORIaS-&6mjA+CYdNY?nEDXfXoyrub0OGs;v)=jN<+MBge+PAfPw~K7YY`?R8 zKxdoIKAjAmu^nh6uwj z!-qyRMkYq*joOR_jM2tv#-Dd8?+oAh#Dv+z+9biGcbCMjfL*1gaMPWp7fid%wwU>t z6`4ck#^x8yyDh{m{4L5XX)P@*6D|9#5kK+v%d2s=ROyN%W;=hS8><wSNDEK>!L5ChcVkR7cqlaO>8W7z+KZl z*8P)*rpE=3K~HVZIL{HBE-nH0WuMW$8~dic%)L^*7V-A@Y;St+J>Dfg>wUa@p7?I^ z-S7L_PtxzW-#dRb|4aU#155($5MTsXLUAA`7)-q+ZY3Tgz7Nt0N(}lQY!{rjpMAg2 z{+A&VAtyryLJdMw!l=X0VO0k-aW*4$m>wu zVY$PxhbNBMA1OU5aP;8O5629Tr5|TK9&o%ZNb3W%<&ugE*8_OOW8vEgb>4n0JLKjb6oVet6 z=~OhOmx-6(UDOs&&xDr59f{dj`LCY5I(-dut?~Ny>sdDhZk)a` zdlPrF{g&aa!rNlE%I3~KeIL3{yg!t)C#N;nB)2M0B`+&~Oa9dYj)GH#u)>hS z&qba^?}{CZUzhAGsVY@3Ehv*IOMS5UL1H;qc}xXk#nFnz%FxQmhkg%79(g?aP~}$D z`Pkuc^ApP__0=ZTPiqWn9@pyBRzB5y`rw(`vy$g3&x>9lUlhJXyez0g)D_et>kAu{ z8;W14zAAmK@w%c>yYW$zUQP~WiM@Z8ApQTEZRV_V1a$G4Ba{Os_#?@Pd!#jj_+ZTyxxp)^r7X)*bB%6n>V z`qcN0-|x<-%+$`>&koE5|DgMEWq#Xy>4M2Z*P=H#b-EL5Lrnz-FIG4exTwK@8hRQU zYHAurIyzcUf`dWMlvD3re!QW89gdsRPjEWt$_yiIJV;UMTd;r(dkOjnl9^VQG z3f6&R!>Op?)KsKWgVb=44pFnytluoJL(5@_q7(M#R5*107QM)hoJU+%uRn__?hQE1 zz{tIUXCvmT?uIP~S~w~5KA>F+bMqv4eWSsi~=`=}7g0!GlQ^ zXQ!swEKj>$$C3`^&mpXEh@NxD`CB=U7(^7UK6C92c+JQys`P%#7gE*8HT%yhcKE-l z*=ogp)~f?zrUJ+7vQx1`+R!|;x7k}yWsjf`W{HFzi8Q7~s1;G;IeW3uAg#V*2tV;g zEN1`U?xdJ)kLzCg4t5O2Ffz>YeiDh7>7|BD{pC80na)m@qty-1Mid;qU1V_H{b7cF#sWV79kpczJ99=yw~MN}owq}KxA&*d?0;14 
zaLo9V#7?C>SMKsRH=2~4%H};#F@f}wKYN}^w%j^a^TDb75}yM8Q@ESL$13nPnN^c_ z{R^Y3(@vZ+R%>w-O-r$2L+o_#zf0{gpp>RuXVaI+wV~s1Qx0wRRYf~H{8>r5(Rj^$ zFE=E$o@VW@j5qENYvC$kR(mCuZ0VnTXQse5xgiv7V)oD>7T+wkH?#GYqK)a+I(5dr zlgYz2Z+BUl%9h)OJnBu*%k2uFsy5xc$;|L=Msx9_Ve9ald#@ey=c@mbP|jbTF8Q)+ zCf)K`<8k_-{P_BZcM2{QewDn|*h$Za$Psu*e9CpS2&osGczsuIJa^DQiet@qiv0c4 z+fQ>BU3a6+Vmk4$Uagh7X&{yMns=abfM=gTonvyR<2U1HX}i5!cJ%7h(eKI(BlgX| zWpOu+6n%_RZQvXptUx}yRu-DBXC#x!0KAZ3R;Jd^+XPpbSIg+Js4&Cq* zy;7UnG~1TZz092L}h($vbEQZ%iQ85!j`lXvUlvlNz1w6zayOzu06flFBnXogF)V zYfiH{=TLNp0h?vR&Us&%npf32_xNO+f21lsvahP!nOAn+jq{%8?Q+7S;`a%mv%Gs! zX~YhP97-!YRjBW9$-*EpVoLQcvXSF;0Ykk;V`3-!u4 zm}M+WnDVjFMz1Z7(A-Tz>w^P<tP-b&@YuS9mEt@xNFE1LX-u+hP4QyJ;b zoDMbBj2q6<5VbgSi_Xp|vhw9*$2_sIdJt3cxT!DS-2AhPv_G2jY*|3kk=C-X{ahA% z2o7q()m)`gd-gDX+I$Y3c>s$e80URTOP5?<3u)>z;gt!EccM+gC7u>Flw^4&wr75f z*=eZnRMdCrE+!^;U*aL1CIt2A{Fdn$Z`*78nnZ6Yg_SjX@$fU|51W9GT>nA)0K6#D zz|7yNuux2nRnpC*QCH%lT{>>xJ(Ucs*6~wfuKCzk{1KO=tFxb~+r?ZwcTT=qU#!MX zHKXKI&dVOdzCL{qo|w?C@}nD-*=r;v-W|GX_bfh7q1W^QYem7Ozz+$pE2_>)t{*aE zmBe74D+ds(v8J2T?X8FzWhd3z^mbvMrd&X{CM(5qB%Jx;l*D(U&bB+xs;F)k_F*Qw zf{=(3K6NyWOVWD1P=05cU1O_P6m_yuPn;`o!2CbM0o7iTbynua=_A5Xv@akV-;9fBXJ4yiv9)H=9mUsJiMld+@3uj9kmlWX5*eACDfa8w&zsb^ZV77g;$zK9VC2sH>e60$i_fVk-Y&Z5A7&)BYg8C=8O5C{qV~=R zOG938;xKu{buwIbx0jx-P;tWD7lfj6eS&{mLVZGSp;2A#BGfT6Y=IJtp@Mzxc1$7;)siX6usBZA9SvFZ1ti5G2O z1a+T!a#q*LE+DkIoi)+)avzg3`+ZyZeW`#9`ax&Q16^$z(U$%Wqqp^+_Qq-SUac!k z&eIAbJi8~8&4&rhEIjW(uobuHmdG+O%I11BZsGFmQF3D6M}J$fE4c~D#s*)_jK%VJ zec$q4pEWBuFItVMBUW!jy2R(QDUEV}8S#zN8-NZ=T)DhoqxJaZT*=@dm$(m~GdDd+ zn4K!vly|GEqiUG?YnsdX^9eIly>ugAOOYpu_Y4+@Pkb65?34d`)xMcsr{9&CS%zS_ z|MBAwPOrzMpZVq19u!e`KGfcJ>rjn36=J0OLg-b8m(m&-X)EP$w)z_*t;LDYZBSy0 z2Nl#XHps#gjoU7ar5_v~z3w&0paJtub1YA}9;2CFWuv0&fR{ z#h)I%k;lH}k34^xeB@Fs z`g?Fzcm^i=)Tp}uv+RrmH1EQ^o%Lf|;1ue2VDU_l(ax9bqAl?mnGu ztJ+XwqEaOPk*Y3e>V{l4%XdO=YcuzjbNGR)Uv{_L={hMwZC{1nre=-1;T(1Mp1zWA z=ew=xlH$6KId!#8S4?+1cEksdyWhIdH*2l&-l9(24+rD_8gwsK+37&^^@;Wm3P`BV 
zQ3Q1;iwBZ)s;TYgx0!W4rvaPHI$LXQRO1+O_u`1nUSkiuNVK1Pwetl&KMB>3Uu)?& zt2%G1{20FRvN^>+&V3-p=`O~CjbPPPr~u0->1XtJ65jFU&;Zw=pr9N?y1!UohWax$ zM8>X!Q%w49H>Ep_ zxy(~exHAc_=d=a3#Y*{&d=s!xq;aj?D1LLcr7JGW%E8~FXSZ&uZiPFBnqMyZ2G7Qi zTg3|PUd`H{I&k1i^X!JpcbpK7d0G8oUIiyu4OOZ6Cuhw0BV5`uY~=D?jRR$_iCgX5 z`i+H9>l8ILeM&xLs#4=P=CCR|ru`7z({{;op~R1&%}#C6sb8w!UQ6k^Ju~%13Kw+q z*u~Kq+oAv?@zm@A7i5}JX;DY&`MQF>#tffPKA9lYh7)9;417}Kxk z77|jS{??>A7CvX+Z(VCy(a0A^n*ydqN%B9;wIiOiynpW){}*2 z{p3_9wH`Rk1?4P4IxVM)^PMtw>MkKfz;QNKP4VD%h*xdU`Hml>Qstm1P7`i}fK~UaI zPLsDJo7rr7x6s#!`tvTSGVjoS^^%jHQdG*WBXeRk9o;3(anVTipsxm@hpe`wD`67e zsTpC;Whh>0HZBGezelp`$JB9~mnq7zyr&CY~qk;R3WDj32h@OfGMHP#b z%UE7x>3&|f!D+x(%dTOk^#L2=z1(iv`cPW~BnSGy^8*0{+XoMEpNqsY>=E7(_)xp* ztC4cm=#E0ky>;KJ-D{$UL#WYonK&o23Wu3=hG`|#qZ$d7fz8MHh8>;yFRS$x^SoYTXQkqjkbv;rLkU!$uyvZ-lOl5_OcebX)SaE-HO@s(my>B0vM>8lnhaE7hwpCvL zSitMZ7vvFanv1J04lZ|vMoRm{raL)59KSfdDezP};d8oSn}mq&+Y9CoN){o#SUSvk zXXXsWV)t?U>MHLlAqm7#3&~SC2HJYy_q~0jw#wg0C z(lbFraGwKlTLHC@ST*`F>tq9Bo0hvbpS~l1YPBzO_r4B|BD?cjC62~XUE)!lZL99+ zyWH-hQIVAq5Xn2gsVL7bzx_i%(Oa%}ix9UKRr}Be>$eVf4?8AGy}cTged}4C3hLva zZTrl~fK;3U|HQQ6(*Rl3Vb{pVoxTd+&30TmW^#4GW)a$O0NJ9kMST3jw|9~^kn#@~ zAq_kC^NNG%B2^{^gU1CV$5K^#x4um*Z)o`R`Z~wD@_cXfgJxOx(&Kh>+qWu&ew*M* zOzo@_e%N^>qt^53M1~RZL`Pd6jTqe+u`YNwS5v3OgQm~Cj_ovzJw(kfyG@Pe=+$rN z8{Doobr_4_@|+&O%Q9TJF2H!)&2aFhF5{s=m#C;8TBYIt4**v{sK1mTE+kROuCCuJ z7!~=>I}!DYoX6hbX!{s)noBpmPLozNmtI?7nw~qam#uk@CvD9$H@z{Ws-I{cwCLN+ zJXBaNPQNv1>9FpT)YVAgV7bO?M^8enyR}Ox8}=~>RC?Bro+DnhksZ+Ey-c!i9Gd55 zsG4ca+*3TltBmtcRobTlt4vuMkQMG2ob!swnodnzx|FJuSu;(7YgmxSTuuU39rWFK zs~1;Yd8})zjOUu#D9y6H)SI(eb9bvUNxMANSsUf`u85ejoWiU5C5>BU+-P91!tyqD zs4Q?ZT3*IhAxmzRI@;#2{u_9Zc*{q+y%MauFRmZ}0h9|N3a13~j8|8wxAtzIEP+eQ zv6)qHSYUnZ5;6xFsEtJ^^R0uIp^>OuTIlvOUSCfgt*k0p<&}b{A5&f-@h{^gviO5e z(=1vmizx1G<+w4ac?d5X0fT@zCp@3<^Zx)Ae0aU_H->J!J*Zemb#}K`4D+&q<;adk z&8M*pG3Yw`aM}t>W#WqsW@#k4jV@HRys?8Eg$B}{ImSmkj91HI^Jh7GDq1e5qely; zG>@--EqKz~z`h54L8Mrwx{R&5mcte*P86^40iSL<*TcH(I-ieYXv~sGt+}_#$0_q? 
zab`S_2>gDv=wBQ@DqCyb8qu^HZF2JX+oCN#%*DLI<2fL4@{A61)7QOu8KyDfCl?2?+lRyQLtWU6+MblrSUk&c%74}t1eFKo zZZrHRwP|XW*7G!R$ul#qTuhQ}8&r}NxHvrHr#bIRx_!Q-bl3MW?TMgpCr9~q>?0T_ zxg4D5j+I)+L$}l+w6(Slt#KnWnQ+_Vz&IrI$;YoauMY8)Z?UYq1@+V#t*dD;qDa3a ztm*@YUUH+S%yZKn{c8uyvi|^}1i$y`tF~P}&d%o6)#6+4v*sV){JwLLa*7lUr)W5A zo`bz&e{EQQ(7R{(xA<2*IYr-={{Yv_NnHIwR$1ZN)r}5?c^0fEf>a#m%;>||`&QCI z(Y6S|tB|Jcqz;C^VN-F~@p*5#+vv9Kb#E7#%FIRyUs}+T!|eGO&!MV*9h3KHF^=`W zbfQRe(ANx}r%GBJmZfR)791MMlPTAwbeEC!2C%Lph4bIb6qP+E~WhQAx-<4d3O|_Vlik>+) za5~hKYAx5jJl$>>&1Y?8=8ep?79Go`TsK;Rd9o_2PH|Oax-?iWU0aQ<)|N|v)sZF9 z^`@=&y*mo!tI0UcbDD!^HK%cLxlc8Xc_!@Eks+AZeNAUcCfd7n-D@i5OlG!dt`DtMh`FowgHKD0_pEihJr$-5 zibY(z=hAt`t0QjfQ^y`?7_7vKuOphcsy5V9MmFYvAc?(d!Y~G?d4*b~S$*n+Yuib; zrE6JEe85S?bJo`bfNNG6g5w98$&{JZTc0s-PX?*YCy_B3szbF(R_|EtFb0$DSnpL~T-9l*R_nN|hP=;uXuR9rorQB&TaS9BJpFm9l3$Nn zr#0qt%~C?TCApY3Yc@$Xnrzou?^NDwn%*eNOx3i!+`&y;^T5qu!#39juKD#=1kFz~ za`mK^-|dlkLxRT{tlzYi?^diXkJ=NzG}ZS5=WmW41Mv=z;|Oo;EXjrxS)-aR4s-W^ z59?j`!M$U|`lLQiv25_Hc_7OPQ}`bB?l&5H>QStd1a(w6EIv_>TBRN2I!(L~TuU66 z3^xUdcp2v(t$Py8ooqa4N(r^sspZ2jRIw7T8MS_$42??MKG@}vcCp++Jl36+nn`XW zhwg!#isO7^aV$5BG-GHu3R|}|)98AamkJfjZUZ28t|`H)YDn&kmorCMrNZB3Y_>}A zgHY<4DAg_679~OC5!SRc_|wVUINiystG9~s+?-`}c3CxMc*l!;4AHFR;ErTv!+DF5 z_5Ca5PaAk{KMCr=p&PBP6|je*oSvT5`U$MsrOmSfGsS%6@zdeT>Y9D6)x1r$}YmcQ;l&Qt)c^OV! 
zR)s}UTQkz_c6nnJV|LX!;<4elFt|gSmr%VB!X29ez5|uq`-|v4;$@Ovy(we6U8e_y zu5xWkR>ZM$?dwmsx}HP}w=Q`*MQiNcjQPzQ-C|{RR6?UXvvkFDdLH>9S96~AfoE>l zQldz?UiH@5TdW8bMg?P0q^ygL?{j9|m&pr?mNp!Yl}SET7&R)$S0tL_X6U`Yk%Ly@ zljZ=PYZdPTk&rimQxsRP_ByIin?8CYJ8&a^kVJrQbt7 z>u6J&=I>_O#<#AYf=zRJi*k6a;$T}Iy%_ieg>X(q|_V00XD#HTfF2{~e0Q!K0OJuZOkkZxLy>lg7a(ngpGo&&+{< zJdU{MjP)GWpo5yyGEH3>m6jUhekp2KI%kIU1V|ycwrgT#K4STibHi*TZ&T9%j)uF7 zmirx?*9#L|c`Lo6kM9sUIQPeH_4Bvw3Go-hek#&ov(ujJ>QJ(nq-gOIF(edqJd6?7 zj{R%T$Ks=hqT6&w_tTwlEHT;`mAlb*Ar+XYC50sI~>%cv< zJazj(Xu6(<;+;8_XTDiL(;qCVM$j@3%F244;E;NX@SPXM(OGzJQMmrk+pR2rzL0~I zRaU`K(44Tr_N)yi^)+ZDX>OvL2p4CV2hOZ9fKGb<04yJ+d{pyVijsvrExGB&gn6#| znD+X$-Sqb}M(B*q_nI8;+)m@ysXMzh0UTgO zyArEpys|07H@bu9I^wc?JvHU8i41peY_z&2DdSU;yULBgXXQTQ-i>ouhTmVB2*H@G zym2IL%8Z@PNIgbye+u>8Hz=r>-p#YE(si4^5NN6%+CZ`^giIWbs=sWdW3 z$8pH7)=}iMXF$|8%!on3uC%a{F@szktTRO-kbY5JexAl?7n~Z(YpJYR9ppo4Ju43K zF8HneL0N6wrWt_fYa9l+gmJaIk4jPQ4|5twsxK!ajPYn0BTGt7djl!L556c5Y22lsJt;sN2}q zt+XI8#bGHLq%+E6)NxIc+O+(kwxg0UwzK9g#vIWVtm)USZCxnItjmPXc&!l(#<*g& zNv;Z<=CrP2CbF)hE1K0Aus1}lQn`~kqDCUATu6SEX%besZnc+j^JcAFNmM3!)ey}4 zjx$uAOjX&Jby1o#n$XRWw6yr4k{0{XT25}$(PcjSvK%$NhTFiNjWt{ zmt(lWtqWVMuoZ(GnQT?tcV%vPq|jXH?e2SKqMGRLKQ(5>G3%Zxy!W3m=AlHpnlfHv zy+GH#^_3;wbIm|*PdwGAkmPJN_q{ORd)7N#lTur&OxOmUiMgQnp7mBs!$~!}Mmpx3 z9k%XCstrW*nZ0pM+*^vZ<%AUjkfhZnD+sJ}R^yX!sw&?$D$IW^Y9w1l-+@|ocAj0* zWOU@!Ej4`EFnF$sV;?E*2U>|cXj)e+Yg1gECE&I!ON_4xmt@9C=CkhSg@X)%RT`4m98twx>14pt0fww=h}uhStTJ(3w30({x)qQU z(2B#NwlWInFBn+V^!Hals zE139!<4C*{;hWuBW{)wc5s4cL&x3$__4F0XD=W#HF2`4KbO<7{F28w`Ow)CXU2?`d zt2yJig@8MvV89Xq#(jAe=l=j0zA0OLJn;SNMLcV(hPAmEshvns zgQ+K?6`Y-s$^O!Jw-#O>(-zpt48LrNgpM<7Al?+kDFhYHMjO<173SX@J}z3?_-|40 zbb6Xy32in_JT{(M#^OaDRBMs61O)CovPi}&%)Tsqa@Txe;36Mfjy8rhN3qyb&R|pL z9Q?!|n+@1!+PJGZZZBi9c^ov-M8fe@d5SqFaU_g(I0Na$e5Nlnl&>l}CU#SxwwCDh zy$9oq_&-|k;D*xTG>Y0dE~L1aIlDXpbMk-!gN&1d(~9;#9)8hQo*TUI?3S@Y>3wSR z$p@GoL}CDqNCb0}&!_2MB3o+aO@GQugABU~iZqYrUVE-_-}zI#Pp9f$F4Z843)wE^ zP$PN3Sj+-713VGb06z-ghG$-kT5{j;Dy0a?rjMxnCGqD?ytVNy&c51|5M1gJMlLk% 
z$sM@z)o@=tH$q3aHQ|2{?(_{OSh~B>bsa+P&PVds?ah$npPBdrkV!pzRX+{Jnq)5x zsclF`-{~cYK6wOhcKNahB;$^Q&{ig**7sVpl1B4E4YQD|A{X4fNdT$tI6ZkM*0^h9 zYGUIWbJp72>8DOK?<7^YzPXWJD=`e#lkLb7F7|8;kh$o6`e5|pvNYzBIPR_OgWkmq z=gsot1yhn7U=TMA#ASi#M{4ObO;XoTid(s3Qf=@M2RK33e?Nu?8LD?09R42h7OOU& zC)kpEy}|%+SZ8yE#&ga(k80*tQERCD(W|D*4fKLJU0`KpjT$qwC^^GPvyI36I#(~_ z9X4ML$#XTHrE#`N4qjqeN|Y_N6b=u}2RI`=O>}y_?yok2@1o2LZya zbIAHvnD~nNYn#au`sR6Hk~USDw+u)C1xOvbpKkqWVW~>@dlk*5vrF$kn5NwmgzE{f;+S_D1eOCy%^)e!tSqyqbQsbsda%aa!5J z14kRK31lRZxL}e-7Z~K4n#aR&t<}b*t6zPtBKy_|K~?9HojUW-SFsn#4M~#b*t_8Q zEH0$EFKA}5jC|XU=NKT29Aq5&8tyzr;MuRDn)^;`%Zr9Zf3wWXwG$Y?&ulk6_&-YG zbiWYUO@9PYSzC=F-wx3-d7n0TQU_7SfBjY1*!Y_7QNFrap}|+=!7kj!JFy=s@(ASc z4>jc1=Z#Kvp_`O;GxP}V-o@=Efnd0cB74xFp~wItmHz;Dr}Q;b`xZ~`zy11O@U1OU z;M{$k6^K@pCA4AnHd-i_0q@t+umHxX7rau~`!!V2XUg zIjtylBEK$iSjieAaU12PO~|ToY5}+=m9Gb_L02o? zbu~gxwQ6a@W~xZSv_@>HcNpp`BH=;EBDJn080N9>BplX=mSxYQ-Df$h`KHZik(*?4zAC(g8f24X)MfMDtb#!pCZ+p6+!}^A z#c08XQJMiOjJCHD9CxiKYzp-h+gLWZU~56H3h|7JhRqRG^z$c5Mbq9PmFg=--syX< zHEmU9T#gM#GbrY!Suv8Q6aAde%D}Q=^;Tnr&Q3n$lkoq-TnyE!iinK+R|H(vCf ze(v=|xu*G;sB~W<-s*#Ikq-w4I5I;Pn#qhVyVb>)P_LImE$#9XEjJoi{^K!7U0%RuRFJ#?iJ`B4!BPxx4sGJJ63fSmCb3rPPaw0^C5Cq<_8r^#abe28goS~VVAE= zn%UE5MYkIm9o(Ezc_@)iYmPE;ozc?boNRpSr|C24ek+n%O-i^nnGnOi>}pK zm`J;eU=HS@)h{iy_>8wRrMv)zX=QDy2{;)irz8sSFBbe*zt^MHv~4|=?yYr1+V)Zc zg4uTc;=|`G#~9jF@yNz%s#S#;-b5(5Jr8x)JV6hIQ%{!VGac33cDuI)5$`zMbJ%}T zT~lh7)^S4f%#f^(v8MBl%*P6&J;^;QUTR6BH}--`)>jsWMYnWf z0C@|pNf_LyHsy28N&7;4e7e%)@b;OYYb~h7cW|;?U0HBV}BW#d3C^V_t9Z5936>E%Cmm6fP#y@9e}=Mt*mTZ5xv>p5y|~5acPqPDgRZab6W{!B%dX(m$onnA6cS z=`AngZMTAaFQG@MYuco{rHvu6x`12#o-}oEvJwe$%g0;{^V1c^{Ce?rrQ*+s8pB+p z9}n2XqSiZ^6}A@nvH%=>+~v6gis61cUrX@|#|C?CJ_R;TzJ2ASyNk^lfsyHxgUIMH z#w(uHF0XY>GS^hNzMb!ws|E8g=oAH2JAowa=eI$QmEvP@(yJFr4@>@E=5@w`ic;!j zc!T>#S~@Mfx^R<4U#jo=cQM@YsYY5+jS7%=EZj9RFc4! 
z$l6r)_04$_m$izD(J#cSYjcKe9_vT7zST-HTty>DpJ!qOh*)GQ=aGZh06EQRcp+!| z9G4SGD@$?n#IBu)9m7o=*2OOVQCp+8`W|fJpTX>^UZY) zshf-Eb1GWT9~+|ip;RCZpx^+1`t{R3c||F87M%re?Iw{;rubo!P3GQa!I47%T$~K& zsm6FX_N`A9TrBW;@l3_!gsDg!fo!M>aC%^nfBkiv<-@N{=V~a+4Tk$kkC@0O+!t_M zpSnBX=Ze?z{@ryQil9qbq74&B6nP#18qBgn~dBSMO#ypl$DV{pg=k~?!*QCTD!#P2Pw!g+`H z)7-f@%M1cSoaB`Q*!^n9hje&~#zyqKjYvhcQg(nia@aha<0SUaHQ6}9#mk|c+0a5y^GbI|6xdnHXl zu9_h6U8dcx?|~AY7e0(Y>OBTUVEBVY)?u@sP-a_)jRLDEc4OU+2;(?ks2tZl87_3B z%Z{kvwM$s^o6A@&r&;7I(uR$8u$!hmx&!!o*Fx4hj;Cof8`XZ!c8qR(6Up7l+mghQ z)MWlV_n8&##1=A(nJ*;1FC1_H9hi_*l6gIGkA9-PA5ifAo}NX*U8wWdV|l2<&1FYW z91d5mNyc-Y{e4>16*U{~WzQ7s_B{U2!}_k3;y9zWSjC!@dF}!aD+Rjq=zDwg=Dk8s zvf224>Rl!k5(OBR;4aWIPtHAC2ab3=W~tff^6JHO_@fZqC^=)7h@mHUY-D_=@#N%Y zu{=?(THRV++xfx`Szu_@ZOLS1z~CNma(nt$1xGAI;=6xZk?!t}noG?^Y;7##lFBRN z7ndX93$$mfK#UQC*mIHXT=(`h@BI2*f8W_EZwPDpL@~n?M)KUqHix%D2Ukw#AQh%r9iU<;PF-@Z+`AU2LtvV;x{mehT)oB64V5)4 z7saE_c^zuGNx9M+pnr;)CE&+e=bKW;dShyN9cglhF{yKT*P6(?yX#O(eY-r?ea*&m z&02{qjk>t&&0}3$a(St4t}80ye!SM{8Oe;EdZjGKnkJYxdaE;Y*0e@KJefJB2`UL2 zZAIr*6LU&AD)bX^ipGvn>r=yY6}r-}(>hCgkCz6nEW6ZJ3|FHA2ChSQyS-;)TbfHD z-Njvo^JoIG*5C{r^GYv1ZaP$0cQ)jhOtnsKM?BR;y4v4*rE7(g9Mae>Q@XbKcP(W~ zrnlP;gjQY6xl_`uG9-+~=A+~uD%(fxOlWHEW#tt3pndNtrfQppmnXiw4<>ciJoTG zGwG1Kf_%j}$@HzAA6FL^qjCAGrqwifWQES$WMJ2u&2a{ge=383cweZln6+Is)XGcQ zv#o1@&46=IEQ}`kj}^yh{wjO>=R{H?Q-I`fYo_~htYO&o;)zj}H)bf*Qq>o38+bg` z80|vb88OXs{u%Mrw~9ZsTHJP+cu(V|kK$X) zNTrlXdcovHg?8>=Fskr*&H((6rEeKSN>h5hjHKH4k@Qu>=IYhZHaOz2V~*k%0H2V5 z7p-Mj_{uqFVdcrRfO1!iR?K>}z35P~u1^7T$>TISGo_u&Uvq!&2b8&w01|Mg@%`BXC|X_ABAscwu03mvA_F7clwpi z=~Z1CCQ%vX_+=%Oe7iHAayZR9UehgZC7M)`CTN5as&C4(U@^fTaDd*p&lQ&1)Z$HQ z(&A`Ut{@vcxo{nN?Bw+MTh#CerCsn{>+17c%=W1Sk(CS}R@^byJb*_%6m;V?^Vq5L z(@m`r)fAIxp3g-yS;W>B{!QD4mAuD0SwSkf+(|gYHap;Z*7~#>szCx?MfUQo70Si9 z!VXIvtAUai2d*-`t2QZaf8iv!GRO_gajF=|42C=qfcD5I2k`W-LGW&bkobWhxJzv} zNn~?#buvpULgp6X7bA8_0|0Y@gI;YnDwB*G+;&S-$#sk4aPU~^(n+XH*D8+a*lnSD zmFx!Zz3N9b)abeu#+w*Pt~~hwJ3PcH?Z>@fc(cJeo|QBgHZ~U)O>of( 
z*bpQELE1KwGNk2;WBaw<>9?0wdMxj#NF%z6J;h1NfMigkckyHqk~(Ddu9r`hSkcQ{ zn6vnb$zZtCtxFkg@T%Bp2y(crIg58L4}WY(mlcB1CIXyopsuU z_M3YWiRPA7wSD=uUL<~V)4vC)BxfAgFSyvrb$e!tiz!%#5#bkUC3}Dl6myK@9M)9g ztz?R6oc6cjcavT%Z*Os?+WCHTT7v>q{LA;U+%Usu9Q5P0be29Gzk=c`J86E|r(F=! zKH}}UPV9_l9Wl@0)~#CWlTS2p-X)7p@i&sP$+|EB>J)SX{EFJswT)KZdzYR+Ib%+d zGexzAa=UUk?hilAQ=H{(dKp<<;&sc2Ce)E_?DsM0Z8jDbmoZydi8CaK#!oO{@G^Q3-8*w!uCe2b%cV$-x0PzEgKzhh z#_mVcr+U|GTPx+6tssi&rZ|;Q?r7K+8%b_gC!ogzKT7A4oK>fyVsbg1BJHm{S*tdW zC;B9?uJ5!j%Lr??F`uCWy=dz~ zH5WFP@7K(TREtc^weVeyjh&_Ep9S3T0s}fTz^}U}0Oa(?B=hN1S5R*gc$rAFw~{E? zyoHuBsLVkDPB1f-WAE?MrMS2r9@p+{XVe}IK0>m_8*4T;?H~cusUH6I&uBg(X}m#w zJ=u~ASjz=me5--O?c+T`2A@YjHF!{jSq7v4+cdK2c!OIaS&= zHclN9Q0NOBr&$EBy)tw7#^KJpB)FIuQXfmk~ z`Ll=TaUh-tun*Bz{oM^AHtG4Bo20be}-cK{96{l`c#fD%FODMe+#_Uy=y2|yeUpzwC4V9^u z;4<*lor}4kw}*BrPdGJYBW1D;S>2xWni?+11F5XrqubJ~26bG6kyS1qlbR4SK6AFR z?j+j8jtyPBn4IFVuOuS2iID#Qw;21>6KV%>!K~@-J?f;FSnpbLiL+nrn~165xZ9jI zSG{w>@OL!-0JnUk1J;KS=0>#FqbHiK*Z%d5CE>~DtI2uqPn4cxeYFAWRPHbO;MPN5 zA6l;^=5x(85_yZ$T^8?Dt|S|J)n>c*s?y!n*oMb6oYWIf?@coX?@VP@@6AyJZ!YSK z%*7)wI26sMorFo}d()C*>s4@bPE(Hb#%RZI!@XX&ydxEp8P7FgTW@-c6|u0_n~w&U z?Vg6Rgp}bWPbgf}Th40CS46j3#aF8_F+J*91kIJYBfUS&5-tx~$`0L)IcAgl)ix^S z=8xK-boQlNf;l|XmACX10ac;qqLL48D@CK-gFv<{y(!#AB$0Xwhsn)r%>qXvD>AD& z!l^hcahwmpAIhuEY*mgr)uu_Fv7GT!Mx~A_SpHmswJdAU;;cmWsuQnDxVdG>JXBCc z=U@R^w)%eM=}_3r_Rj*WMhfQ@Vp}sC3<_$-e(ot;ELOTuz})iSo$;<9f0o1;?BZ!I!U zHDzzOIW>nJ>~dT^vdV^d)H}u)ZdEzKBhcXfM!Ag_;x(qL@fTL`$~vjBw1#0L9ByWI zRv-ER=NZo#?Tfck?s^n+?tGrLv1c&A8Lx-FD*o2fXulUUoi^4cw$n75($hJSg50Tv z)P}}#GE|HM>N<+j_}%+izlZ!u;QM_Z^9D6dLgpwJK5Dm^g*Owl<7w%TcjO)`j#!FX z>#<7b+4queRs*GZ#=YTJxofE-ZH7R4j-LM2!u&Y>r?kCW!uoCAxhmU4@};!b)3Trs z_GOjH1Cxd&cmub3;Xi19+O0ert4pHkQT?JjyBMZfBtjF*aK)G~InD^^KZN~jqMj-= zS2|?Q9Qq!!XC|9#r0Mb-%W-dT%C_##2bjbX04KS@uRHjW;@I_%hEZ#fUp<|ZT(zUe zZrGH{B?^xl2}% zGDi#iqSaIYFi%xw!O6$WMn3g?iQ=yrYByS3O8S8^89|aT=Lc>Z83!G4)BB>gejaO| z*m@qJsA~5&8imdD5?VoNV1=bdWCkY~;Ric0jDSut(!9zUoaH62y1ScH!>F$_+J9;P 
z03BT|jnLCI8$0_JybwbeEgY8aqB9~p0kxwfjD{nUI|}*t!aDu$hBZqoi+j6&?I{(A z7R<7&YBwQFkVbKmI^zJ03a_piV@Q^43%4yL+-&1d3V8;+cF*D_w=afc z(d3a~(iPfbvBsdg@CPTJiaU?H-!+A*YBE{rw^x!$<;=3}R#3TMGxLH*NC5CS&MVTy zaUEKe<3f~d^t-(#^IVnn<>mGDp@wyi;K=bLj7i??0AHIpQI1bx z#(k_wWpUx-Hl=%TWYkOjp3>eeq7)|pmLTIGZag2U_4(O3&hn`J=daB4?4$0jj=Nm( z&Z~7SOL$_IXPeLaY#@2;nCc8l<=LK^qw1EFjuca=_pYdz199Nr^tip~K|C91Le6B(;%{ z?FW#j0}Kv&{Q(ur_;^Bl+X2E#8yk5>NoUMrrDOS`&)=H~X|ZCd8wqqeCd?UlMWndsQu zLCHJ;&1Bo@aZ5GCFkDS{2n0JR;c_}EgPsT{pVqmYQDruk16R|U>3WH7Cq8tnNjVr`0_10b zkF8+uHo3YOTdF+s#On4s3^sRxqnpk$0hEw3mEfFo%XJ6Wn!lx6&wYIvyI5qkwvS|C zvANrWv@a!39OwGh--xuEOG}7E5nG}Vvb4`J3LJD*KCAeF?M=GXtt8bW(`>C;3u|%< zP3E(#l3+G6atSyXC)2h?bjqhRESGbdM@-iGRkUkqa}~LqurSFWStMpdm**Qs3uBIY z`c`F)zL_SqaVxanY*NxU*`IVEU-BM;r{>-9Ueci&kWK?awHP6va{p>7aoJZHRY-^X~{{pa#WO}_a^ZcrDnb` zh8X4hG!e=qXN^G5$_U8=IUh`M-nwl!#z`iy($2zkl?$hxaj|1U!x+!q13y!P(!8_8 z*k(r0l^o<1ogC#sBoNsAdB=Zx=yZEKTQ~y6ZzT6p$af@!ukKWUGnLLUo^#aZyDH(} z>BpPU=hdB8h%^zcO>Z1G(cIiC2R8`He|3U(0Dkc}7{*0%ddGsiM=HT+g}2U0Mn)YaZ{GeXA@F^ykWDOik<1g$%wcfjATT-3 zaLvX>bLuMAm*Ia7+FskmsahYl$XG4UoUTp=0|Ax+jz0`~)kRAUdOPc>i;KAAja%(F zW>y&5Ry=nD955r0J!-6aytca1-^`K;V|CiGppGrek&GUg!1m{geAOj)cYnA!wYk}^ zjh9XraOk#xMhBKY%mDK?sNCZNk}=o)qg+q?EUys%0Dk-Pm;7s}kHuGa7vE${d#SHv zw^<_*v60DaoQ(8NK*k0D&!r~s7Jtw_$MpXI;%g|VtGcmQec7`2wUa+Yt?stq_pLj7 z&B{5hV#4e&!8NNKpf3jozh-FWIy-yKxaPEBxZDP7mxkk}4Qtw5WB^S=MWbXgmCr*> z^ERHe)sJ#y9Eyv}EY%|?d!q^$Q^NJD7CN*SSPI~uKx;ZlJf1Vfb2^puT284P+YD=* z?m$RBxfP^osp-_0E?rMhhir?tHCpP`h$j`1rFf1jmPUyAifeA;QHEe}<9Bgf@{FSF z%~X_;&fQzOvl_*`Q-hk@)isN2H6~|N9;UGFFYT^AXysN@(1XQq7|PeNlakoyucRnV zWyy7p^|z`-tfiNl=B}eQN3k#U5I#WQtZ8zF9odD)WkDk}{l*MM(n*4tvpIi6kX?0;#Yn+}6x$t-&t3hf_9mlmwUv>{ltqO)` z3c-pTFb9xvoFBlC&aA4Al`Jkv9EZJ5vmU~r^2n#J%i5yFT$bxoipJ7gncK^THQda5 zReuQhn%l+t4W6lLtgzfB;H}giglE4wu4Cd}xHO*%-GsREuN)64Bidv<_Co4gBx3;N z=D1(kug8`br1)kli6ov2QFV#131Y{104Um{D}YHOIPF~WnvB$0XbdX-@?BZ;@AE?EV^{-d1HADdT>v)?8)CF z2N95d?id^r2`kff0V$~3{YH~#(@MT(2&q@)6^&=&3u!z-d!wm8YP7eC;r%wba6j|| 
zQ)-u{-%Yu=7?ER*m?&+#xabe5=}eR7PmUiMn@RY$py{@AypUM2k>f=fGL(@ptnBz{J|6wv1X0EF}6wxM;Y>QMN8crTdA zc_8zq`=vqxz{uwdfslA4a!q{?@TcQcy6447{2yf;ml_qVlE#+|Jjq&REty>9nL`W= zF*w>fXBEdzx4heXn@ToF_NKAwaaFFSx6{!c=2eLvM1@OtXJD#75=CX}KM<|7zld5- zh6<@`=p>ZNe}t|M(c3u5ImZ~rb6!9DVf;js!ulScqgl-icb79qal3;eY;ej~o;ep{BRo)qY^VWOEOwSu;~66(o;e;a4MpE`UPh0;m?O3^S-SPq zT5g+vb>>8mYYCONumN~GOL_uFQ(ry&8vU#^y+(U&GhUZ{)!oFRJB7{xX-?H{I2%qz zGyN;aF1|c!7v44T<;~G~ZSUX6F#V*MQ4nDQpcyO)B=o@tBdDtS)jm}yvGTo|KCAd+ z@d<8z7Dcb?MriH+(8#wFjiyv-K2kDHc814O!V||JdHmnCHkYN{c)s4{owi-9Hmz?u zI7A>uT~2U7+Fyf%p4H$N9~SQH{tVgZ@lSC%GDmD?lX+Fz%OO-ON`aLb052He=QZOW z+V?u3=(!BcE=|wMR({pIkQr!JJ_z&?Q{6zSFYp6<|OqRey zDItPzIQf{I;A1B~{AUA7KaE}-UmI#Z7@Far)a>I9(MY5-a9N1s?u7vE8SCv|8Tbd| z)&Bs1z7K0!eU$d*?Vu4`hjL_80aUt&&pF(7gO8YyyjPKUXT{=cj}U7qsovV(x*ETd>t zo)sXf;}{G_@eKPCE1qAB+NO=;YmXB{40FeK=EWYHa>cQ3cdEiz<2zPN0stdFO7iyA zFT6#m!+m-byCx)%4Zdpwg6u%~pT2wjYcs|kBzWO;1fdcndBOQqH>vzOdsoiY#a5|P zmNL-jP>VABNvgZuJf?3n`Iu9^i|dwh?4^YKSPpF zB$7!alUefVR(gev#pIq?o^7TP6KxT)gTWZV=lRwD00LRHo|AcdFmTezzieys3?7`G zbDn~;om8o})Y38ckzyIHbhf(tJf>TDm&@MRA~1252#e*x<1t0^}SHa&mfL^sY+c5p{hO-glnMDPJx}&fvU*z~F#D&#@Kg z8Xu8o;oEuUmFy9%_nUao10igbmwoB5TuJQ5i9fItTrBj!Ae4teRGGE?P~cL%pAPfyUI zTNqC(H6Q%8qL z)1gaSc@t3c3l+;c#H=zD=zsXlk%MBfHG=1BbpmHuQf|4CXnJNi?V>t zoB_4Jyh-`HXtUXp_NdGh5x8Z5`EWO6k3u-jZfcjYZ7FDDxxJco zSehp?c}vJgAm<04&~(NsL-M5i#e-h7ZZWzOIS(Qs3N~fZXp12)r zM)O{aT8?{2Y$Lr8D-}!yRZvb&SCO4&wzj=8x0ob1mi}^O*|J6mC(s_h#h+@=*8DqrGKc=t zwbU>-U6=vNB92^VjN>>RdBNI z<$cVhEl#p6V?^;JZKbMzWu(lqDoH0Lz``yF&jT3i!SBU=WBU*I1$B=Y_=8u}ETNj> z&q$ip*+7vU5k5gNVRi4Yu-s+*G^3|mq zPDoSF7#QnZ*qkr2#5k{q^gE+2XcRnA;-%D~f(WvOmNouI1&r!)O3(|N^ zZWafPWN0Kw6o-_fuw~j<47PvX9+jMyDlmHfr=2OuZj1V4;%^Q8oo_7d9yvqE&Px)Zh8SS-Nh6QP*13-tM`w4e zBz9V$h1MlA%`>9y3^)K_5^yuX&pm3MyEDUm6wyWG#{`6^AZ^+~+QfGNp4H6iI%x3~ z(@P6I!>akRTq|vfLH_>$18#B#f1P#FqSU$EjIQ37E$UhXnxr`B6vS zC3`k|XFQI*YroPiH8|s0FQe1oG7zfQ3>k{>MsxEW*dI(*LqghSm-A>*L29uCCn1AK zc8`>=&69)ABaeEusCa@4tt!^)Xwu;R@NSwMNt~PzcvH701bXzTttnPYDQH7vo-C71 
zywwWLryF;+VA5>I?~35;RX(JV)L?U56y7ks{?!j}652|!?6M*zt{Howeh(b^*)D*)r>QZj!W4oA|Z z*EI_p2_>|!O_wqSQ-))@Gmt_7jC4IapGuEy3v$9bzP*I3Z#CVmt*ojd33Ac2LCma+ zl~NAVjGl5cz~u8@d-l8k0Pb9W-=qHk#a7VN^<7<}euiFC0w;FzAA1Ds;Id6V4E&!NtEl54x`hYN0H zk8X0ci>KPpYaD_bi4b$X1{n*VO7w{=#D913tUHTvw-RlPeEo4EX16-|Q`q1d zz>cD+-D#w91$Q@ELF9qOQGGLZL96C7aX3?ERk;-Owf3%_Z8;T=MN^s>;8t_OsLA51 zn4VKHY!9V7>}_0U!oQ4Kf5e@8U(=u|6WLA$vM~YWi)hPZJ)1oAN`Ein#3G}F;IPz%IBqT+(#TQ63FU} zD{Vl>8=D2NJ$MBD0j~qH@z$Yz`$9{nz3jI7d=~Rt&8L|B&zW2Wzylz+-rdt3`hewG zLf7gpTb{`bd}pmmb$M^6!m`}TkVaW!4lN}>k3Klvc*{;*3BiGmuF^$$B%2vgWdH)9NFh|5_v1P2a;;u8mDoi# z?2oYHKv}X>1pD0d<2={RIuFE6M#JN`h^M;U1%>=yXGC%eCzcM^B)9_vF+RlSXykbx zj6O7Kn%1Fw_VdXMS1R$&K?R*zR4^m~k(2~_af;(x#53E-m%e7{*E@G88-W2xVT>L! zFh($OUPWwjZE`J+n9`S1-hKl3%Td(+IryT&>iuVm;iQmmnUxqWFjWpo!u02Xo=!RS z?-zVk(|j%CPY>yG+(folzF?Z*HsB7-#xObgi3A>m9=un{&3U_**zXLxcHO z=GLR)3(Z4bww_jzo*2~>smp8}fLn~7+3V1DuYLG`@#;u?cdA^y*V(klbbXOp-7+S8 zyp=^Nn0W^7r*27X*F`+RNxp3my^~7kW&2QTR`xJx`gPQjS~QOG!6HPmhh$JlJY`4C z*RjS%Ij^5S4C_}qKgF#hQHn)BXuJ{Jo$3^pen2B59XTGqjdMOS@iwvJj}S+9tX?(L zkf>Os-oUO-a6W{e&Zg1lhfTh|9kaB8LX3uaS>;@0D<}+|^YXWR0!Liq=MM~P zH@3D~tb*z|?WbfCK?vqj8*;XAakTmo)K_LF6BR42gwu<$=pG*U#jNR<8pfkF{E4bd zqrIZEEwnIP09%4sA~u8^K=z2hx}F)o`UvDD&wth?`-40wpkVtBw{}` zR0i3Q*v<|)12s`^omgGOpm~z*X2vo?yhhc91E^w>NZ$}U2K0Z^p40racUhpjrgIB&|qoREK{Jh{cNy*RT39JiU zQfTCkXU6%9DP}kejAWn8)m!VQWLAlv5vB`m8%Q97$m`y=7sB^i!c97@2K zkQ4J`&;gIk(eBfV=IU3W)A(mqmodW}xcgItjY|UCc^k3s$N9xg;yn(3?71SJbWzE2 ze7R*(H|zu`;Bk|m&bm8ag10^hlFnIObrQgb*%Nt)zyf)}3J!Mq=QU#ESU0O3gqGOc zq?>e*q^ZH@KF6Hr)Ee=sR*$sirK&fKRkb|6<)nt*-q}OL<(2)|10al?^N-fMxMS2V zbo*A4-ZZmc-anGc3t(gp4Y)~eg+OC8;ucr(FxOEtu&k^<)ncsblUjPu^KZmcJ{ zS>v>lC}oV{q?SUYDHs6f9k70)ol449NQKnrCBFL{%N@jCYKGq%I__*9gYf6qr7oeW zMJ3#Fv&QkPn;J~6SmT0wfzC5pR&Ar|QX@E)=Kc|kZQKDIU;szT4{v_;ovCRBV1 z1dv^+WMc{dstS>}0Q4vMA4;go9M*a<^ersWS;m9y^U7wC+)0d(FmM6xIQ%QG(Dd6# z{>?0Qu(j-p9xpk!F5tzE8z&hc3gR@)KJwpBiW}=ZjsE~Jie*qa<1Bh(p7q$?Nqb{& zWhJsD#Brc@wzUEFZYL)z)q%&*;<@W9wG{U?i|Ss}d=n>#rk(CQ+c!p28$_A&K_qR? 
zGNgRnMtQEs#g=i&V`Zb<+(~Qpgvga+3ZrNOFmeY6zau>U994&mu4A~gQxiipczF?& zfiVryhCkUOj=B6RWp6HZZCN!d*yL$04(RfL#s?cf4~~TAgP&k9YdI$kThSV6xfSG* zXzgbm=C}Qz(frF7-X)ej!w0wC9eUMI1xtIVXz?}8q}O&eLGau9(xAzi`lfyk~WIP$f9ckTBXCfm8Ed8H-h6?y;x#~SVTfbymSuIF49>@C#Gsoq008vIQ}0^gpC-F#X(y(p{P$XtX{{ty z{zbeSP2NUTSMD)G(C|l2r&nFX{Gg#Y z85lV0#c4`Vvv025E<~@yjkZWZ-leIr@$*Ce>~wi%*`?NTDG=(dU-zCm7nma5`h~>0fn)ps=)*Uhh_O zUCEy}d_mIw%i{~OuJ3Pq7?UcD-e4<+4bBP5o~xShZyGMC;Mo#25yhrt$Uc88x`0m1 zdgq^0gI`hp)LLq2mi`~QXl=u@%L54PSoTsrLKVGm%j$a9&w8|$8q_TWi*G!zuyMQ1 zif>#1a!1tqSD%fds;k=eJ83;Ed8+v5P}HP&Z0&)W)C5t;+5kA`jCIe{^GTriqE9mA zH8hsWCU%e@QS&eWfIH)ZjAO5U{iU|4r)iKX{{Us$tj1UJk_?4FPYlf2Jn`4krq}h& zShCy0pt81+^E*a59T8GzBYAOkd8FHFHu24;TnQB= zcSU&cbISwYo-xS$1y+;AnrYXSH4Qw?sNP0h8K&C`ur7eIw^rjM@Hy$lCy3oOh*~JM zJ9~XX@@R~DY-r97NFG=KU?04A3&0;%=U|ZCM!bok(pCPe;Ht>CN8{W0{ zJnHjWoV6}LTGI#HAk*xxKGy@r%`9qg9!5N10qOwB&%dQw)ij$84$d}~8%XtByG`V* zjJtY;7y|xvo@o0;gRIGjGP_W0B0qC9!_hXrY4oW)vrU6PUc>>;wh!D^5^pI zFDBwdNcI@;az`o&1E(bNIp(>Gj~ZO*HnQo_&mN$RLj`pXRNxYK5!jzj)zJ8w<5ASD zLt9xCi(?>-eto=vNN~fDPB3dG9}QS(I#s>(-HpYxi@D_tb0mtrHihAR^PZh^MHdAd z&2P}3HglRwa?7MCp)(=af`!4=VuDh}?v5zaBzyF=jnOIaYh)$Zg?MaYrbPcNwhJ@KP&1GBZc4@C%PM4c(p>5HdY!e!h_p(VKXE`3EejD$hXnNwuEtrQ( z^Co6#1fkr%4hdX<0p#*d3Frl5DN};AwDcOWnWgxu=SsL!XLsk_Cg`_C{;kvl)Q}5g zfOFEJ{{V%Xu77qP`{{qiv^1X&!=h_To9J(3WMat58Ju;%3@}Ob&1rvYANm%wzx)MX z{wAhg$-An2-+;6~RpilOW1XOybT$hXbAml9&b0pkiqTxcSj>v};BV_#&GA}UuFQ=m z?vUfl2e0E_w#O{QNwn6-obmX0+1&KY?G6i!l>tp4qS#zA%0pZ_>nl``U4RcDgxK?c>11y=waggI3eo_bKD_6q)Hq-o31lKxD zt#NM~C=wJZuevpGMtztMVhF6%dA2W_+nsEnFx-dc?kYHLoHi?$@rT5HBf+{o_Mv?^ zR<(^;;0NZ*XK41$59L)p8T?hzekfVr%cQwxv$$wvM$T{>2V&Lcn;rM= z%J1~Ztmp8!(^h-9;zTT%cR_+jy=`eXit`62k9uc{wNEEamtzG5Pg9EOrsvF{jHIJy zCUN%uHq-RTB$DQLh7?S+lbk5}p5I!{_-XMn_r?#Z8;c*ZMQ?J^Cf5=xU<1Ri2dF;a zgI_azdGW-l9QsAH<(p}hglBJ-LktytybwqvWN=Sh)ZYSpX|HIyM4x54hW9|Rm`#5% za?Fwll|dV)QdEe*9Gvt6_(w)1Qm0?pMc++MDix(tpF3WM+7oz-PqVY0;_mJnc%XHT zR(BsOHa3IkPT~30(Pk#v>NhFm5nlyI@ncx=SA%?W5qSg}j;|EPX%vM>w**yZ5~x6a 
zL7$m(oRQYPwfKAS!$iFBef*aepJh!8N`~rowRqkK%H$+$uF3`g`^+(h-Ho;4D)5ug z^Dd>eUw+Nx=rpl5>T zXmgM<2uu?FvY--1c(0m&Yp;v2_^ZUz=(=?CE|X(yk~_ehR}qJwh04}djnjD zgjLq^TZ0&5{7udmzCiWntXQnJUM^dYmPC$Ka&z~9J$VFYoOB|z`oy`ldaX><7en4Y zCj4@f#@fc9@%^Jsx3ry>*4<=Qc)^KdY=Se+Gt-Qk`N}dT)n_tXEym|0GN2gs0D5!! z*8c#Arn9)Xp559wR#q&#o2fhlkUyniX*zkbU$iXdNR$#4fMK6vPx-|}>&mCSCQ^b< z$hChG!6HYTNPcD{fJr0i?f7P>%*h;)tc~Vd#eh}5V12*+ew0Y^NZxDDhK%IK-AN>$ z$Aeiqgv!eDW^bIf>~zN+e;SCcZ3~mJXlfsCk?tf`Ww&-@+tEN69mn|L{V`$AbS zSht%TS>9KvINE)W_*Q;{3ftKULsWMp&bdmmb{wXY+d;J1|9 zh*ng{>x^{i(y9HH{^C^A?PF-&S2Az=nB%EFr`oMOH7jjdwWmrhnWPM;Q)o~|IX(XX z4r=A*W{j$BY1<@7-@<+B_xvNPW2D)~ur1^x&$w*v-<8P&2eAArpVY2I(+n|=c91F& z^A1ihYYK8z9pkbIrnfS+87}W5jv1abHsP@GsZueV@qwTIyKO?sgy&6l0Qq3g-5z5>R90ZCvB@ zvLm-y9YZ?D@`BkUW2Q0IpA=VDPYgk~NTH;39YDtej;H?suU2($4eFjAmdWp77Hc$e zNRi2a;8BL*0*h0%?&M5H^hS8&fd;Q z%#n|qb_ir{G0Ef9em?b|uU%Vijbc_wc5MFuS&*tfgba5)*HvR_1dDJbXPz>O*?a&# zVS%`HKT5#YpnLBkRI=SJ?o9-^4iE)34=89-%G0kh+m#owC74A1LF$ zW075auXCCsuV;qT&E5;gS8jWsUQg*%qP%;3Kns*vB!}hBarN!ZOJR8Sw$fii z0?J$E%*+^qF_y=Ey?yG>gkzV)9w3g!@>rG&iN0r$0KLfr9nL#?){otbZ)9J@(ncKD z!dv8+k`Y@9GvD63EkD86mji9w5+RtZj)&!CiwZ&K)IT5AwzLlg!L58vyPn@hTfG|0 znXY3~vN+B-7;rQ5e+PVeX1$wQjcoLtLh5L3;+8-H84xRgRYnvRJbbD%+n%+e1qUF4ADR)->p0ReLS#Zev1Km?_Bw;N$L|ar{;7z7)}5l5H-|_iD}(5&K??NJ$0 zB*M5j1wddM)G;KHpS*hYuOAUnwHQ8ysPsg$3ssU^lN)YDb&g317$kk;l6qu*r1T*9 zEuN34&8TV@Z)N_5(MUY;AdE~2U^CF|Y>WZz(;~W^Tf|p)8j8m{tHEaz20$rgQDiuZ#3+KMspaVSVFS_j6r#6VP_B8rIt2RCCsPH zz{nUQ(;r@PD2?YkwPuLdVzrDqq%q6(i0$Q?R^m@E5L5$>8;p{Ao@++mSDMFFk_mp# zd2@77Zotmmox^Du^v^xVrw}dN7J9@{`Mcq_3=Cm*=1r}J9*PL+dYY2=S+}wB6{A`1 zfpGF5`BjO`eD%Q~f_U##+mzOZ(b)78Hh}X>BsP}ET(&F`r+|In7={3Rrv&FX9l1Sg zHsa&syN2aIQ)L*d-{)Gu_y z2%7rdf0WM~G-yPHf#aq-93Hi|<2_qa(k*U$wgqk9cbu1-%;&<{%Cs|M;S zT?HMSiuaR4G@I{rKd@R`&BeS3t(3uXioo%^fsT4)^~GOZYSz!}pV=1b!sjGL*2>3> zm58(3yJ-w-IcBWb{2;T?xej%zmF z*5kw$H!{mEZ6k9mWt^h#&I1m5D9JwAs`>N zDbs2%V+SiePkgbm(lk36&8$LOXZ_59t=3TrvT(=%0005%0r`o|S@=WY*(dSu_MOnR zHj18nUR0te%sd;S)O{j*!M5m)8iJEsCYj{7B=%;GHNX&ExxDb5=2f31toRvKa6^9 
z?_WDuUA=a;KV{shvG%d@=FcJY9In^sP$IM6pQjZSB0TwSB8HO2G08j-#$KkG))_zloc~ zOB_<#Lu+$xO#WY#Hb@^VafRw~I`pxpd8&6(U0Gh}+F?bD zOi{XKk$y$X=K~`wMsi5W=C$>WA5w+bM>V^7(kn+2#sP&kkQlK8X$Kq(*PC8ZlUM7u zr!7x^QVm1Ik!W_4Y4*;O%&eh|7(`GGK^Z4J5He5ltgrY_(dL2(Rx5(>j&cN>l;b$< z&N}jOo+}pq^If@Vw80hK#fDwvXyP&;BO9NmV7+}aTx`-h$$PThVz@)Q%#4kh7$=;O z)6%*q;NdlSZez>Lb{eOPlHXZ}Qjka*NYz$lnZl|5A^n9G(S8&3fdU1j2_}723!_t!GdWS4x^)s$DDZE3eK*1-znZa`fw&LrKansu; z9DOT7CDyH7C63bLQA?Hbwqf%gdX7mTb-_6udEnNcf_x^P9k>3|p88u|OKE7~k}bOz z9Y{Q|Ju)yk>s>v*xvgEv70r@a$1@`-w<9KQpqw0@F~)Py^I27_ljWw9OTLCumbOMG zg|r)~Be<7Rwrh5PhCX3FX8(;qFooq$hR$g0vWTh!;b$5Oh({#;JTbXa8^CwXyoHq(^4+Q6PcN}!@ zSw2p;{{TSyKl}s#0PEK`tZVjnnp@kW%X>3~k*DEUzR|b?o`C15=Z=-k`4`^b_whf* zy$X0~TJYn0_N^Hva$+AH#FW7#oBF z;1k68Afsde2OV?mSU(p2Cs^s)KZta=ZlI1>r;Z4wib6}FR4j5h!1;$<^Vp2n^o4xW z45>;=SGCv8AC^(T*Hewvu6s9y{we7mANYl)=rBZ!Yk3>U*H=@ByrQlYj1lvC^&W== z5nk;Fiu7rw)veOzHkV9|B)keU<*TV>C)a>$^OHsKg|&yobl0Q4ZDJ3z{iaOGwWD%Z zDrIB@{K^O*XASkKJTvjS;^V~LCX!Im#jP$Tw-E;tFhT;mJBHc2la@L4&tJ{)c*ss% z@|LZ?!2A3pWf*eU{R`FYB-JgTxUiKPH&G%SV<3!-AEk0$I`QqVg!CEhbvfgIWmzIX6{?RBbY+Afu&T*)29`c9&2#)VY`KQ1G|Dh2=`k?Y5) z;Qs(B=bm|$C{ezT^EPzcrS7{N z&av?mP`8^?5$abKca!W{6kX000*{*@{opnr8@^${8@7X8-|UOyOWiZZF=-xPxJl!j zrMyg$`R(Pa43^}u#~!@nJuBwR$r?!!5Sb;6VdQi7vU%j5a%x7J^TT?el1a>L$Tr{- z21i5B_}7UHRO3Hrj<*kX$E5sZ_@Sxzm&TgOH`6_|%&^N6k1NboBWk++(#l3Nj@ccn z)BgZuPmFqRg!L=U8tN@dJ6YYHHv3PT3g-t5nETuxoMSm774o*JsSBK}JZ@#*^8J6V zKRW4jncGLwmUNww$tuOrjkxE7o=-gS+Ov&$)0ee-vXWM@`nj&_)*c)1-PXB(I;Nj( z16;I=$tVVLqrY#=SIZw8ym@T@02sU-dlk*h7nbbtC)w>5=?b#4hm84(9ERg*Rb@Hd zoB>{a@q6Q>-ZuDu;jJB{nk$=IWqD?KqII93+{&t1PX{W*@OojZ!8M&?>T7#j%Zte( zF-WF45ttM>7+|9Tae?VwRdLDBmJ6|;Df$!PAB`89$HmK!3fx)!hfE6LCYDI|hY1nG zZFU0(aO!z*INM&U<6EsN$HVJ!t6gojc@h}f-{yH8m?%{M65NrT3jEgiH{)B63wXNA zUwiAh?X2Y8Ik%Xvng9*7?m0Qyx#}_NTAvj@FKRv_(<9T{TA%E&Odj3_loX36Il&zD z^~lG5+DA5U+gTFnse8Q&UmNwySv+wp)(JJeq;ATqdV|i7CoLObl_6NZ4l+R*=BxO3 z#m%C4A5pQBXt%zQyqh9b3_v?_!{#F|!tenZ`fHLW1d4z#lXC3K4W*QY#t9&9VmUPX9~|6h9u(B9Z5nH7FRlEev9aYw8IIA8dC44} 
zgz;P>++0be#_@wNkxCWYoPV6v%Q%I?pq-|TQvq3q@^E<>$FE%HrE~K*yS82RHFYg- zQ}JcO+}>Q=UR~WC(_P%kU7LcWvG2(0eJdhmhe@}#hGuZfCgx+)IXL?9Q(b?c+uThN z659cdpbkAi#~t%jbz)>_#886mBJGPgZbupS{3>GkpOE%0$22;n;#|n;%A^(CIAA;U z>*-QnYL?fUeD4~K^B*j21RMe~NgjZDS16AlWlVnZX26Xy!~^~S*GXWus;5fN`lKx+ zi}#D3yx?)gT;i38S8qb1T_^T*MoCs#WBCHctm7Q_{AwFmT1&MqR0%lBxA5bNg>5b& zyfz?3Y|PyRhkkL9o;^Pb>MZ;>_It>#36YSeEO5*S1KXj&sg$EnsYy0nxV5&mwGhWU z283;811fz_zc{STKTsAnHy(M|{#hPS=NTZ6!xdLp)FqDd?NQ1`{{WGOF@yy4z{V>u zY7x&OfXWrd;*7mJa^Ib22s>(5HJ*cB;^s?;PnZGAup2h62;;9$!<-7&n(-FWD^P%| zMo#0`c6tt+^Xc5yKc5xupvxvog#@<)7(M?04*vBG{nM?~QlWNS;4V*6dv*5xD|a4d zkS1wq_ORY9%iLqh2k)|i%76O!tO=nvS5n%-k`k?kIplUVzb(hvY}I_D9He=;T<|h* z$DVrCO&-!&wP@uVB8iUa2RZBCj;ABNH7;AoZa1;g+3AgDErU;zgClp|Q;z=ib6jhi z*kXshg6j7EgH4$BGNM;#7(bgq}fQLXyIe2EOK!*Zh%#~lVp#~JDGTKX-%lcxA} z^-W6FD~qefF( zB$Bndn!0zgXICQ2r>3)WGz#8qK@pLT^#w>MK>N5n9^9>QntqaQ+shFw_kn=7NZWJ2 z93DEJYrFAXh27+4J6V|>5;NvE%w^qy^7lLdaDDx$ng@YqvGEO!?VSGrX|h0~=tl_I z8v*j;lg2TY{{Ru58a31)+j_sy+Em+Phk-O}e+gR2sK)Z#$s)}1M=k=!K4POC4+nAe zBk=Q2A4bvYmJr(?DJ6?$)|@fgFxg^y9Ov>pSGoA27;iM&s887~Ab~K!n>oq;>h&b3 z9eKw**NW>_5MR%xqFP)mP|VEpJB45{IT;+TGJgu+4N>!RNsgp+vO2E;_;99@ZlP~9 zH{Fmk3}E_X0rPSAj`ir?9@BKs4J^)+-QH?^I?XDDj4$2+QL~-BdJ~X2;+gRK!s~0H z7P`Ij;<(StaT(!ulfXI8%EbB_*10xv`4@4#mdu4BSs4uM*v46o2V6Esr{Rx2bsb5) zbToxEvpo04S6Uy5t#t!-w--0-3yXOgLUyrb@nI&5%EAk=NDEBe;e(}#8Qb}#R+eQ}FYU>ll98334;gH!K zPp?|S@fX?t(GiPPYfyIpYn-PjA+`Z4Scc#jI?sp@!NA^4{g++8#L6gZxKn z`u!^p;%1p-(CRnoJa?@%&&tqA=Io5G86A4#2LSr@6?EklHB8b`e7wh&-uarAy!Q6q zY`T^dIgV(wa6~sM>6kN?jyErNxvr;LyV5aLC$|(s+fqve7z(2 zoY8~dxq;yyh4$u);GFZG2TI4#?C$js6Ii{JmroEz zjg*7DLI#P^wI}Z(bIz11=^F}q6wn=V{$M2yAHi>}P zyGIY5qa^2oJ*uv`sA<}Nkv#fTGZmUyQKWszpOkOs7$go9`g9nrU3wUGsM^s1w|N&P z3Dt;lGnO3ljCA(RaQ1!~nroCAzMz)2aLUH&DCEqNZN>)N@G;Pinf0t`>fENwlu}nL z_;L|8z04M|MJ^qe+c{c9Ps5z0h?zBOn~rZ7*7jPqKqa)a+7O;u~W##kqC@ zaG>P&0|%+=-m#6`xvW%Tk%cv_{{V@#D6XQmlJXfoVdcIKGDbPT0CeLx6&L&}S@jFY zZDUbudl=bDwY=e6mB~05Y>?O-?dh7-Z;4xEYjm|RTaFBH$FaB|9FfzE4tc?=T7Im2 
zO{hU7v=P3a2@K*!A+d%ajOQc~?Ol!4hms~yPjZKY^hLbVb(3v&V_%*u+^9!p4o=2m zanowy82CPuSX*o>GcS0a1WU0=VOh^~N$f;;3pL4)re{c$k%)t|#)^B5mSjMODZIoa2Vd zoyP+wCjz?Y`_k2SCB)l6jkIVBl`rFi+DwSFmY53)4JYx09?_ zme%onvFc=Qi9i^}8->qO+~Xi*4o4^d01BH|(X=^rV{Le3(?AN>(6~utR@y)ZB=Y4qlhi|t}U4ASQ#Y)2m==0K9QHM*;r{^H-@!g0@V=vSf2rU2S60#v%~sm(M0J6? zFdIQC-@}4(P6lhwG+PlJY* zYRA#G{vy(}H8bgOi)XwZV~x@?WRM%?&V7eHe@y$I4yEP2#wLzS$)O>wnC=YQ@-e{~ zIRo)OhF5{@Z2VE9*a$TH>wQm6#hvt4^6wBZc5UNwi~(IT)yhFLE*Ityyo%%;uRwZdy;szJ5!-k+);IBQ+O+%G5X!buqeNvnU@%=qGC;>( zht|D!MbV3OU&YwUS8qgUM|I)Ybq4cb`z?!o@UF$?=s+hKsbK zZBYRb+Q_GO$})57&)1WR;`FdbNE%^r{8$iMqJv( zZV<+iD;5imR}9>dkaOrOhNKd+lWBRA%+6C#@uNwoHN};@f7tOU^DW*Flrbb_xaGa` zn$^(s%?j!mWS_z}j>ca(+R(hQV4VD z_p-yO!vUT~2U^aOJC6=p%NB(Wl?C9GiEbKZM2M23J6N5>@^SCSO6aFnGxuPv^E8Yl zZfCKq-W`Wn@cqrD#1p|CrTh(>H$dBz z3}AVTGIwEydUA7~D|^fF6#fp>BD%e~xVv~JkX~Als}l!CAPn=tz|V8jiuseomy$N5 ztp=lctK8a1B$DP=ERpg%4hBgAyD-#TiTAj^^7-vcIzO!cO;6 zF)}eHsRS|O-@j_@qxie1MPUT7&1I%W!^}@6+TI_%gPf{^o(EH!@$a;2e-R_Ng54p5 zPlE<%)s<9xyS6j_c&j=efHb%~#nmn$noM!_dBY<8Sam;2^(v`OH=K=O%NOpV$Bw*z zed50n+1g&~^G{{8ys{hSb=}WaB=g7XR(wg~i#T+rxmYakq);PVsF6uL4E4<>q438? 
z()7RWn~TfCqi@2=_Ail+KJS^k)MGfuYUDIO37uO}66tqV@heHT*iRtyTRd=slY{Gw z)cZKuYoZjTC3~&UNbtwRj~B?&%5Jo=WQeIUq#>VT;Otz3x1i2FE6_E4Mr%I{Yho}i zDGFEjMm%cwx7yyQQd$(fCd@HGx+AXtxI1zmED%*$}ZnA zqAd^r^4Q=2Pved&Yfp|?Vg1y8Rm99iv}}1G5r!c89-TXKYlggPNnWJBM}t$Ft2;RD z=51?z<6D*TkCK97iwfs);N;_g4l&;q1;(kQ-Q6_sY6I=~nUD zOJ}7$)wGEuQ#H%6Y34ZRhUK%*102>ix#9gz&%-V}#j>_(e$uhDVI#rd?K#{zINCtw zy=X;FDKB-tuYc5~?`V$KR``jcS=opUx~z(&L{E@UTt41ZgU38_dU0NT;!lav>$bC7 zq!8)*fsDxvAx;NiK+1#AjQdxg-rCIzF8ALf5^}jC^Zx)nYSo>s#Hi|uyOcDHas~ig zo_>Jw-!s{xA{1+d@AoA)O+;-8x-ca)4GR-3pSP(D?7|Hed zO?662p8FeFC1Z!PdzmjIRuUbby$g(PAY>n=YfJ5yzw`M&{rvv`>sPGD@D|3^WMSgS zjm|a)Yi-HTe23uhJ7%bV!ic{A0HKRY{{X*_{{Y0-GsIJGavwiLT-3Fz?PJ9P=-kNH zSqzdhhGYXI0Kg7;>OVn8iM&s%c&V=rrFT2eaWcP`a*H8iss?j`j4F;tTyyJCcwWuE z+jBdR%N%$F5?P0-=Z-12mjSLAZI8_?2spt|eNHRC>lGt(p8J@(FtWPXwWh+h9v^3v z1#m$gTAk06$l6b*p!BG8I~H9*-A45T<=mxkJqYho!*yjXoR*Cggd!K*rLmsFj=lQg zr|{U5N?)@{w|P9sxIA)t^sGInJhe59QYdK_^WJKcza<-h+rClw8q(FGNMR~nLn-rq zTpWz_=komPaiN0N8D)1-9^B!FQ~2}Rxv2c_w@W?QR>1xk3%OlIS6pVigpFlhH{OX}AE0rX=v$&T{)YEG-NpYR(A=}1#j@>!- zrB7S1I_0@dJ|?w=QElS+0*{pLJzL+sa^5OchG^~A&S)F=it2Kso~I|b;hO3-3wy7# zs@wT@b2eH=-SWqPNIY@>0N3KVvkW?uM}*9<;Z4I10SAB&e%w@2c6`OOlChzorM8}_ z629h;kDp=Pv}9ywwohPs)~2bW6Ain{e8xh4Shll__4MzW&C>06+()WkN#-rO^V~CJ z;QcyhKhmzjf978RCPzjXE$xm^U`;7Gb6pC~*DjB<+rYM^0!#Ak4hcN+6rc0@R}m$| z)_S$smD?N4n>X%L&T+@{u7(y?wZ6C>WYUK*#H5^pcq9Tblj&KSCYEKmN0gK?F;`{D zV7>Pa+-8b)joS-co=rpd8$&Zh>PZCu02T@6rtti`5=+&2LNb-xFaxFzbNSS=Lu;s8 ztacH-fDAX4AeIE+o=;kmI~!NVn)qm}qE_5;qbCI8)RT&dG^1kVx)mMZXx`+Q9_7`h z4YQ!Y3JB-x^{W~kul7Ec*H;mjdB1wizZj1l^V`$(^``1pZFj0#2x4cok%KVp*bq-& zf6kwIB(hAl%WoXeNr>0T&&mP71b$TS%@m4^dXZ^X_WG6cYApuI90JLZK~d^E6OPrq zmsfH~p(IZ&hsvFBy@zqyv3w(>!F%E@LJdKDh%Dn>>x_^Tg(D-UUbWNf@>yy2?e=u? 
z8d(-F7Ed&dpq<}Nne^w>RuyIMsP0mWQRX;*6G+!BIEglzO8_4r2Ho|}2R^mi z87Rig%1fy&#)35cq8Uk8bEkhi?LfrG7hml=YmbOsd zHO1eaRd%{e%68;8AbWw&;q6r}wMMv>YsZfTiyhlDv=T^M5x^ZiYdF+<8b%7&Gw-K) zRm54}x9@O%U^(X%%&Ruo+>30walh%rg>l+2mkg9($UT z!HW&VZ7tk!$|ICw79gWw<(2xff-%S#7&X;u(^}~oK9iwX+>2{zq5Dnj_W@);V^O#i zBZtV&JC-2kxXTX>U)Wwt9CnX2jG-n;+Eqp<$9O8o01~*!Bk->hoo8Aya(27Z(_^EN zb1ePC*^4JwV=U~lJH$g^ zf=6zCbvVJn3M;;n`%Un7g>5616H_on(IHdVJE#HwBE$mnoI`c7S>gI3A~p=WSyi9kWM{@zOxjDq)L!%ZB>& zCnJJ!k819nM;d&IS+ky^v${PGQ}H#;)YfA9;!8Vt5F2?CCMrWA480Br$sWIrWNMm3 zcGq8PxRteACIqxNF*(|;j4nz40PE)-+gcQNmyU0B2p-NPiDHH?E=6zP1%n*%f;sl* zp{_e#yn&$mMV_TS)!x+%(tYT&$i^4B8SCE_%|<=6H;caKNo%fHtkzbr>7z`w5pP?2 z=?rHZjtZ&g1mq9!&2V27ykgpueTQe*BU$$wh|yJ@w(;`-2^c+r?de%j{7SsjWH!qh zUE1Azit^Ig%U?B6>^ zGLo0NYB`O#k0qD)R zjt{8yr=j>(`raFl+{1Njt(n!_BW}oI70KlKf4YALUNrV?;rM>mZcW6h#^k`x$mPz~ zZe1eDBuKd;HEf>81h*WVe-$oCDBX1`R%*=YZ2U)K4S9dXu+qI5|T$@$W?QQL1KiYQ-65KRx51Wo~7;p#x9Bw}T=Dhmi-&XMl zi6EXOd2SWUq(zGa85t)hk;yr&e-n87RE9aC%SRfyZb2w;#|M$~WS_?*_pAC2ov3P4 zTi@ylFWY4zHT~|$*kEOd3PI%U;DT}6lUl{PFy@iF)mayemik)R#kLE`?*a5BRKr@lJYmxu%E8eorE z(`2=1qRQN7KRX{xI>Bmsi5gZq^43o@B{}1mlo->B;)>k6P$FA>wOjd@}?MaMx#Z9g?U{NyuJ! 
zAb&j8v!f{`IXi@;xw%f0Ua_=m^*1*tujUB?gvLNzk{FSdzyk-?ur4nyp}bprb{962 z1XfMBjGT;a2dL}N9=$lLz7_FwE%5?awMz?KOG}j_x0x?vN6o{PVYRmIQU~{ad9SN} z82Asv9xRcy*fcvRG`kqX+}@a^Mu|oj%K)}K!dGrK_0D+BdU$LMswH+zz1W^P;4g#P z?!63mdYfD6YqSWG7?os=4oOpyg+EYOe|Ei+>chi-40H>lnjNB=Qg9Ip>p- za(na62V7OZ8hF!4@P31PtZH*dVWirE+g$G8V+ezcpG;T6UlcwlYu_6@OQT=vwzFw} z56$3)(%GL4B!zxSWj*&1*P!j{eukY2I2iki@;RdyQTZRG{{RrQcF?>Dt=}`o){^~} zI3o;86;@J9zbBKP00O>s_@jBIYo7zWV$jC}OKWK=5_az4REAPXB}h2W9E0g!M|?!n z!}u>)@~;fnv%RbNQb=PE$GCHmfsdE3u=-bw>TziI+D5aajVc?9YwmAvUw4x#l>mT# z_D0e2C56T3wT=6*I2#OH3h!D)uezF zv}n{K#cncr0CM~uZ~*Od}S#6|sAm=?m=t1CuYvp@$ zacph%ODKG;Pfh*R&8n^hsCxnZ)*O#=M{!?He#!nPweY{iZwgavo=4-pP7jxcBWm|u=GJ%1#zT$d-M_im_`r|(1##R*>tj_;BAnm=*yo|(asla1@O`bm ztKtnV;_@#x{u}s%+`z#i;zin^4!n`aQU^8Ethr~{ITMnyJhR~+!h37)i&vKV)vdg9 zSY0);*u!pC-f`u-Z#l^R=>yxPW_Xj~YWU0Jhl_OYw~Z%C(-s+C3w$XmVNMZx0qS}T z*U)|swGez>)Z}@5viDm;=a$4|{{U0@SD5&dQ@qgrFZl9n3y4wL?%7OI6TQH-To3>x z;Qm>z*h-wb?dbrSikmsT>)9mdpFGOpq} z6SyyL{qrYeQGlJAD>|4YZ(8n}(04z5NAZ^II)HWCZ zFhM=eIO8?m=yK`$ZkGp=^4#iEMpjwcBzDelamXq#di3X-;JiWLyXiFs)-7X}EgoqW z(A-Qw2^ydqSyv@aFbB3hYR85AJ-YhxD_|s49$wjrQ5bLHIXy|~&25RN2}L|dwaMMPr(VPtb08d<3UUk&eHElywv6?psh{z_FVP;W+0P3oJ$EQ7Ol)UkE z-mRBdn&n7kNTi8m4DFsq2T`6dKE3PEr7E(ID^|D2o@uQQOww)qGO@?0yw>`Cyg@v* znbo|bo{B$&;C5niaz%5VEY+?4AZpV?ZzZSNRQYRlx!8GJphy7fwEOVph&W(mX?f~Q8xT>dtahJ5_cj@F*Xua>L zqb7p(7I!j@jj@>ovJ$G;ARGb#@BJ$lNhL)r7?R0>@`WEzD_;9j)I3FR1-;&@EcWoA z^3Z@nfI4>MamEc_vGAppy}7yjV%}axH!ZATnWS8DcBXO9_lWvepr^>a_o;l!JH1Su z4NbJr;xjNH_6~mU_ea04TJ+BfYF;1Ew9%_;GsyRBS{r*ffJID$l6L1A2c|!zH^Pud z6}x$sab4YBM!RjGMvc`laldH$r>W!~KN`HA9PsX;Xb|ZQ96oOsJj9~}j23=KBy-68 z4SBUO5tN@Yek){ ztvCMsKmPy`R;_$9VSEjht=P9nKv4uY2vj#hNC59WGC}936|MgO2nhcGpydAm_vrrs z@pX)=ru5U~9@bK3CWos>Z>7%6+!9e)k0*|Kz{spzUEE)vvap|GZg7WyGJ58=H7n~_ zG}tZtosT9MtYZ#I>DwUUo1#dzG0k%vGCZZ5D;8Hhj^^wK2eo>B(b>w!PsLqk=Ts!>x}X}YKF0@t-7tt3Cue{4$+Ww zN{~VR9Q60B9a{Qp>w9Zfw*n+lnAvuWafauDD=K%CuFi>BN0ix@SdUiI?UvqOH_Ru@ z^3MQt@6A)yJd0e~#EC4B#u?m&%z*o29-oy*;rQ+Ccie_?58m?=X;Mc8bAf}?pzm4t 
zWL;{7$UzjiVdaA4uYaNG*MVAZH{@irS73EXV}o=8IHrW{I{^8?JdBU0O6adFO~Y8a zzGRKFEOID2c;guUzolSX>e$p87;V*(5Cmk(xl(iUXN>+8XHStAQoVbf(sDm`ypi9h za1J@^Q&Rl}F2;trswbQ+5`BeNF5n#J9nY^A#WoqCw6%E>*=`y=uZc0Z_s)Gg_vW#t z{?Wd?d(I=%qjrBalyWiXc{u5wnKe8*mFL;jqqk{Hj(^e7Kw_k1k&b!{{{UK^OGPAP z;$6HOw3WOYg~XG83+*bro(MjN^r{{nwwh>Gb(JM)4pFB--dX}i?!8Y;oPUkoael_u zx?~Y9RBu#}WPn)p2ZPqOEaZY|ErfBa+J!O3v8DrLbAgN={{Wezn$gp#s)=Iq;Ul$@ zC^p-cC3hF#WOv=%?1Leol?(zLALr}f2x3)z#<(vWn@&X+5 zk9xw?G|1mG+%$IR-|Eq2%dzfB_4XA}j+&8eMr!vD93lxmRvW+6HX|oLOq})Rso3gQ zY^@GjFE{2hE;n)1^vy?gZjiD?6J;hlwr{>Ph5NY zS4vBhQl)#?*15P6MQ0RH&3e9X`q;?|TOW6(M{d=#GbCvwW)<9{mB|cGKb}2(t3Kk= zG?^`)X#y*Mtc(D}p1fy|*R3tZ;;f4t+oePW3z5j<0~tL%{p*!3Ygp0?mr@vh&ueQe z^CF~8!ud{joc8O@MQ?p`bqc`*`(&0v0={?c&r{E*wm7O+P{D5;(>gPVw(MQ!81?-R z6>CVIYslodE{;`7#xhru2W;aU8rE@1T+UbAc8Zf+YL^$2$0=xCk~s^46R{Y{i^msIfjS;MBs zWjs^dO*+W&r^?7!mK#`}SMvUKg-VJt&P=7jcbMlo1ihw=1 z&#%o?WVE!@HH)Yq5u0@^Sz?$5c2vLs9zezqKIfX$zrI^f5k1t?E+Jzq2tHs)a0-wI z9Ax9zcdkdoqSnGxmfa1%+nP}r*|d;!J>@cP#cK50u{rt;BPkHY>P`%b)>yKTrq zAuEvCz#tLO;P>onrq}Fk>|?r~-WG}DegKh_$fHPV5l3ebP&bwkV6Z1ov91i7oWcBLDujNFmdrF!;fz$U4wXwakwUcGd zn7MEnZI?Sh$3jWQI6c1_=kBj=CbwmtIMtQ$A%KG!Kf*@eYP+af+Q=l9@UKGnWu)ChV|%J0l3%i?lo?1v^W%0-NH`1z)*S8!BDlY>u)SEOL2k0dU9b)p zgTc-*)Ag@T@coP$ZkwjuwX2**SE4D_oKKs9TK{#Ce-{yoTw zBCUYSkU+|grw8Bj;-yR3rOdSaKI41yY)9d?vDZG?dXdGZg$o-iw&Y?6#sLK50Q>Wd z*QMN@Kfv~pX^=<0X||bUW-49PS=g010dn04>G)R_;GYlKw!3|%N+!0khZ4Q3l`PwE zavRXAa8If9=I@E4J|x#$8QEHK)z{o#Gbfsisl zJ^JI&S0$tPn_1E10vDRpw1QBxsq(G~`sZiL4>;OzIN;Xav8~%le9_HnZm!B18GMb> zh=3uTeHV_{BRR!7RQ;UO1tnvXnRP8YT8=$J2^67;gcj}$%nmmIshsi8QJ%H#ehqCE zv@0Y!oZ7VaY#J6&q{Z_U#!n-z56#Ycb6$40P+8m!Pfvi~rNnEpGOVf@P!aN;z>dd^ zftu=m%c^SHEO!ZQ4w+Hb7o7py9H8cq7`p(^R$7^qo;v!^tIxNqo4%#(_y3vXFN+201+BS8J$vrd<;8 zW4hDgySNXx%x#Ocum*DcZs2pCPo;U)$B8U;4P~T`#x%H+;Z{iFm&{o~bqvJzIL`y7 z2OL$>i-NU{owlD2<`AV%Anw5&9Dh3K^uG*Pi|so0%TPMm^Mk^*H4h7pbRt_RCF3uAyYMp{Lrp z4C>*IK>+#^a8wKv^{I8OdF^%a9C~8uJyg2fw%M`3VV;MsJ&z~7MXXD$$9S56nm-U& 
zT*weuqBAs0gNDZh^Y|ZHrE9Fb_ofT`Z?*ZCj^N%~09rnoA1}*-e+=V3ynMGw@ALk@ z5wE!EZFGTa441dApJ|bP(F8++Gs2QbY<$?j$p;?s&73|sB(1a#aCZhjHV1HfeQRpQ(*FQb z)Dg7!5us*LBTX{GvBok_C+5xw>`y$`mE38*ENDQ}-R`wHqc>}9IRI?~jiWfg1QWnG zJk+;C;-`oFJEm$ApAQc<5|}W~Ou#Yy=b5m_Cnp(=ihQFT^NeuLYVeZzu~SlaCuwtx9D+z0+n##TYTAaGt!UQGtKZw{%>yf} zR$)Uh`^p=SyMdCbMtJMSIj0t%Ek$zO(%7e=_{d47vdwT2mflFCnpFMkHrCr4XCU%d z&>kxS+r%GiwOeSsm3CD}mO>MJoP43N@}cd>94fCtWv1ic>hUgpehe zi~W<^o|&h|=3Ly!K>1Ne=1*>>yk5)UUx+>< z_|yIpKZ$nQvJI&pwbPhF=t0|(N+9sC<@Z^9{I<11Ko6ABn4 ziq29Xn`U;okPP#*aGd9mMSA6rgLLR*mT0Uk?N!eCZS5D!Nr)SNdf$3XWBC2Jt!s<# zu-UY?4H7{R=W8mesOWh*5JqI0X{?OmFwaI65o$VH2xgZ0VH8F8Z97zM z7|weN=c`)F+0tKdly&Br=YAwuf3_0SM!H+L8r#c~NZXEEE!U=S1$AH9*MAQ6n_spM zKTf<=5!@aYDR449f&O1ZU9P3!j|sPmS5UsuO|FRE7)@g0#=*9dNh1J)xg&7yGl5mF zJO;X#g=D$#PN;lS;oFFevS@dw7k2BouK4?c8CwJnNa)q)&MrwOvC#>ryRF64ZDbx+ zryP-+$k%AKQN;0{sC$#>dm6jo?RLw<_iYvAjNT8{*;3$+8qR#-W*^;UYzF@T>(?Rh z!^MukX_qRibffoj&HQRO{7C*)(rZ=`Y4q(rFN5{%RB4_p&|!%Z_%PY2TW%g)>y>T9j0V8YaCpuKiGI^j zn7?QUP3^q57O7`$zIHYPCf==#4}ZqEZ-+N`J|g=khh?5M@m7w;=fk&?kXG^_H#MY( z_<uAmDl zeVvOg#XJlzQ;}!l5pGAa6t$F+5!1;Pdx`p(9!O+*t}1n33VH5D_^z3+g}ky5lIlU zG0sj0B#wjEHFv_^8r8f>aSffE7gsjPDx0;p!Y;saLqFXwjD368r1)>(wXT=@-9k3h zu5cB9bt<62&p;JNW0Coq?ZaUz*J@U>A=KnJ%@#Q>ekf0=cxO(R#i!5T=FX36WSLe4 zn6?<;H$%5Qjdf?>tKWFXR`DhLeii#>qRP^kET*`HcA!9^9&Mcz6Oef(y9Bw1!>a&k zEp%ujJA$;)Ko_>+Se|{s^r%1KBDB<|^A4vDwiStvJ6nk-9mal8>^k-p^f1HFr*zC^ zQl(CZBjZnoy5EVsA0~}w<51QMDPyKbq(cS23>#=vkAfHj^KsLsTHv*91I3zOgkLbbHOy-Op_!E*b7FZPqjN+ar(j(upb!+SUc_51HHHB$}+x=1&Z%Z-X4sNHHT4 zG7d=FpGF5AJ*u{wrd;?}!Tu;q@X?E#i#XcK-XRy9s?snFcIO{2Cpj7JFl*d3&)N&X z2^6h=4uh*l2n5V7%*`1mfK_2!bnH%R&b&?WbK&ofm}%B`5+~c4cYTV|RRFF43er1) zjyn#u}Gd;ehEp3^;cn;Y2u^bb(?`<8(?O5I?iX9H}`EBI8HW1#TnC@iU?4u(E zum*aOz{Y#xsVp|~T3W#iwVks%yuN7#+dXi=9>+Z^DN|ZLr#$`ZTsO&H_tr65 zTSjAo2_}sq@})b95PD<{$DEQt``(zxH>m1z>KcL4#o>Y_lgTTUEJ!iq_UBp|g?Hy!Rx#E=CE+ z?caf(D(1cArKGT3c@s*myJUrSxsR$!1O#&0%+i%)M51b{Wft-kJP&=ibz!($+N3TBhxr`Ikmul2jhLf#$b 
zG*%M0-O))H>PJlR*A?nkdUu5vNw$W4X6sooOSDxALrE#{7>$I8X+J6_W~AEBFV8rt3%8d9Gu zg@QKGkO^Fq$<7HG&MQCu7p}Gc06??<0FeIx!m<2K@q*l0+`*^mdVRH|fwm{v*K05Y zWwDMx;N!2U&3KReCU+n6@1%d=xj(|DmKn)6Bx@L?wL3VVj(<0Lwc0#^$%wL*QJ$Yt zdye0YT>B!-vEeTTtVbd-B9NfxkIyHsew5gugz1r=DJ}fz1fRQn^-u;tU`9PTsb+Tj zJZ<)XfujoX?Ie;8Lwv^|{{Z!~Tk~Ct2&k9R$#kz9F^WbjvOk;U?mb82RW3A{wJk;~ zXykK!b1#-q50qqe$o~L5RdXX*-lIS^f^Z~-@~kt^gZh0dtg}cgRyK*x+AhMF({4`O z3=CsGOy`5gTDLD}sx2;U8F_R+C&IH?t3`+-+`*DUS;+gqdSnrk=zCY6UCnbODycHu zB!_bsWe%sfJoMmxmFm7DlHX3UnkTxR#Cc$@)f<^kNKnU-*jJBhQH;s+M^b{B#C zs&I|Y?UPeY3%2WO&E##9&SQs>31B_F>c*dZ`h?QjGz{uoDP83400Fxh=KxnP7-(bi z;VrqkzIy)vo-48No|~#z>UK=pbIP(6VKjL=yMkC84xRmLuB=p3W>LJEx2tLvb4?wb zrKF1vuFALv0~q;wpOkj}YOaD}he(p(Ge--SF{H8M+abW~gZfonb>VGV;z=TSE;5B- z4iC;i1e5vIo6S1+Gb!?Bm2tJ;Vk7{JcE|(MHOP`tO8c74-Q3NZGOaX-WOv7wv4R*3 z=r)upBCb6VQOzFI}K2oBCe6ZHJ+HtIM2%@eTy07;T>-LQU6&~wP=zJD6dj_TrN z5dQ#Yw6eJ^o>|*H{cEF@Ng1bOerfJCOUUAeH7}EvZdW6@tQ|v8^R6dbtW7eIiptmv zgWHS`{>@bLD<>pMIFBHER>6+QRWj@)gS5V1u!M?maR6D@Q2Bq&qfY zz|A7=60uOA44&kjO2h0IrZ;KtM_a;?n<{2`HWVbg8oTQ*;^x!wELQp&5x80pXWS1qUAyjHIjy2wke z8bB}yKmMUzFNn1PabrAFL_E|AWp&1K4;x7B*N<-1&FFIpwPc1F!Uk={rER$YXB`d+ z!5sCisbAT9+L60GH$<5ilPoxF@DL=NuZ)jJvvrioA$ub++QQZYi}Mt#=`#!w^n5IX_zLX1fw=6WLEIMyyPM7bT;WTOi|(bA$Ny zrfXKap!;3jr4&0-M=)7G@ye=L^mcdg!GkPR*k!N10s{f5K09rs=j!kw~&x z+pAnE938w8HsdSu@DHv#isbbF02aeHnI!hH^)eMzC7n({8DeqUk;txFR@AScylXk$ zRF82!T#xdckJNvSGR{aO&85eg|Hj$^YTgyIvCuYX*FV0tVKG#b%!C2K zkAOJbPbVE$I5^K0;G1o4RMeYN(`DJHSj)AeXFH5%3P5r4<2mb&+}A~-w!8haFalc> zc?|IfkT%RZu~Jxl;f3qSz$enYEK`@YZA)#@(Fmlq>S5~8$FKOV!uwLuWw}|FBy}#q z{KilQ;+W)-)bev$ekSoP#f`+RcO&dFH~e_CdqiY{#iBoW5q+Ae6KY-^n&R4B zJ#@>fStD@UgpAC}%rk+=Jn&B&tDMrjQE#ql^4ZB`zi7BYY-W|de=M*ZcF6reKK0QG za+Fl1A9XI{QnrcicA8F__u7V;eQ!HFT9vz7&TX=xM)t#JwiQl&2qV5JnuJzbPK!5} zYF*@ocgP9q8wxli5?7p_csZ(?YU(pxT0EBV2rNRxu&&%QDFlREj!x|7xzFPtiS60- zWVeJkxQzw8a1WCiT!k1pJ$cCOgIFlJRI_>-y%vahg~v$K`% zA-KD>Q#-@_Z6`c|o{O9e4tD)3u3s1*5qM@Kx|8gISVAIIA(@Wi&OjKz80b0tYG*ZW 
zWV-YvK2u}G+HRGnth$RIv$2ggB$l#C94x4%kPw6_ouGnnZ~^zFkH;5scym$Ue?&TT5v88o|Dqml3K5o1;%`P!og9+(}ErE+?Hv2&-~ zUfC&g1@t~&?*YU`wvzZAMg|WYbI{kM_+}d`(W1$9aVDcRte{V@;y3d71K%M5;Gd;- z)KaFs(>ijLri?!t_`>(Weh-Qd?KipAZtfe(cThZt#|)qna!%q7af;?VH{#3x01!;p z`m<^mXom=}Og{1BE=W1*e=}9SC1`rCv1ep$?O7~r;wq~(+RY$bZr_aLDZv;6jkU`7 zeo1WYSBW0&^YYu0vMI>H_TV4RyJ1cerFFTTN{!Eb4@bAv6=ZWFO%0;O`?+p6OByjh zDB3_Bd-IHRuAjqpa>-{9k6pCBkIXj(S%?4;kiWdZA2B^WtB3F|yK`@OEr^zAvY<=$ zsaYB~&l^gdF*(Q2>7FaM)NT#Ef;IVBOvo^*!cab7cC>&3ISfzD+Puoisd5`qe(lbm z#GWA2^cy=EZmnUx)9!B~5?BeH1leJ{Na{ln6f*Dig zYb&LaB?NFvvdVG+^{IUFD9ia5}7~>W1zp}5z1h~`W z@SdYHi;Gdd)ut%rqdi7gH$rjrAXle@jX1_IsIPN(2RON|hrIYl;n#xvFXAf=LsZn} zpH7Bo$JliZEkZcT6$@I$F79EK#7+N`QO za7HohUR@+s>+-kRrDa3Iq&*K_gZ}`n>r=An&2D_euq}l?TSo>!?Tr5b`n@Z*rdNfZ z*|(!(!{aMq{{V;hoNva@f_jFn;td4&OG3AkMO%3V-M*haxhpQ>PzcrXeRH&r*1Uhm z-wXUrq4;+8`%>^mqHdNjEvA~qf%A)m1h9+)&Rdhv6P#Dk*6VA?i*352eb^o(>z={K zKVQI_mOHIZIT5X{=7LDcDBrtw=y@MM^68#}vi``TnsJlR%B~_+q~|2HJj3>JZA)6v z>@=B@Ju(}4Bez?-!oXX`Q{`z0`U*s>P7JWkah7SH zBXq1gXR`kQ2*B%{;lbk16v*l&zx!-zo3`CS9LvXi8IR~I)o(QTJS*VaYmIitPq^`3 zq`qanptXuWvP4G?>Z&&}#{r20{sL>=r4i0a(+82^d&U4QM@t#j#(w%uD z!wG26+!Z@QN5J}IpKgDZXzE&IdQG_dRmzPs+1*0swGA5FN_Z?6{C(%B5MO2-Np9Gv@Ow{c5I>NR;5 zbuB^-CdT6K`d=#274syKn+lj2$vuV!bJMPC;vd=*_K?2#lWTpWcwH{6{2gRKy}Gww zn&@X81~DHzDLnN!=p*>I@q@%av=#l^NWe_vpdj6^i>X*SXzFmsWFrB=FXorT9m}KiPX^ zwz$?UTwD&ak?{7gc4B-6Tt*i@r`gys=Th;DKI~~? 
z@YZb(;vG?sgd10eV`DHZ)~+(dslg?7?)^HRYnbsz!o4%bTE~cQv`-x96Sk?S8`XyP zb7;s5NX37622ajEE|s1h2}e;@dYyNQqx&mgy~V0KMezPJBo3t)JJ*621a;^!TxY=h zt2>X19s`OiDJ`U$!r@1gaD1>g+?*9)NgQ`?PfGO-TSmUrJVC4JdKBXGQ;)#W+eFq= z1N+617Ye}sxvqEN4}`p9;r{@NUIqT!)2<`7j^1eQCb+j*HaTmI4>t#CAg4plee1PE z=5IqYLHJ_cGWcCB&5#i5Had;N+Dtgf`AI|Uf1W++FAjLEwcRdVK5KIoUDq_2${ zNha3hce&MUhB4(i&mQa4{{Z#5@v1|cNAf*-bLI3Yt&P@`acyhoJ+$!2=jnIIaLDR& zvpMRgrcWaxy-MrEmj3_{tYz_)wZGXl`(#UB6j)o}9l(vZk&JSu0Jn3WUPm2nwq7;x z`H~Ul`Ki7;sUPNzhdEF>5PNabvV1?{4M)MAB8ytKNbWTo9KonqvImOx-MWIOhE^Ql z_sH*9*;I>+w>O-0(`IvCG}Ldj{{R@`&Tl-$hKREVUzlf%3=zq}$9nC2528h9Ne6^Yg1Iy+S_ta-@KqO<1PIFM{cOMA6O{`5Wt*h%A92>VmZ>QR;umqE^mwYOK zMh;H^4ZLLg*TY{3`~om4+jyRPn~4D;O-ELa2@#0FAq~D4jB}c)VWm4xYD=8pya#6S#udIA2@KfPM(yi5=pANL~1wMS5t6U=-3~?6C-2wWat#aSBFT;-s zTYMGqG|+f_SW3>9vrneOJ4jqfo!`B7P<~^R=ugtRC}1kXS=pgVta=|h=s&c=c#q-V zk8j*+)>?;$?XBgqv4d25bba!O*fPg~z#!p3zz6AAJ^}d0;x8ZQ_ZLRi#wgZP2HUvg zy^!Doo$@(7>*tS$eky+h__SUyYT3J4mkvLAXm_y2>40k9X9^} z#F|fqbuBi_Nwd_NF)jRT^8!&AZ~+`CU&l4mRx*@qil)+cW?zcFE_j2%9x|}7n^x!r!9_rP9{{^j3axV9pl}1U0)3iGhfo$ zJK0?t4O$sYkxYQ(;j%`4hw24;baq}I(QTqK-sx5tL7mdaA8)9vUmticPO|tttljDU zAYFYMQ?-swmeN8aawIt$`s90duWr$QWxt2^aKXF7@y#y9C8wH3B;%epWAv_UP7*Pn zBwmK}aSl-Dl0FI5EUl!U#J2|GF$lS7BW#i+`P%_;+~jlUO;GSf!GpwhjE>5s*K(OQ z_Z@v}(zU+{#U7{PduS{cOL%ot6mac3RRc-@;Cf^7tZg6Q?!0dz)$Oefy^K?V^NjT8 zf=4`lRnP4q9%#vp-RyOGv~ud6Cb)(>t6^@pKQvdMn3;=oQ;whzPa}g}Zja!f3SC*P zp?i%(%V%)WIA1l(gx$t|T#hhJJv2FFkY2|c-p4bvlKGRXMi99f3AdKO$6w<7wRFD= z+T2>)TV6E!bc~BU0?2LJfMB3(;PZ}$y?7X!+G#5#*v&N+sx%YA`frC<5S54Qw*Fdd zM}3Y!0ke^kNyb6PzZK@*Gw|Hjc6UZ8H(ZqxMC}+=olZ$%oRDy&0DAM!I316RHJPg3a^>BgA%Ce@TzGofCQsffow2Kuz?_18v&RR}RUJOg?j06; zg|eUQg>mO03;xO44=>bkKN|XDP0&rok9lL_O&-QOc0Oz?XBhJo5w%9`q;c3}Z~;9J8Ls~T zPxxJ>Xga3n(;04~kV(1!0C=(PVgik-Gswq&{)CJ_A8GL1{kj_uwMfQR4Lq;qM3`a@ z_dNh7g%zZ+k;F|#J2IZC$n&2cc;-J4!)XY#YZ;I8mmQb|;lV8amdk{h^}8ZKS2lI&#ak=fN!9$!>$F zQ^qSU+fcZ%vUni5AQx1cNO)r&?tYy|PPi55`iz=Y#f^!Hqly^Y{Upl&069~DH#u)| zK*JH9n6EbR3AC}fQw-~MZqsGsP!XS;k~Z{S!=8Dmqd#b^VMbSWv1S|5d2cWD+wKZVLm( 
z5P`>Y+o7(5{uI5B{S7uh_zV94*w*~)2)lty_Ef>}uCs{#)IcO7faeA9fZWJGL=vs;O-Y|hx$B%2J$$m^W8KmB}Habs_$ z1#L>|&0U~}YRM4D=l~oXb;qF?#%qtzWrp`wh{Isl2_$SlSqh!uMneuqPv>1OpJAry zx=iug+e){O^Lckbe5x_WAm`V&;Zs@YlQfRkDtN)IwF~PTiA>VNv@Fxd^Mz2#GC}G# zu=mfkay}%RN!V<3&8kvDUg3!Wy8*Nh*0r^pYxJ?6?pULa4asXY!VozFg#)(%dsc3T zrg{3qOC7sUW~>*?cnih}=v;0DXYsC@Hm43|^hQlx*yW_tUL86cvUjYKG<#2CG5$5^ z8f-VZ9-AB&cGnh@ASC&6ERnw*O6Q-z_Ng_k9t}#)TU$+~?G(%c!-&pUby2`Sd$(?t z6~FeZcH-sK+sg{6c@eiGZqDqGae@Hp(=<-6wTk||2g}OK(`oVF+DCL&8H7q?dA!fz zISK;x>yQ4mbJ4ZEz3kCmI$TKT1WgNl)4}<%&OqX{JYR8hr`X#ov4Y-U!dMiBjJSSI zM;@P@VcNrY9C2J)Br>BDy#nW;&mi?5;YB#N8@HgR*&4T&v+4^pbKNAuHZQq>+=MSo zV|PLN3acimYoeyei%#GT@&T5UBcLZGMonklO+42=aPs5+0s+9seExME<*uEi7+{7| zb#xRwvL*t@{>TTf*XdmfJ0@gwdS0;=^|`%GINA9sS)EG|!2GA5@bs#tGpR#)Fw_mr zxXTq)#D@btGt&dUarxD`B=fu|9y>{%BE#ip13AW64(IAUD;n#{(=Fgx-Q8Y13s{?D zg(Q$%Ny{)ttH-(_Pw+1xYm<1V_`+hak!yM~pZ4(wTleSH)Uj(1M zjP$|uu4BV-m(?c_I>y^G`?M}d$8I$+pRjS z-o-?)w~)so$9WpS>dc8WF_s7c_B?Uh>&0}D&2Mf+n*&=nH#Q7PDeT8A5VT` zNri5;D5YjbiKJ$cnXu(|436VGo`SURUh7c4m@KOf_6U)xgxq&%WFGpcw>1=EM*p5qq5-iJmc$H`VNn(_;q2HO)5wX*6k6RG+a6XyXGH5>~cQ} zOY3{M=1YXNlgWh(POz|M&l{PPbDyZ#$TezB1E2G)&$QKad2dW6CoFxOXi1V*^P-Re0tcf$HQ>pI&B>?FB?QKp*? 
z6@mfF4a8*Rk6z~nzKrmfhu-T@FzL`~UQNsXruy6Dy*>N(Rz!KaHf=36AD#pF6uywVA%z_lRNJLWPZdL)e zduM~kt$kPFj|{+c8=ITk7n4$ocDR_i$@xbP`lNRkQCh6;z4BsUdxd3e>^K?DFiFO16T#Yg z>APcsJBV!~w`GE5o6JuyBw=%$?c7cdar#$FVdC3g7fC$vM2_2xvv~mLr<@%dz6r+8Q68M$kNTKnP#?1_xb>j%!5E=IF`@xAUr#(m0tvR&+ z01s*c(oquG-P*<_Fj>l~W;rSXv0RoQXRdyg*Lah_aCn2ugHgTH)q)HLWk)fnB#q>7 zPdN0?wP$J;I(&M37IwNUv%dC$C1TQt1%l-mjGv#5oyP~QdQz(dprdrnDc}_inAoCmGJ-K|KB5oK@`}*23pg)Gjql+!(GRl@d7$$V&%2<0Ebq zXC8;wi1mA^wCj0Upi8rDYGJlqtgIIU-v<754ttRm_tpe*&n(3{q=TjUJ$+VE= zash0f0CF-%82aL&q;%`2-fZ5woBEZXg*-sf-OD}Ij6rrMn{yHnv9jZDIT#+G_v4D2 z#h(x~meZ{C{Vv+`QM7o~XSj4}qrv2^=5eqSpP4W~CmpNDwO)gU8}ccY8HTE1m1n z9MP$+hOfm7`P;>Mvt2?S+SUA(!D%A^_&^80bt$n~Ne3JejOVD&TIAD3L8@HIbv1;# zWsCw-ZwzinmgFEHf(CgUWO1H2txt{r02QRv{9k$DYw)&ueU;-wJn*P3AV5@ZQ{Olw zRQ?p!EOksQ2olm{Se7_q0Sut!OAdZu!~@uj`x@VtRM#}3DL!elH^4pz)9*EXLs8bC zkzfb@J>;25cQ|m00`hP@K;Y)R0i=&h@dlGFlWx(f>Vm`UcM2b^~m(0Cr#QPv?#&nwI`Ge;b&A~eM3KPvLs1y9r(+wqp0V{4#| zL;Edby;)jlw@N~=Bn2UIIRt`6J0E(RNAV5i_k^t={>nCSEQN!^ZG}e81F0%NJC8s? 
z=DMm>=A^k2?0JqWnUm<6FN&8^o_PE}6sshel1regIskGPB;`jPKpD+;KVj6?#7%W$ zbrkcfO5ry7L6r+0eqa=woRD%&WLs%^jLKrRX|44JRaO^Lge(CBw>Th{;AC;rzZzaV zyuX9Pqs(-+xR|D%k;N%u+mvrC*v2uQo`dUNRGg|*iql`~xzz=xhnWc`)vse~PqY)gr%{!mI||6^WRhHu7dNjX# zUA3~&9;Cl_O(WKx*HMKXI@w2HM+76Ya;B{QY<};5uLJjqv{CYEP`|)(7tqsav$j3G0Il znf&KUWH__w0*V~k|O^Aq}+3Z_eTnQ`h)%6eDBA;F7Zc>?k{e1Jzg6vN@+ow z`QY;mQSQve8_4w_3}>+V=N8`C zVfkV}JF&-noc{oydg_cl+cP&SJCt<$Bd6KN9nI|5Q!Y!~Ou?DkjI$H>iNNIh4uDmc zlm&0Uk-*)adB<9%sB0E>llf7jUS2Q!vyAR3#tCE44#)GaJ@H4zofA#-VwV2^?K2Fa zw9@v-Q1g|RVtlo154)Ym94R%VszwV_H5Sg##2zTqJTIi&>N=(Hj@I2vNh2I8fN{^M z=rPwLftvVF_JIAMwLgp6r}kc!w-9)K7u^-8`}2^0)=Q4wgSWY(_K5hUsd&3o7P^L` zW29*@jrN*dtj{~=YhmX3LEn%D2mo#vA9$ZC@P3VJl_lM?le1uw((@wR*HsGZnBA^ zVxuDEX3oS%Wv^NbYjj18F1AXnq;R3O9Ag~`73W4U=H|YqT$Jr=9S4Z~ zOL5}87sDDpyKfXa?wR77TOCRbqhz@cwZQ4MfIi$F#8%$$aIU{OI7ggg}t4u zO*GP5#$h2q$v?XyzG>Gq4~UnZGq>?A?w4Q%g1dZBy%)a2ukks7|8XjzApI5 z!fjU>>1DR_d?-G<~=kjjNmBcrGz@UI(_ z<5rh(YHjr`GW{cBQb6p!OSban4p ze-%7&2ae`{2KXVyt*GA*D(3nCOcv0ezIXIIagpomis3#L$zky-`oqC`o~<68;hQ^) zolfamgA_qd~M=S30!;+@ddnoWY<0zydgxaaA2Dp)Nei( z>(?F_)&6B4HI?hGeAv4WsQ#73QV-%=owVk@8EkSMDDaMzb#WfGD_z5-&548nx2eSlnOf z*9cnukp{@P{{UN@065Nlt1b&WUx#`pi!IoyY8Opub7OIqCSa3>VVvV2Fgy-GC)XVm z)i1v8LR{o)wNg$yuIb z=I(w&YaR)E4;05^aS4LuZv+pOI0Qb@57P#}Q9c9y&hu;6Ul3ROZS5@~@e`zBqG?P2 z0HeU!IdRV*XPWur<0tIpuWJ7Q6}4{==~{fVX>&T?2_cf-D<%U=m1z{?A|Azi`NPMbvreCB`$}j4Yf~GD{6BE&!rTS_09SR? 
zZua%B6!;VHr%1Z^eeov#<4}@In|&CuK&!N3;ywuE{{Ro`Uts)KndAMabfApO{{R%d z&UW=5y1Y5BnLY=yufl(ea$}$DQ=U5J;XlHw2RU+7SNllI83^-JcKa+(4SYH9&Y}A_ z_^VO6w@cF=wP+DDu?8b@I9z1?N$X#rdLP3N30qp*PQD(od0I7W%^#bN2lumI4R~jd z*?Yv1=OOjGAUV(cu#lSl4%6X3VYXhMyItD4c#5VIf%WC(Gb!)|qVPH1CG7D$79Q3a{x{^446KNMWRxNjG*OMm53bL$wTgdG!@yKM9kjgU4$K4I@$Ok=hT$O1`GWbs(`W3XcwAAmu&8BKr z7P_N5{f(}pVI&+8w+N>PbCc8#{Noj)_Ki!!=4<^&Qhj0@hLSXiZKNtw%NxFP!6ytb z26!CTAfFE*y@o##T57Ur^IbohVzqz^5oDtTpbsoHL4U=Ub-u($e*9CEA)>w=WIFyP1wMpaX`;%W>*S z>)%@AP_?tZDILAglkDF$!xHBh+5tr>0LM^IzB?bC6UJAbE%5|X-fFEBZznde$K^KA z2@AL>!wxV=AOX(<73*;8m$T`18hlZWKH3;n-EH6Pdz3Z+k&3U*as~hcj+Hg=p2o&L z&BfXEHZHtXH-#;3mfCf@j!8F4CYYd7z$iPmC?^9OMn-ejCcL}E7Pfvq)h`z6?&{Xs z*}vn`*q0Ju00`aFmOVQAR)XJZHx_!LNpB3S%F#PrNYgqDU@}4z0D^O$W74d6e_ph- zvS^mtOZ{Fc8jB%5I!{3r~Q+V_HQ$@3tZf)cV zb1NAx9hIGdY=CgYfzET^IIc45#uv95dfGph@L@so7v{(qz#|-RE9h|a>PacLb7Zfm zxBe#7HA^>%+4vbYnVfp+c0oc`G7GP^zIMRnq_8eo;|UFiofB{ z3*C5^S&qg5<%Uxu%2bt&0Lf#3Pad7l1$xm@f;CH_+IUCBQ|nr$ogBJUm-coRo=`oI&Nv~b6h+*VzcUZXqNX2 z+j9M+ZOV@()-)UaW5N>5;e^8Afg5qW)cXK>JPZn|GtE zMd!GVZLF*_g(^cfbKC|T)2W!@sXt>Xi9PI8ts67PtmKL~r_~lF65K$D+f-!;ARan% z$JV3Nys32dip`{g0V4d20_VPXW9!!#`c~cLoSJQ!f@@=R{%b3th;M}gf`s?#7~=y! zrDfXqA}gu9%bWeJ9neV_va;u^mchc{ao3zzY-b;du{UL@%j=6h+G1G*;1y|Yy3a(d@G zxH&&|xm$Ufd5O4E<-jKeU4wScdF@`EC(TVZv@w&^n(E&2MUrN>Rf=P=S|T>F9mn`o znr*z+v&Q1u8{qP7DI_ZRZbwt=itoHds5E*{*&HH48WaGi!E?AML4nRmB=*4XT;<2Z zZE}4+3)?$;iQ$D~lIa`ejW=?gPFsxh9WhwP5g9|@vJO{!8XB#w?xCgqoxaIw13q?R zVfQ_{bKj+LFXq{xyuE};w3s9L@EC!C^&YsdMb^9n;_W{AS(imyhMEavMOe3DIKbR+ zzzht5$6nRvi)NP^S!vGe=$wU$Swb${WPrU%C+SNMD7fg;UPUr}A!190xoPBiB|<84MH2Av&9^8M#x!Xd=1z@_bT*l84+s{mq z)OGLLtzK#dPY@ShEUbX+^74ad9FJ@P`c==~b}1cZn>GB=GGpwa326@HXygn&=;#3? 
z=hu@`-9c}m&2<@MH_@WJDH|M#4{Q;~89h4VnzI}eTj^0+wZ*-$nCHrA2bs5MDsXYz z9AtF%q}H_clGf(wArr{Vs*dh?IRvm=^XtzYb$7zOS8Pcy?bfHiDF}G zxTxcTK^Y7io}^aqiaaR=#-6&Rwd=HQi!YfRIN#Id9G=|so+@fdN~C$8_3BzpMWk2o zZn0_on>^NX@6}cI0UH43Fu2b=k&}XcnXCG4r4G4pi!1|8GdslZBL40+5#+uVdCq?7 zjPs0DJ4Ll!AMFrXM{e^G=WHbyL+Ph; zXwD!TV;rkw?$4%20~i(MV{sJdHz_A}7k1F%EIu5|ZD)C{3yXW1<+GFQj%Y4L^ zK%i{{8TIYOc@_77=J7X-uCzZ5!8O99gaJD2F|P7B=bko^>(iS0zr!~AJl6MC_Y)i2 z&m|#|(a!lmV8E~-;ei`)NXXmAucTaQu-nXMw}oMOl~q_WA`y;bX2%)lzo$W8JskGF z4su+)&F#|P*U09TPH0`99{6wIuZ(;<;wV-BV36mNy$^I4eHe{$|vwNvlN^ zwLMZfG)Wgzw~E6_l011~Hi8avI5+^P?sLfeIlVjKe}#255@|%+ZUnrRWWMId%kw)N zsLwz?hPn?IY4Pj&Z`xOEj9`v>cIPWa@V)1Ubh&OO)GjUVW^pWv zBs(1KMZ=7d55!m#}JR0vjW$@R;5<_WeqSlRB8{$*g{g5g_f`HpZhGr%J?J?4v}Znx7f zZQD&-VPMp>~`mnGt##;dx!A;jTp4GOPOU6MI4a8BY&9kLwaO`>GIb# zdvAAdW2sze=le$CWAitxsk+bw90lsw2Rvif9=|Ud)v8TNP4cGgcisO0!83JQc1N6i zRUQn`^cAtNx0SU6GQR0zB*Ph2Tx506CvPLKVO~jN;mJHh;yZ0WPr6w&?8scGWkkTo zRv70a10RKb3xDuuSMjE)q~Ge7(cb8>BYC!JrWgCWN6W@JUf*0+*M@&*{{RX2UU=o0 z&v-o91a|C-Oe{&l=Wk(-dz|sozOM_NVK6v;Sxw4%`Rm)J$4Zms)t+x-`#Shv!u|n= zSZQ@6xV;;V!y>$bGq~Ue3$PK%Jx(|r*B|jhO=8O5OuN$LmVGMLHFb5D%S&Y}szWM1 zT#_-tIqTBCrMkXYq?xWF7SX`Le3>E)8?F_74`O}C737{O(RKR^V=sfCxYNbalL4Vs z`JDXod4M+6$z!yD4nQ9Km@MMGY(lAB>vwK#tomNvy-a!T)sG$cjqpC_;s?Y{R{sF) zu-I8XsdD+410cJvk%_$k)CRc#m4}tBF?HJwi*^ z*-WsgG9tJkOAccP=HL#e2ZAf><(*QgKYpT7PR88HNyzZauZVJ9Yu8$Z)wJytbC}*i z0!g^Cfw`EBgaLPFJ^ckc!JiW~J!8k3C7tisTEVWP@|GCG&ngm5P^cR)7a8LMy(d=v zmh95j<`le+$5@z#p6d8Kiz=O@#;OPfy7D>5Jvxma3VbX70E914(=F_EnDuQbe8)>Y zfn-1II+*mQl#tz(fBd!^)3bN&?O14)yiqV9=vg@Tx`%_kHh+GL1LO+7Su-~DfvNZ z&gnDHcd9z9ml~$29JkjqT+GLGv)o9|@-YBqi8(j}^{(7DSt@IqGUjWcx~mw;bD3Ts z)wL^le3=EgM_tj%GOFr&GV#rQpYUhkE~#Pgk4M$KTjG1WPY+&`9NK1-n=*+cFv6;O z7I|mEb{{U%MtBwSrM7_$<*Mnrf%{IKFYlq2H2JNcJh(mA1LhvY{eMq{jhi}URmpR~-5hU+{3oGl ze+R9X!!}b{>0TwUxR&Bcu-8nUdZcS4fB}GvK2QKGGlFrCf&Tz$FNgjX(7q#1z8JX@ zS~8!tc?jQU7aSHbF&HXAQ^x~7n6J=C?B=uhFW~5JCQE0wv$&2(V^8^LE+F72!hi_J 
zws;1ze{SE|{{T<%aPUHCx3=17jwhDl7}ZfAfX4zTL>R{78V9GPPvK6j@dICvP0}N083t>HQ{}>$`^-mAr&HR#?)W3{Pe}Mvq>*P5wd=3%@8$qV z=e~O2f3iAz^{Kosp=dt}d_NQxa6@Mf;tj;d23G@<({zqTcwR?p=R9BWrpm@PXc{@h zV|Uswr7H@Yf-r!Lf(g!Ba3htiIiXKhVw9eTyB`v1x@0dTA*N`-X=Gd?MlqbTwiQ6n zbDVSrxDOV5a<#W-h637)j<8$GV1aR;y(Zo64s(@IhZx)sUza>h@%vKoeekr>puF)E z@IKhKk1pOK3BxYj0mm5uK^<#{g5LiC#hUH*o8ql)G|MeMAuP7GPO-$Lh{_h|PfYc$ zTDYwmT7O-Rn9+N))qWv<(b3&`nsm3Tc#<{-lcdB1Im!F@IB#`kka_`3_{reCUgP2= z_MPFomGI|=L^krK*?ig1NfJrrsRtkcV&Bev4TU@lPNR<^xPsKv@^v~hvPPF9*rJqax0Fk#OV$tHi z6TTnVcvk-aMYgngWov-#aXjym_$$w#>TB101L41e`iF;X6Ii~qI-5oe?JTmuRgWcm z4mxCY#dF`ZR+84Y{vKn|p_q(!h~!T~kNWBeA`0N*D~0$mb|h zLPiS_)kkjC)Q{O`!d4y}{?{6owWho_pJ|p$d!+$zo0}P5t#qjO)X$r(&o=SrfP8!K z%Tn<5oPIJonbxkp$p@Smg>0w=gKp#I9WztuzX5(M_*cajI{mJnrfWBn?%q~(Rr#@! z22%%)-nGgvg*-*5=|2v&8;=rS8TH*xNNfefk07%tENCTMw^GNpKIpG&i%s~mrrb>q zm+@P}dZB@e$qmy-<{<}Z4h&?e&mC!U#p=-c+UjsOUN-TyuDv#ksr)|scB=#;c`ax9DHym%saV^$d_mFdr%i9+E&kn%9^%eNWKoW} z2*dTL_Hpw?dq?EY3y)IxKdX$(re0}`+-_ub1Mo+0FCh(-%ob%ktBzH!_8PS0YkPDD_^d7b9-aKD` zekJ&Ge{G>?u)*Sa3{4DjShc&HNTu*o))bt$q+%Yg$5|2mE5Z zoU}{iK-Yh3WWeAut8D|f&9@$v^EbwS3;5ql)hw*MQFRWjD%_au#4v#c|XVRggyxP8#b5XZ3k1E zSSC3Z&e+cskqeW;IoJaHqjPdQ3dwUr-o4Fj8R=^tL_F;ZH1Ix~cw;fbCe~%Q3L1G_ z1LcNe(SGSaQHsRX^lv7}=~|P=C6w|LVRWsz;gFN^KSQ*hhZy9H;MWg#rRx%FkZQUl zlIog|%x`12EXR^rnTq3To^#L)SFY(g4d%6KmJ`JtqP`iUge}COh7Ll;c9IXQfbmI2 zT1f<|Mcqiq@Q;o4&k5=tWwWGr5Vpzf*%_laJ;$Ny4TBko2mB7vu9tqdYpG9mLdGtxZ=;6_GnO&!EL`=$Zls*^ zTQPplc1x)Fns%XmYO5ybq-fE|8TooJ!LM2~=3e%U^Cccv?(BWh;_rty&Eefb^F!6e z_lGX6TqW1q8H%Y=HnNaLagSr3)kol0#t#U17ew(*uDz{Ar`<`e%Ob_)Nh1~VVE}-N zd#U`Z%KjI4v&NqY^oiqLMRhASM)PeZghrANVZ7j;oN>=%Ua9*$>i!M7hgJUogpW(o zt+oAAQnk7H_rw1HV~QJi2ZtSqfmKuQP0k0_vowDI>7TSzzC7_xs|1%8 z)4ORp1eq!#iV=}Ke(fPAKkqL>MS1k6Y>?WsRm@xeXOpLx~lBK>$}&Ccdw%D{v&*G)fORP@Uq8GhiDtM4OTWi zJp-xzD~9-$<4eyNe##*=C3yAyA5w!&)S;V+1hKJPuszD)pM2NWv9HRbz6Dh~)~P9T zJyQKiscJOi9#x}%q4_`Y(WJc9q=#056x5`>xwx7*#`xouNQ(PO;NZ3fPkh&#`0HGI 
zuY>klCZ8OVYA02a?8g}-JRu}ggMc`2%Yo_AyI33mh@SG`APGS0G0IYVhnaxlbb`9=-Xmot-x3rRV-Qbct0KI2hW>L5Uxp(wY&wonwO*g@s(D2L?&epJBjGfoW zSf<_q#t!q3LykN1TYd-BtgrP4fLq3Hgj*#55xPd)a7a9IdwbUYz2V!-jcF!Zi#sy* z%#R(rGd-~CcAry|f~TBy_2k6kB{rcgEO=FFdpP^2pXi<&7uV5h8n&aQ$dN;}>}0zR zg#qS45)3aObA!&&)DkuPVQZwmn{x}iQdwK{WvE3QT$j2mY1b|2x6_&T! zZm)2MOpRIMogsI-NgW%KNGj5f`5|&fPBK6>zdnnrYtmi8s(GhI@{#4Zyk(7h5|JV> zJGU_yBn~l-b682WB`Ldk9&DSFxxvTq_rftrJ&uX0G}o7gH(OHyl2uYkAH8)!7{LQ@ z&Q5v?O=D61$eDE03`&;!<6&-NR57RpGU!MouwqV0;~DhY@kP$5X4jgXw%fBCA2qHO zVQW~+Wmn4$jFZXWj(uy-b?rX$Nm!!P?dOJjxZR51s}$M@;BYa4&jjZq*1PDrRruw9 z&-5>yHQcJ&=B;_EPkpD!yh9!WMspllAH#$9i8vX_%{s<7uCk{?LHrlOI-%*`x{b$B%f^4 z6^#|VhD@}D$X-Dobg{-W$9~Gv_A$C!>Hh!@Xxd2_dY{AHGVf1=P}gTE=Wm^Db~Jkj zEPTKR0Ox_%jw{c6Y4C4b`(?%Uoo5B!oX91OEC?Q0>^2{mo)1CEuS(N=adoO{7WUG~ zE~9YjT-@Hryee+OvZ(Y5a$BcP^y`0$b~jhcr;jGd5M>(NGI^gfWB>*Lz`;50N4;ZC zwknIdSNyDN7SEpB!MC>imhRqrNphh+(Hkf}ykL`$QC(ldd#g(wKV7%GDk3V5S(oQ3 zHv_N{o`j#my$8qM8}Ss@B=~u)=hNa)=36VAHNuh(FnVq0obp2-TJr5HMe#kP_mc}I zRnLAN&L#{8e#J5l5M|RmT3m?_=~e}g&)h2r~tg~ipgLXR9)`BqikfED+G58fw?cdpVgr`qZk zYS!&#_KCd7esl6?o`hfqY;Zk(xHUWbt#a{iU?LG4g=6+?BL?J?lag?Rka5NbQ-k7F z#a6^S?7FU>sk|`B-Lr;$Ct16SD|@TxoRDC2@ndM=CL4KGcR z-ul{GoBTw#7g~g;?FKgf8~{;3JvtnYxa+^FUkx|Iwr!>fV!EDUmiCtj$>$JD6STf@ z$KKEX0AHQb_3sdCTFu3cskeqjh);2CdhD>om|TTmc_f3?Lg%AmzE-_@wUoI%*squ0 z{aEv)xm}`qMfJs=hho++NIbTS%Z?b`xed-ql=i?KNaq;oT$YpK8+#dVEd)C?MUz*97 zSoIkEi(NuE9VL+Mh>ba2^1S?u4I(P#YY5#yPh-1{d(2$Cyp+4jRoe>H3;RC z&SqPSxf0S9!y=VXN8QH*u5*lLyD6+(Tz#A5R<`T*#Te7hs$ItU%W7j$3>t4&_?+C}Lczv}y z{aZ+}mg3#!xYPX6fWgij2+z#KZNlvXgVf{>FZOjBh1R)$qeTeSV!wqWEeQlHYTRzm zMf&t49xLu?Vq9=?nw8vpmknO0rNjF;>DscBFNp3U)rQxK7$HFd5CCNak+_l<=1v9= z72ZYgw@uvkz_@3swE#b@EDNF0c_{{%y2obV*6Cnto2BAVWzF7uN>ZA z+akDg7$g9DL)OG9YL9RTu(kJ|TEX@+noRFFH9Opdp2N|!Ne_N?; zYMOba_**-AIXn@_>s^>i)o^pVO{;b)Jgp}q=N&^v z@ehZuq_(=ABLig?MjIG~z&nDEayiXNx^#`Bi4D9GvP$2)k7n`@bcU-HO|@5P5*@0E3JtO` zBLT6%>N)z?P2$_n_(tvEvvLKsyp09Q+K@pb9Zu1o!;UMF@U4V8w}|cbtdZOsgsamf{O=S#lV 
zA%Sdc;Fv3}B8ur4c|Z|@OJkK7;C^-Mz8UyI;$ITk+MQEP)Aa2=;UrZ_W*%wDA~o2< zKQTOX9B0s1ror%AMevWq+uJJ@k6OEiE05nZv?XN2ZOJ$vD+N#nI+APVaT)d+gp$8B zo3@&z`XT$Q@?9fB*KF=A(^Ixfttp`|JD=T^bHd?=$`lcTalx+B#MZY>CY=_Yx=VXk zGD#VK-H=&H1StoR+mZLODy`$oYc`dn-RcQxd1)Lfw{Zq^P0DvW?qFRw!EclTd8{_^ zMe^KSXp1sFrE1UqlNGb1Ze7l001>rzsRsazSH>PlhdKmSzk-x z`@5N?WRLA|MGotP0oj?G3W~Vu2Xamb&PcA0NwwAP?lCpOYF0xft>Gif+E_0tLHU5> zfH!A2CxARl#hSI>hgnwt09Mmp^J?A2WFRLcnMqe@vH=AckO}*sahmM>8+ERDovq(d zzi6#yP_GTHRY}JHlsx2T18?x-JXUoulay6N)xSSO7w>h@^sP$%>i0*AYYj#bJ*k_{ zhE|GWGh=ea*Bl?0ob(w6xH~@@7J}jLG|4VC35M@1W6O3-KbA;1ImZ~|)3y&2d`i62 z?<{m{2`%QeXIOO?mJ=RVdoJ=M=XNoiu;-?Dtm~^k4PH$wwi@Ji+P0v~(Ji~Mj(0gh z7jOJ^(U&Xcjk_CDmDApUt~RI}3WH0dK>GUIj12?!CeM=XpOF&y9w zdU4Hc>le{nM$#SfNU_L7h?{`g+$d!^V10P$&!utR5M2+z`WwT2;+t#ue$ZvOYj~3? z=Xw-u;eg9$Be?0_y#ql_Etz7oDJw}Lh%HxcA8Q6?$T?teI`Pju^x(zKB&tsBFDKl5 z+{tqi{28`}NiH|(vs+JzA@d_&ED%V@W-ZAeoB_zm#&cVXrpu!1O?4qzZDY4SV%<)B z$jNQZ$@|$Jy@1bZ>a4}Kj|4JBX>Db7aW>=SsXT?+31t}Mahx3I9`(a(nt)FfUtP&0 zH&&5G9$S1fw0sVBa!(*0Mm}!6$HGpd##Wt6Su|x+-Lghs#eW%N^tKw*WA?la5M&Tbxi{&y# z%#O@(R5FDGo;Kq+$0HmX;eI7}ZaZyhwT%MD$hNf$3d9P@Ai{teKwPi~k;(zU_04@o zTSApMdm50lXC%Gnx#%tNYWvBG<<&}g8H?rtm@YSfM;YT7_a3#i;XfEnYb~soMa`U1 z?q_L2$0p&CBJSrI7|8E|?_52ugFctxnG;p9OUwABjS^U9kQNRDC?Ji2IUo#m9>+MV z;a?HQ;xvLQRK5l;w7;A3#~CgZhFmV?Brg~v2iu%}qlTK4RhIt%KSBHPm9Bd)jl5xb z9+7VGG&qck?upa`%s7x7R#JYbCv;zh#A5UzX-a0vQ*dpC_Qe2fjuy zGsSli>zakGmj&$B`eWQ&$h(Yk@EC)&hg^@B>CZ~>JEgdnOVjQxE}Bg~>}AWi`BpaI!I-Tq~021#rC=ob$=)ocDHz;~PzC=u2w*udG&AgQRxrxme}M z8OBPVOrOH5YCbBxlft%|zMtXP#<9XeRyBE~Opq9yV6Pj2>CZ!1{u#KrS)coE*H6&y znPy9idy9axOzx+hqbjSBo&d%%#d`Fx@TW^fXB{`!?j|#Enw8Ga;g`dc;rm%+x7H4- z*Y?eAa<-B*WTQJ1mIOCfJdLD)eQ{n*@e|+=hO`e4TwNIc({Q(uL@iW+vBX?|7B>>W z;IZUq*1b!@I`Mrw2|?YItx!@WMhe7 zY4r(qSlFl*;gm!&aC;B0Q_o!WT3^DMHQjPMQ!`6&STBe*ad|A3*DONJ2tgS@K_y55vPl4rgSL-_{41pRTTLkg0W_S?IZ$Ms zyL_Xd;BoEGHTv}iqX%PJl-rWI9FbgShUh0{=`D9mA1Em68hELvv8=P1M+Y?cdyRh8+h8^#a<|1 z26z$#hD*7mxYV^4B)!o^GDv`^Cm2EwGrIr|I{RnDZAvc^c!x&+0EE}XS05AnO1Ds@ 
zzlC&5MtgXWADM83F7I58-FWX@m1?eSH9Kk1e7c*kd|2>*{2;cPA@h7kHkTE}yqdHR z6Z8Q(ZgzQUgJ__nskjpBMBH!GjLT2$mM&|{?wlU zydm(b#vTW;(%_!@`r^k>xw^g7rSqkk!$1|!@Pab^0PEhdl&UDXGMpgKjPE>k;;$b~ zEV@mDM+}X$>YAgHHikbaQcmCqILU3i;}w@-;55{{aJJqd)a3C5sbX#!S&E`Jm;hV@ z(~@z|^*zqVWF!!|CrMg;qOexn+$; zHxV<1U&P>49vS#OVd5W%+J2X++kLuq(&L^*c?@eU!_NEJ$mba+@B^<(?tCNRf9-7t z!um3%UjG16xt?K+;s`Ds*q>tJHU9vCSE1;>8(Ux60pl@}6T|S!jz|2xFzr=FgXeAa zVLG1fhK`Hy*H6-P(Pjyy`-<9@!!W?Hhnis@LisvIBiH+ zwC`?XWO!CoibX~Tx6D8zjxpNC28ASc5T1~d_2Q{?zlWN4h4j(p>z*PJ*uesyo69&3 zeup7{;78J^e%Y!Q;a-|?{{S(VKlD`9Yv7-R^)DWHocvAkEx(Y@ACse4F(g3<8$=V3`2~n+7qW=J<43NT1pU81um-|P;^L$PCM5yK_ZB(kb9S@lYH6E+tzlN6@&aZ2K z;hX5Lnt3hmCbuJGj3pqGhXYjk5P=y|Mu7f507%UO7q8-32>3?_}=_v8Dj zN4F-uN8pW)yK&>M7hB$IpW7B++1E{PZf^{tH!PqL8yv3$jN`s4?dOL68>W-t{YOxq z^GLOk<$DH+UAF|48TSGlkC=h^*KzQNN%(u=zY%Jhu7#@VI{v9WgzDB&#)RRvuxt>d z06^>ORqjVy5U$P_;V*?(!2bXO{0ZY0rQxQVE5zJzS!_|C;kx41h))y`*V-`WVQ1r?a2Iy3H@*#*w4jDnEq0O`?QuVO=UY2p*-76OKnGImLL7#7obJc76=h zf3)B5%98;y9Vl9Tcetf;-pCE&D-VN8_jx{?7WxM*A7_Z11$_w-=K`1}nO(VO<-@kQbvD%RrNh`{o+RZu;_ zdxKsV@Q1+uCzD*&u60ia+TCgKpqlpQ!uQQ1?IeBJm^5kzN#G6v`j4po8hi@)6Y)h? z?And>?xcB>P1I$MDbV92Y$?zD1e)bhoN6@ITAT8S_Nq)ahG8bEXshtEI7y@cdmcL-Vc2@z+MT4$s~C+-8$uNWVLXfS2VEfRZpbg36mkqm&CN1V~H2YJA8w}w1% zmg|br)GoA3kF-Fx(Jb+0BHmauV6jqg7y>$jz&Qt=^^>97-p8e>weqjkouQITp|w^e zQ~TFIP5>Plm)kYv)u|eu{G(&ZsSaZ**75E23!Ph5v5#4r2zE&8RGQQVoTQ>?4)ScN}y%&l`7|O0JPE zipg$^cF@XheE0cOa(6qY&43Pj4E(soSn*Y*{+|lNZQ-VjNyXj7T6@DB544hixjA9T zEO1F)Gmu3;h}wp)Z>H(@FvI=7Z5POkEG}SX04_qW9F$%g9XTA=T7J>R!%O-7$2*F4 zw@|c`!`64!wjLhv*Y@PfO}v*@(qYf<26wlX8w4)gU^Wj>Nfmoq@cySRpN(ou$ZvHC z-q-A~NYb^OSjiu~1&V#&nMn#U*MVH$!izmt`|Z-{cJ~k))!rH?!olVY@v~!jhjIS! 
z01vy<9?@s3UjG1QTE`r1q~F634=uc=HhA5BUCMaD$?gaz+OWhqQJtf&&tK~PMv5*y zPZifZH#dg#Tm2Tw>e*f?gS6_(9(b}s=KyUzbDl{Z26>~X+(F>|8rC~KCK&Hcs>>wi zA2vuGh$OMgV~xkqXWqS5&q=k_ZCW+5jyQhP=1t~INn~G}E0c|&=aJlX&ox@>MbRv8 zrIza5=4-RMRhlt|>;mr2&^Z|c9S1!JL~$J_uj*V{N6(XZ+S^HnS*&#n$*)!L*d!)0 zPauGL{d$_w(e5?t^gX>U zM=M*+{{TqYtE;4T-V^W@#gjJ6ESbFnV{c!|Xc$0Q~!D zf0zFNTDTZ=-E%?Iq_eQGj@`K1HRH6NZ(;X9Mq)`kws^)k6|c9&{{Zw7zyAQhIKTL+ z@he7imp3x`-~8E7smirIf5qDE#)z`dZ5`e1ylSFPC0{74i?o%&$OIhp_UoF{v(&EN z!_(c^AiN1IH#ZTyW;3)6z~elgNcnn&1dg?hr%i9CY7<91#xT+=#9&RHX5~Jaz!}KN z<<8!DPa5wZj?R5VD6{YNQ8wUWyRtVZg;A7-`uFwak zL-okBt$4=z-%pfX!E1Jos;MLrq+5{fJZ(QJo<}1jkET5C-hFpg3TE-zU0FmWSo36)6<5jTJVYj)|t>M>EWivF}1f_Pi z3}u;#P{aaD2J4YrWsbQ#y3`Zuo*G?B!r+NvTQA~7?=)KNUy*UV)mL)&962YeO4 zKGoZY!&HSnT8n${b@^@TVw+D>L*jf7adYHf+u2&KRw&_CU_x?!P;f!{anuZtPHWHg ztGgMk-V2>F8_{f@Kv|?|FumAg`SLsFuBV7RLw8~JtwKfH41hdeChg70eBgBibjjr8 z(p%g?s$0Wp4uL(@+%j!xEm4Eb03$)%zRMLSuV69(8T!|yMx3a{LR#CiC8vE3LsHYz z#Zp>ndUl5lQr)uKLb3UhoNgIA&&q>1>G2ZVS&VxUTP;7{+n)AJ+_hX}GEo>%PD6PMAa8xyb9@178i?CaI>} zTioBH`LkT=z{aY@NZdB}Av2D89CxluRQP{)s@+`O+C?4oP(&KdH22#KXbWxx4qGat zra58w!{QnBPZe5kv(@DO&y}O{o6c2HaKslJC}IYGhmsFpU4IVTUHDf^dt2FJmQ9Kc zymB8gv4C)LKJjH?o||by+CmArUR!*Qls@_9V~ z0{{)Dp1AbOT`y0XOAGxgPB$~&M-qRmXOt3{ZOhXK8OiCp@U5>EYBsur-(q=87Sr1Z z;zhSCa=_);un*k;<2^{K8Xd-|2CWu{V>E9pHwf}P4p(yFSg1m9TcVJ}ukho8UGu2w zLz(E;o|Y~#QCE>Jxu&+3x?lFKlyEJ)ZnMUKtd0oaAY;@Hanp{R*O1uhwvhPCQrF>` zrH)I7NW8_|vaxm?4&xX+bmF~_#8**Cc^SFRzi|}%m`4*t*xo)?+m3_|oF2RoYopb? 
z4`roEsa)G>!%C53N4SyI<&_5n7>0gQShph`27PO4ShYrut5^Mc{sNUm)$DoZx$tYm z9w748+QQBL)bdQy%QGy9E0b$|0YYF^8a|G8?Iy82oLAFU7 zoE&oDKqs6I58kg=(sbC9MZLF%IitSwM3OWPNm(*BmpM5H9S=Nl*CyYKAd|z>O{!Si zrOoR?v515K%`w}xj7|>WfsVQ9jx%2=Up2*5o5RLjs{J%?=g{M+TGz2x#9kuN{{Z0~ zyu8z{+YZlXZk_}4g_fpg5yt6)LjEY}ocmgm1Dt=b^fXU;LS(Y9uwb5-Ok8QEj zt`<)&;#O$lE^tck+BY4*0yqE@z{M&U2*NzAb9(+>XB=CT<~C%y{{V!zYqH$wS25}B z3yC6*FwBvu-LSaE-cAQNIqi;o@B3;^C2j4eZ?)Usuh|*h3WwU@ZaHEB#@<^P$?45$ z_>WxGJUQbVHMhKq541y#HtlD`OOy9EC>z&~GJl9?&|>L&UH+-6PkE+l5X8%DZf*s* z+XQZ;20}B9`~!>*6rQ#2QKjtQ+xT9`Y!X_s#;>Glm$yqD^ycEzO=%1ZBmvJAE%)lUj#Xdrd!2wu%>xV;5IqQb!|r+)v4c&usqydn2D1_>E<(L$GR2 z(OurEOn^>zmKYn@U<|lYcJYFuyB!t}73!LHnWqai(~ZLHONr;fkS_9f4CE*ZPaT+b z&TFQnFWp7+FTYOj)Fsf^@g}^wmZ24k>fhM#T&lp=+F4DZu1|&O;mnTfpc( z9@YFmHO;bWQQOFnJ=AfJFB`ySk#?NtEsh3z9;UhP6?`?1!oC`?zA0|cB&`&x*DhfS z%tElp+(&F~9=mHwZvj4=abZ4@a|O<&3oLIO1><=@JKaEM7#mLBhj(+%D@aa{x|83_ zaE`L&M>nn8LpA=VJQ_x=9sQgp-dl(HV3pQbz)*IQI49Q}SEByQI)1OGHO`}b1Tx*M zO!|Bx;53+Eq=0~eLW9>GSk@))gY~_C#yZ2_UCjoGB=TLXF{GPHOS!V`jij#A*P$J< z4SV;5MxWqtx^1_eVQ}%ONN!AGDAcGRM}gNPobpf5*O8rKlyI`c!(007D{}71d}*&s zp{wd))Zg1$g*OvUkwi;38Dd5V0R7|qT`^usag%*K_pfPfHPlToxYEERHycSRr+6oK z-vE{Xj0*02b*o)n_+~4cgo+iIU>M>BNFL>R9JgWUde@WuLD6OY!_(z!V{Ya_vTIr6 zjcu2zXB(YB%V)992Q~9qeHijZJsUD>npZ|8#M<_Ua5YFSjNfVBb1O?2QbLYVi-4_` z50C-CuR`!XlXtDz$$x(}g`JGdRAtLZz#QPS1Gkab_&WFv@|8s$^lAPUUVm7{MbaAlFqKH0^4S zeyFauX2*-IH5lhQ{+TwaEpA_QlQ+zYtP3e-Q-O?Q7~|#Qty$>y7nblrt6im))G{Gf znkd9r$j_MBJF-h>gTSf0Pacoq--mI_ePsHsvvj^TyC8QUKKf--kDHh_J4S0AW& zH^g7qziCSwyKO$!)K5L!gsR9!IY3V*pa6gR`k=X@mp3l9PfbW^p6jUi<4^c&V+&i_ zSVyNt`^UGp9&wUFpqCB8f^xXP_4d!7_`qLy%TAQueX3cmB^M2E@~Bj7^Ugw??eEh) z0OQZ}KMU*rEVpXX3?c8p6@{Ly`+97v$fJ2!|!9FXmgd5 zTDiFpMFvMANppg8{2=7?QCa>f(Y_t{h2G!(6a7>yvN{sB${>%g%NnrW6Wowac^ub_ zc;n-DiT*0;!WdqCUQ}Wrw}ApdZ@aRpfT}w59Os;x;QVdizY+L-blJ73{F8mSi+G9o z0+rzZ0N-6btLgB#8gyOK)%W}KM`SSc;_W?8x_%1T{66@19O?f639Y8K(iLdsy0s|K z_u2*t&UwfnbmKMZspGvad%gNTy`jy269$S~_c5$5jBW&jgN8rQ*T_G!{{X=+2l%Vu zjJG0(6pb~d*R25wAu9y56Ak-S;Hbm^2;dl 
z(b#~Xhm3MdFXvwEE_yNLrxj*Gt!6v#`=MTR3I=JFHjn&3iOOiu{a?w`s>AGDp(AOa2)? zDXyfm(f%x?*D>Xdx~y_KfypE*@sC>b?;QTgJ}1*{ZtiXTQ>5x`3N((g+w5`x&+z88 z_SJT4@gK6Mx@U%bDbe4>I)h#6I;vVC+{V9STr1o$BjwLrV>}A|dhxe{V)#knKLK3) zGw}YAr}%;IUHt2$IA#9NxiUPD_nT^$U=Bt=;=eL}2>3fm8cvC-&!>H-TGbZvWVkJa zw_AP0;Ch@4dy~ko*MEu7wtN8^MqK%~phmq({>i%l3MuN_E?(?ay#D|r<@j}JHSZjF zcTn*!i}f!T>!(z;Ykw#AI(@18Mh@8e9wBwe13AyR>i+<2+jiA{J9sM4auYzkbNI~= z{{RYXI#>K7=SPZb8x1Z`w%P89-CE(l(}BPoxW?@K*cg5};-C9PYu+CJ0EP8qtY7$& z%4=I~F5<~7^w4hZRoU%|#>lt|%*!q}FzM7*(u`u7Y{fa$wui;q7LONA`30abfX`Rl2hAB)W`N(0TU~Tie{S z3$U*WgplTU9UsCiscQZ+)uWE_C!bH#w4E_N)o>vD zL&)q-L~3Mq?%BdG2Q};(4})~C5%|l+8sy#|)%-m*zlD-ZNblw%NzpD?tgO3KV3`hMv%lJ(trMz)9)VE@HMZ-8r(5NTBR@q@cK*vuwY9vyv5IS(h@n-IW?_eEqUTN#J{|p|J|b#A68BA}#{U5Dh3R+cXBEdqxw&Oyza9QnY;#F471D8K*?02SeXwnxHCFN7Zkd^w=$ss4c;s{_Y2IhBkvd4;|AbMzv8l_883lPSjO}f|;VITiv#D$QtmJY2wekAq zy^r>WU--M92KfH~n7RJ|(U$eV_?tB?_> zL()ut4!#*~n`n|7&5QvXfxU@;kw2AsXY7-urH_cbWjJW%b@1)LRvfb6#yb0osquqH z7Tyo|QwX(J5$Q6s0<0Hv->5kM0KS3zE4uxj^k3}D&lOyl7TmXnrv+5!J9Ywp992)? 
zY?ypW9hdB%lkj@eZyCF|{{YbIjQy^@9Y~r>_`6J)+O&%umaiI|3z)a7M#t2vst^0; z(z|cjK0hzuwwT0%TI%bkY)r%YQ@?8lBjAR($}&Wj`2PUE$zD$?mK)@HmunHHbH4DJ ztiBV`9u^Wr)-V-yIRtrY&3xJWY3i2xABemicX$5)9?2YwA9z?_@Q^N+uHvCWh&^BzMRu-%R(i7I?gWT5FjC?<)TYPB#&A6ZYH%5cPUR1Kih0IY2 zc8#h#^c4+{!@mye7W$l)o-c&jK@>t;iS6Do67C9xy7EZHcS4kHW^>L?_c*T~Si!G+ zJn^L0J`vJ%n~gG9mJ6#}0Sts0lxKD`(<6^s>l5~K@O8$AX?FS(t4RVmXs#q)stG@O zm&e`_i$nNNt9J)=%OzOTSgNsmT04n z;RR<}o70h+LRxBmaOwX53?A>rmy$(1I!=vmsXe?mmyMjTlM1Rj$lW*}jeU7_rs>`p zwbJytB7YF;nsd)~m$$=gP|YGKE&;&KBS10tK;Vu=eEH&wKRd==7lK7+jvI*zC^;E7 zkDLy~lj>*?#q zr&;kDduxvyM;xM9E~k>=A(2>lYWUAe+=asL3umw)(U2J7Lw>AHkBk>A_Ja_4eQc#;R1bDu4bV~n-{ z9l-UjgT<2{4(ax>=`vhfUdUQa&^)JUXHkzW+XR!Jy~pM&mC!VM-xD;ZD6X{lvFAr` zy%pfDc}klhvhC zceg{$Qgc?D*wB_Z^<6Sp_3Ng2JfOZz(=jWKtQ`w&%l`lgz|IFsz_mJVhifH_+GMe5 zc94sgxsnpeJ3jJWFkb=G0f3;k0Vfq-R9L^Zo$u`AjW>;}YFUzH7%JG^^A1Vp9;2WY zv*N9JCe!7IO@cMOx+c;eA&6xlhU7Yvo}7X~83Vbk;}@h>&+s|lX7o2SjZ)@qH%!#@ ze-T~@WMwxt5}5x0)JVjKV2jH!BcX4-l6zGT73rq--rQM31*FnuYn{uT>`NT5QWuO9 z!1N><%+T~XG~;0D_H?<0gla4zZ2jC4&c~+UMnJ|%{3hovr(` z^6mh8W3N-zlp`3*@h%BRRAx4jCDrAQt)*&L7HJfb#c^(lAho%27x#;egOSE@gU3!l z*zpFbbvBzNqz3AErI-S&QYDEk-dQ41AZ+u3 zNZh$6p8jFxt!h3K){RK-399f=_rY0pe->EHc`#T?6XeeVnF|s?9$JvT zod_gg*KP3wK=BT#r`!JkV(1Xxz|ytjwWLTqf(t1^#|Twh(RnH}&1AQRbqlRFTU4`u z?F@aMYsH8Nb9BRZz8wvWKMRu0OT(mo&fv}av$)mb$|L9 z?mzGU0RI4IT|K4o@phQgK|S@f7Qh8tDN3gUA1a)WL9~&Rk6Ow90E9N{{)P=h{{R5$ zVNy}iyL_%WILY6X^$)V%N3U61*-HSmmNkv;?%O_6Mj3K?fH3&}9OKunjgFrMr}o{v zw+n2jo1&L~C1D~0jut_J0o}(Wk2IF>zXK^s+dT@pcAq|_ zYbny_j{ZB9P>##AyATQB=bjs{0OyV{bG%L9_0hG9g?kyJy?B&Oa_l>p1*AdN=)g8P2Afo^z>qX4colkbRmM;5S(UKwTOY7(9Xq z=V`~jIK~BOS61OJj^}{A)+WHurpKqQlNHtV$zv6)Vn&;uaUuCafz%Yw)6&JBfv@Rz(B0~DL!`a5 zYLMCaDIp3n8z*aLq5H?bb6J|D*N8sZCF8^`m6=0*q4#-@h2v-;hXj1Bz~?xwlTYzg z&xjC|B0RXrBVo@30&)*dPkt(|+dNO9 z=$8V24C)tla+dw%00%!W1Obq6M+2PaatPv)P5ba zhJ9%SSJEI!t!(Y{D+U;0C?u}(52vR}=5%{sF5>HD?diI>%%=5SS{6}~!N^d-)NKa` zk~7>2gT)$6-k}}LI$p5%8iYkk8=HaiWSoJMjlICg$2iVw)l^`oeY^hvU5kc_=V?Bn 
zpxfJ6*jqpLh=_cP6J=yLE%S4d0B|wXW3PJ9veNAT0J6p8P{E-MLNRvt7P2d|ZRDu{ zlbnte_s13G{uc1f=ZAIY&@b&Vt4@qyyH<_zG6N{dC0loKf&n?=y(dA_?GsGfXwyxs zLo3|fLn4DK1%LHOpmh05XE@;Gb6DbI7%y^H{=V(Wl$%DC_L_VrV{U?JuOkwr&CQ!J zVi=5->~^k4Qgg={&U$Nq5<(^6DV=NkEw;_x|b}J*0?nYIM@A+690o$HA8D2cRx{pho zBxxPPK|Qn(;LEw$316KC)i}uH1JDlbT3T+GYc7V`jr@@;(Jt`uN_@}&AgeYH%hL$K_QqFOhqz$l6XGfkFsjkPlK%Jk=|RZ>?d|v~4yEi}kpdbQ&;TUc84N~h!FzOS* z9P&M-(#9~7E*S#3$zQu6ba*&7YnBOK>n^p7Y68QlJ%V2Z!5PBRo3FtBOB8Q1JIi_hZ^dykIxGi{AM%txF3E$9x z>DRYj&Q58#y_)<_L0;FBFWKpSZRWdetIQ&f+C_w2G$LTk#1K`_C`JZ&QU zX!nCkw3W44WD&y_sd!o4lOcp?doSHxyo?e?4P)GR)%-!>a~-Xm(CSTgZKhcx9KHz( zNX8hJIQf5v>s>yVtay9j_KRz4;#*__a~;pw(HC?Nwa9Kz0!Z3B9(wh}N}s*^$ldl| zEkaznXlZG_J=e8OCfeRx5u};jT6sLS*pV^9OiMQkK5S!;m$iD%o1xv?+F$aQ%i09!Fj>Ys~I`ElaC4{->sBvR}sgZHq>d58(w!!3U1Ge#>(tQ9EU1jo)n9c5p!5A6$;$4l~>u;;9KzT+m#;rAA)QSnPLtJ-3K1CXPrL z+{Q$Z-P*K{(T2mJalzp5Z~z?S_p3fCvbonhKNZ5Hm-jY%Bz^DXqDdJ9EMS9@aKrI9 z?OhI&s72u42HY%H5B6M*?{%G^bRe?#&PIAJIRmY7TGp@T=@Lz)>XU02Q6(l++DkC+yaG(CoHDR(F|>>x4lA7T4yodpbUS$Ne8}y>58U58kt}6L{IeVk5&TCW zb@o2wC@PVR6jIju{{WE@*vZvCFnEH_{>t9!Ng@`_zF3$Po6Bd&1d;O&M|^-f@qunX zh>3lC?R{?@(1b|#<)AUik{AJ$BLrhOJAmAAT%NbBTk4lmT}sJ$9p$*Tx|vAID0JtJ zt(<3`0XXO}hAs}Dt!e2GmTu1I_io1LCx*#vc3^M^B-g(TB|cpvW?i2{&@Z9!3{cr5 z)@^Y9QItxIvUz2}*qkXHF@e{U+}1|5rojtochOk2x~h-0A&9)5V*ut>8_JG&&H%w2 zabA~Wb*gH*8qYo5i&-s~*~;3qPqt8lXxIkkU`O4-1Fmb&HJfMGbqj4i+Bj_PC$wQC zlB%q36I|4uNBrI zywKyf`&98GMynL583%MEAzX}%@~$)L0UXyqd8b(TnXTf2&Nu|O5W*vhM{@aa4sv_v zIOnOaOAmsgoLg6Y*|jAVX4TGz#6KE5Q}Gf=^qmUS$EU=9bEj#sh-~0Kp+_120BGfp za%ps5g3om<7d|i4+1hR9EjrYYA7bxdPy4>OuX6Bz!yQ8BSdU55?9)in;dNJHaAAMWkpxJet;5Yx8NU>z3T%5US}ZBAf%9g5JG_d(opSUvJFn zbsV%?9y_djJ-YCZfi+3IKc;_Yc{(f_n$EF>c-C3sjDBKHqq(O1WcWvAXJi*xk>u1Z z^}EYPG0FR~7TU>y>5_8BzK{6znEn`;LBan3g>j^>{!iO~g?Vr745n>ES4Jb{@lK#d zNF6P#LGSHbx^btm(T0kZMRJG1y#j4F_F?h0ywQg%uA-wi0o1Lb2lcL};ogTFzXSFA zVG5#2sM-he+XtB~!TfM5pzvLs4f`N?fs!O7c!3XG4n?5B>BULl{X6?V;D?Ca?ShZ( zOJb^x_qM!5;QkfqQM973*oxlmNhEV$1oRkf{C|3}Y2wy1rCgFavwWvKo_mkZuh@8M 
z82o+XsW!0x0Er}74bS|ut6vABF?jb;TYWZ2n0R{PL^8-pW-6p9=m%Qc@ZObsed13Z zr12KIIy$LH@>Bs69zPG|R^qO-GmM>&1o%bZ-3v|lJK()a^H-Sbw-y?tx_+H&31r%C zWR6FgLx8vo!iz*Fb;fDI+-&npsW_hGtK5!KfWaFIY zJq9t&eBJQN;rzNE?CId`VmDQc>FjP&WDIvrs9c38amEB7AFsWAzwu@*+J}TkLL$`8 zf9@SH{{Q z6Y~vX?Y}$AlU;}G{ifUKzZvzb8#UQ#x?Ikq0CVQsu5tJp<}G74H(vw61fT62*8~3B zUbp_sUAMv2EAi7*c7@<((IsX_02LA~*xUu&L+P{Nwu9Gobj(LGXWuejIBzn#G;H z?whUZP~2M^g%f>_Y3>o^F@w0OksBNXj8{41Z`xDAek1smsom&y`p1i{bzLIgLXO`@ zwF@NEEzFXY24bZ_4Hyg+j!F4@)^U=GNwcC*l-p^4S3A8ujlYI`D{N56iX#SS$YoZJ za-?<6PDf++PI1P1oiD^s;osR4Pq~6Y72LWMcDC-#jk8PyQIdU{ML&gjr;EIE@mA{N zNAUfRfpnh)$ElK*g8t+CM&%<=@;spq*y0EgnU9ir#xh8(&kcUgcV03d3=_p3Gu5@d zKVFE<7Kw?qC1NnlM^sOoR3P?_?`Pc>3#w6{{X{{D@xZj&nsHLk^?ox*fLuX z(17t3RodS^bN__)w7z1klf#(y5aS3F_(`oku%#BPXZ3d(Cv(z&WnH7-mXVJ47kU2x zzwc50)=Z<|ezI}Ln{AW-0Bw@Iv*13z@oP=^XQiJE_(x6FG#%44R}x;NQZpGCxFMuo zjgioBDW4wxA$&m9z8LB{cZs|J+AXBA!@E$5@nl3{B9WB<=OkmVdf-(TvE}mYcRFv| zb6s!T^zV(@9G1Thd^O`8G-Fxtx7n{EU=Cgq5J?&T0A!34?5Ebe`}UI5px3@7d^)^? z=d`}kq@FN)qk_Xfffdr~KOHV_yc2V!YTpd}VQpu1r?sO>x^Z+W(d^FR2OQ@$;s?c_ z@RDmE4D|W!Ztbu1Z9aQDkF`axFa^wedyo@vP&v=mrOG_XC%u6}T&=#BK4|#W@b=F~ z@gIn7G@0%fO42M7Q2QImS}S?yo-`}bhjFI zl;7&3^Owv?NzW%3{6IPFS$-pj`+MQ1#ora2I%*oUHkR&t!vxW)k?ePVmFoTtwf_Kw zcjJbRxZ7{yFAYLS{`Bwx{G%VObz4)5TO8lSi*YB$jdVE5{{X@ubI1KWild=u@qfZA z;}Nk$O%0GL2VdV;Me#!1NAZ)yBo3YqC;tE@MN`o2-~JG<7qMU9u>Szkj8}Ci=yFtg zqnEhT8LchpY%Xikrz6AVEVmTM~1E2TP{A=te zZ9n2q4}aXK&*4#0E1uuiQ%Jva`tmOMlPH*JkSKAk!2Qd6>)`jQ+?Wea4nBcS8 zvHb|fYg|30CYcq}QB2dw{{VpV3hBN9sgK4H1Htlv{{YE!71{p) zF6D1Ck^cY^C&RxQrnI-zAp2w)wQK98cM8l`XhXP##~pGqI6XyD()D=lv=q6ug3fCO zR)Jx)A1b3@WHD2`jlgXOxyF0x{8C$c=;K{VZz|LKO61QdUD-uMECTc-fO#N+p51GT z(fnQE#j}!Gl`bs7L2Vp9RlF*Ms3gWYDl)v0fWeLheo>F7&eBV<*N>O`OpdnKMxNHu zirK6a?Lvk&gefC-6rdwu`LV_UC3xpG&iJELwA3M!Pt`-)-AryY>*<9lU zX9GAm!F%IPM^Vuux6&=O2_ccAQwv9s6QnwwhBq}opjDq1ud5cw*Y{mwEJX+KYdc^);oo@xoiDhIZz)XCRkc9lJj05t50q6~S zyo;XEWXdiwv3}oC(_xb0S>#um6k{ABXFg}PdUpEv&mySn7q>T2BzELxP<)>>G-~n~ z+Z$K-jzB(xKTKm>HlqiL?d`P~*^*Zbzhx?XvZR5R&&o*~af}_u+dh+^uZOj(eI;%z 
zZxwFZFE;pYD!j0LsKc>55PGo40CS~B8VO0OqoUlpn4b%ECBC?`d)vpo)o$hcIkIM0 z?VN@qfA0|Fj31jL8%e>&E7Wu!4&Oez zg7iB(fv#xN%j7ARKeAsKfg=EL2`o<`Mn=<&3}cpArPqRQ;`=q#<-Lrvmz8&|y7`<8 zsc4r93i1wmjAOTnUca}S_CJs0L)Dh`) z4JB!>x&HuKekN~w8oExe9p0=S9W-2DF4SluP}_0AQ!0(PILZ2qV4T;ZYSuGr+C99N zX=$YD8+Kfy9DsxYf=ovX67NK=<8k?GOJN}5#GiJY## z28p0)@Z0H9NgQh;v7b(qI=X?^JGePMhZ!E#!GCZ60Mt(V{{REe_}8p>v*BzWBel4> zitc-zK%51dFtKgjwEpaHrvzlKplJ-T=GtIq_Be(4I8w9Ly)6uf*U)qNXZ;}S2uOy&3D7TFt^jSNoN8%M3(Vf22pX0g(N8%=L^O_ zBBrEXDmI0?+S>jmbshAH+UWXLsp1Vl>oZ-Y#hh`-rRJC)JE(R6unz-_p5v!%PX>!k zSHxPR)=~MFt8X2jm>~iAmDrFmoP>;?a&kv*ou$W!EwxQLMUA7L@h$x6s+bQv7*0IYuxUulEwHjru>m5tuyWoEez7{@zM6)t%M^U24haC=Ge&0EXR#xi`Z zGM)5kb*MT$ah^SMTz;*5%YQxM>PaIm_KoM9B$EYfZCnn6pFnf%P0{r$Jy8(cu(G&}5|Zy$K3KGRP{YO#J)7uEuivdy>)pWKiss)86zW=I9{0pxIFYdD>KAe zKAnAQ71pPDHO#TPyF{gbd>z2p%s?Puf-|=Oaqs2!?Jd5w6T^8l$$-LHq?%Zz!wy+T zAY_g^W7KB4zY|;C$E4{KYENw~mBDFmB)5;tgu?7LA$Dwq$_`tpW0E@7ki#l&Hg@zn zRPU=M*1Sn!sC~B6LVHa<=EZjjfIe(vrW6y5Zu*>K@#nlhHI#dMcirXPFxpg(BFM&DelxeYIqCtgQdoL)>873U_nIjwYRz8`-oTzE(Di{F zR_?P;99xx@M&)!Q9socvJAt2LUWwuDde_7HezB@II+XX4!t*m_KmkByQa)lxJbUNT zyobcM4JF={sKXR&lQFexSW3iv(yWP{pevyO{M$wfupl;T(>@wlwzs0(YMPzO-dkz; ziUmhhkjMg|X3qoV2j<5oardt;7X|FprMA}jy#*^qH!&}??N>*)xz}T!)#B6{q-!;3 z+@h8RnNA2+_1o!=)k9dG^6SH5KQ&~w`yv=6jmk9O%`iK80F^zxht{rZo-7)Mr8-M# z8yi3Z#~r3tOt#`#!5JLq^uQUeTg4i-sLQ2W#Vm1dD3Can8a^kHl}=6r24mN^UbWwa z=H;{gzoZoBqergjqVm^Imipk{SlCM{+gr_WBZ5$bV1dbI=O@>L$?7>@7~N^U8MuZE zdnoU~A&8Y136%B9U<{*LyK07x+F+*c)&%E9dCN%B$w-eXr2sQEliY_lz&R z8K|W8_R9NU{{TjlV1*hik1-r}>9l7bdp%BTpYcAYW2$P=Ug~j;G)mEu$qM|rKXl~o z{vrf}ISRS&NjHqN%RL(Q!%?#PRm&>N6h2!rp&eZZ957N(ez`nixvN<&bz^B|5suo* z_HEI|uesT_5CW(@K2CGjapQ{i=+M`lt2NiUIwGTeRhgYMb6t3WUrIP6Ew!|}Q*@G- z80C*amizz)wRIcO;G1n?ICUuXTl?uulS;}_ALU|#G0KdBK_>^6^sH?!T#v&V^^e-E zQpKH_U&&J}awtD^2i=Ja9Asb&R)l&y`ktWLjr5V**vzcxY4H5W7MzKaU(@L86ve&oytcWnDf_;7aeGFrzfWSD}TYYi}-h19ZcUD zH7^O>+uc~{vD#?&A#kizO3^Mj+nkgqXbdr&cdvB#Pvb3H#M*D$?ev>nHQGkZQv!rW zKq_Nm7y@&iK_oA3wep^+s#;!OMg6Vi-m>5-zE^<%P)3tj)fVJoFu8N7J 
z=vsn7dkJ{k%t>v~uF?V6a?OWqId!_~JV+22l|e_|Z0NWM9+9xc?bAihfzx9rB$+cA;Ks4Uy}1J}?Mzy`ec z#qrws^5J6BZ=}D}?vX>oJli3R6zZk3mB}P-T=Fr}qwy8CzwrY5Q7Eg|mswG%+v--A?TdA^wFFAYM&)L1gC8Sy&IT*jp+*#cc%|

;6obty3H04}@A*gVxF&Lh!k8(oJf}sQ&;k+N3V;n*@+K$t0TF_*daS4A}T)_ffk; zsN6O9WPFv{6}HE_KQ_>Iw(X!{*FKfSYhMoUt|FETi>Y-f?-)T0E(~|E9qNkwj#LxM zzfBE! zt)pk5*tMOGizT1g9?wxCSq;CKauJ+uV&y`d1>+g+PBC6hsTehT8LX~#D|9kds)g=kAIa&SuI^%*(jX0HyFIcxDV30~;(yUj)Q31HJ8fkYBY zs9S3>;hTU-ImS(UXT!e%>7E_b1MlZ@Az>Do@C;*S;D zTi7x~He{47ykS+xQUKvl4E5t2n)+YCKMZv(CtmP1$!)}(dV_8oFR__&wnI7Yj=`B(nNOMmbb?~0cGU&Spzo`+uX z0}=Gl3Fqj+;>AFb*);f1H#;bA$EZt^WXMOaB0l7ngAATFKU~ENvb{cDE4q zbQw4b4_<(C)PvTwjY-stl;dO2!_sh?yGPRB3G@w5#GkTWtzo6<@?G4^uOlo+3LRJM z#h8(|*9usN8E1Ok-9OIE_;~fL!5n2;#m7*FR}b5qL*X{=(7Q zP_fc1l|)vo=U{wg-9T)7%vk5%rM3OA{B;w4?^e8tF@m6y57#7`>Q$U!Xsv8bN*whY zx%UUcUk2RhI`*}9e`O_`&*AB9q4TmBzwU+y9Zm=s>&6Te_yR|CCgk?!ZB;@s8nE>Lu-5>Vk@m`Z+)@^@db8P6z zFuJzKn63AN5Erj{;-g;~DvR5wsQF#LF2y`7BOh{CerMCZc(nbKydI=rO<;%e)MX$0 zKkHvfd}NwP+rx$?Rg>(xm`%hFoY+Vdek6hT*T$X~__=%W1NK+9)AYXsO+KBZ%(ioQ zqDMruf-`uMg@hI@BNk**KmY=3+WsJT7vkQlaiTu4<3*bGXzpdT(|k1>Su!kf26T?# zVolDOP<{J?MS3b$OHRbfJk8zPR#ow&X`;=d>w2BUI!(32nrju*C0JN()P=+C4UOSd z*pj&bDLKYXbvozmLE$Y+;*Om^g(B3vZ*gIz-N7xkhi3A&`DL~#f9SWi3dni2?JiJ17FJ%2U^4K3G#$-+##Y!{MI0utfk7FqK_r; z2kiIY?L*)ut*%<=kxk->t?&G;Mq*@>d{W9Iz$a)pMF(gZ$@Q&o_(8lq@n6NwXK!t9 zq*z{f9>)67?4nqOl+AG~tE#R5Ad(0e<08ASg1#=$JSXsXMtx^l)AYF7Te1!9%0;*v zse3UQub1yWB*UfM>slU@JSd*@P_Ca4gUZj z`+mRm&1>`T_L}jotEhOZOqTCVwbUA2Cc^6CJ!1B6G)ZuQ+=Wf6yL2iUyPR?9v!(d6 z#+q%sajv7UExIUFk4@I5jTJ!P58fUsDpsdTqm-WGgdIw8is*eA`)PdN4m>&rf8;-J z$NjRJzJ+Eh)p@VZZyWqU)b&pe{{Y1P-^BWko2S3qc3x_tMOIZfU|9*y3H9q+z7YMW zyldi%X|8SjL#@Sc8*D7L5L}(u&sL0kcBzgco#i{}p;DtK7{{sfXYB_Vlj3*5c>uvp zY1cnifd2sOweC>@cq0|?HoftSSl6{(0{;M2@gAjnb)nsTvd~T8_)`*b_q?}V!{r=t zTiP$}r>|aLJO@$HRn)Nm07}xQ8QO4i7H&BNb?sLQ)cK_DOzBIOE3xhW01nlE;io<( z9Bo1;-b$XZWv1zPY-e^yz=wj~r-Jp#Iv{i|9E0Crv_oq&zEbRM)>B+V7JyZ5g zlrO+P4im@CbM*fJkBJBLsK0A={{V|0s?D-7bAkrrbMe)Y^8MWp;-@m_;%;metC?IwcW z##x25k%VTtB#)U#MIhFbe&V;0ojA27+S60QFZ_8XoBKY0!XGzHBX0Hj5fNZjoyQDi*xpiP@vm9VJ6x^IXD9Yy?PJrD6zrt zM_7(E3X0astLOyDf%;cX@gqeV_rU#1)l+nl>8W)X1D(+YkYnHF$KzT!wW2xWB=uj*1Rbuhpkwrk2EtT 
zplyelG~xihRK90NBd<#IE3bz77sqcD={oO+^y^49G2shsZra^ZOl@+r{Lx6HVCO8n z_r`dwJ70tvSM3pFf8yO2!rESe28{%9>DCb1L>Bh-LPzY;G zl?uVA+D-el0Omj$zaP7u8?f{>^Oui48u&lre}(lg7U;ebhr)VBfdnwhwy_A>=K4pB zNSPnNicUN9{`Vh9uk1(ge&{Eg;e-DGZ11OE!jl{w3oeLpWlY0-?S$D3_`KT_|+PYd1Zz6>jAR2S z`9~dzz^hIB4|sahOBWV5(XWUQZ&NjlEn?06!xr7aJ7;ktuQfIHjiUS=u!85uUk-E` z^pe6S*8Dz&Ua#miZ3lB-P%I?pHzB6BGvOu3`G0PNJ zw-d)A2XmKI2u8v(4%g>ByY#OT*8EAW_)_{k2m3A^7SU79c*-=3B9gf!Sh(AjD~#Y0 zcwFFnFOR;$zArLO9CK+?T1gC+i!;RY#>^ZfdwycXj0FUB=ZiZ?elw>P(% z5z}twnNmV#%6z4l=Eg`nco{tUabF)&tW`_Rd#Qg~9C?TP%#+*x0iEFc%h{lo>r=VD z)NC*2Xzbf;#smN&{G)c`w1r%J-*rcP*Q9ASw&FWME|S*w3afE$#`DbF>KSsuOEQJn z0bZF5G0<0^cz*Ln@Sd$Lo}I2iDc-P5CC8B950#@1GOf{08Hg%ydWtoz4(`ubzq`BE z?DYBX<1*W%I*C?7?eijlHn{Jc_s1t9xhlh+eUtV4k1Jc~cIo2l9~iCWoYxIJs^VqX z1W?5wEs&c^h9_tso=$zSS#awTX)CD3r!l_~<>4(DS!B=3{$mhuMstD51e}V*(S8)` z8Xmo+86~&A(IJvImSc9Tk!0dRxxg)j83Z0MImJ6!@LZ7UX5UF>)NU=>NiNnGX*|vc zWn8L)4`Oh76HYLrA9gajDptPdVX0f%YMNG+V;#1SZKX!BZ@ijiTZrV`%p^a?!A3^X zt)6f>u96*7PnCtOkBi$?zOfAjovPT6vki)&V^v@PW+NHM$m`y`GQ-4Liui*~wtYJB zbyaqZTgt3Uu`T7SV2t4BZ_S*NewEv5_WISG)wZp#mv@Kfg(13&at9H6RBv9e98Go#n9uDmaA1@^Oc+D4@EN-b=(n>B*s;eb)OqyS@%=ui5? 
zzy`6cJWUR@;t1>?M!1v2myzr?@=xZ+Z?t@$F5(9}erD;(z^_4)`$+IT;-i77-`;JV z({m3ibGQt|5Tq~e$6|WqaHn5V@dlx#-A-`wL{rVUxVZ9=`9yBOMQ!JvGFt=_z|K#b z97ifvkIwcQmrjWCZBts*f%EZdcXK3@qASD1EX*;(Oo&g8tJm#~avX{a7 zw0~%~)U>!3V%BXuGaucaSj5EqtUzE%=bk~&9nPh3uj;yO>F~v#p{i(?F}uNU6oH!6 zw*t$wv0#}bgUBJ2^T-&QPmL~bH3;;rV*c|{o)r15?rep^+s+#YBX08BxZFWy&l$%( z(UswNE3T^g{{X}Hxtp?k9iw={O+UkO$7a|149OUY?^<}2#D};Ds7Y^^=3oyegN&XA zABtL=Sj!Fl&Bmm%7lmZF3<+JkRPD|;ao2BQ^scAJT7QQB0JC!khp+A~OBRB{FEtWI z8QR;VRqj)TAH~zXWavH%@dmkX3Q43`=<`TVnsqp#ig+I+9g%~$j(}ujkWU@5!}}=H z=aRC&U-)rr*w663p{%6IcYgl>Z)J`@B3o&ISi!+8>yo(ujvD~;-vYP)0O2Hm`SiU1 z0Ktr3{BpT%dAvK}?+t1?1X|Uscahw*A#JU^n~mr)2KRjI-+|XBla2*=clP&)KjRf+=5FC@J2>Q80V5J=gk|#HkSSxgI1pMS)*oDD+(1cF+VV3MnCVI z^%=*0+ez`xo2KbD_8L93ksJqWi3Vhl=jIEJnc#EpUZysLYrET;-z1H8XV&_6iL^_r zi!*zq+FKn-K#Jb&S~rxcZ7lK_&h9cYPXie1SpFjLSB89JadS4Nucb60(n+io>?)AI+Tc0$eJ`)xrjG|1Y{3mG91 ziJ4<8@qw1#xDg)%JbLk(^V=z<(ta#n!j}sSFr;iHwUj$~dkzjl;GMj3IXL$RTI1r$ zyIEqHe#;f>GzkN$uGq*SM&Xi9c#!EbXfK@GZv628^_Iz65GJ}eL2P}gk52^ z2}hkQk)Xh6TXN$V>bS@UC;anWlw~)v(R61kxvk(o8C_mly}Xul`O;;AGq4h83zOV+ z?Okt=ygRA*qSyNt%SnICmMmCMbK?-q`&rzN+T`rTO&5H%o{5p*kaP3Q` zjEQ4ntm<~)yT^AtiPwh}9m9@G1*?!dw11v$<=YclFhI_p=mwp%Et(iYj4 zc-nSWNf8w0Ne4L2?&AR9o_5z^Hm7Q~wlPC3ysU^)8K#w;_l>wD5&$P}<}qGV@k2?~ zFTTipKdJfB-V_%Qw2G=u^M)WELX*>{8KQ?ZN&D{C`;BTvX=Zdk60K|-!#4WM**Y$p zaTUut`M@gqfb2rU<|TO~91XvPS@52$+Mb3M!EC2X+lej`d4MuNWRZwLmg$BEjyiGP zwJ-E#@b87jmwSBI5?;mS+`C(wYi-O2nJ4d41hG6|V*`PjR9XATk10?Ny25+&KBM z_m{3JJ#$y_9+e#PwXSVLW=L+_Wrj&O4VE|r4xAC`Uq4N9SCpwWX7o7XuI`z~U0B~u zVH}XKdz)RG5Fjj6u>=w`!3*5uo_djp(pN(kSAyz$`?(k!sfkG7z$$@2^aN+y*S54%jc)e+f5;qc zu92PK9}vkTH_^?n-CD^QSyhx}T;L26dFTMgdfdM7&EBD>-pvN5HRbe-e6tKh`O%Dk z#FK?kSOQ7NEz^qNv^yK?9aB=YNnnh|o7zboKvJX-%1HO^jtzGD9;M<9bL^IuF-dT( z8II#`Syh}Vank?|z3_JX^WRzyGUtO<`2*KNeGkHRUK5%dy=wejNjl~1LaR9I%bs?9 zRI&OR_ss|5_k?u|%X?;MqSK!rX_9!1LQaZ77(fScgV1xxuNKohA9-f$bK)EOVQ!L; zV{($nMdq#BNhQI@ma+Zt6b1Q@e3O6)#(Gzg zi;X(=btd%cwnRO*J56iEx_^wkMI3r7wvTA1%L>3Rn1ra_Bl(HWLu3$h)11|o@%Epm 
zcupM(`qCJ+AuC5c)v0Ks+8C7~NH`>H9Ci7HazAC!JQsZ=Hn!4V!2~S=-7*$ztL0d@ z93}=bc;wc8z2MuQ?XR?4T|*_ju)O=Hm+dU^mC8mR@$};>is5u<%_z$DTRx>eR}-l5 zhs0x~YKvv3>ejlgqv00b@f&I!0^c%_Hjty94?R6`ZGJLq`o*hQ+S*3M&0txBOk<4Fkh} zW|BQd-r{>etlnI>xr%+NSTde)3wA1V(YU0j;h}1FyYv;Ie^Qp9w-$Q!yxR56#o=w+ z;?t#8p4@_@F4MyEk;uWx!QhH-izLuy(6wfQ`cJoNkUPO;F!IH!0$G@oxtAql!0FH4 z$4t$2nRNY^OA=kS>7c#ay*Ji))y{cbIruiLI6pdpc7y$9? zAP{mlWOXC8cG9Em9BlS3Jgd~$({-x|^uc3zyNX&Ux0*RgSDP7WH-biY=aJhy^IWTH zpKR0~>gpLD+#RgSn~)rWM&5+ucja2PHn%<|@U4p&Z4%zjF=p)}ZFA*e_QxL;LAgMsA@L`*%^M>Zx;J?jF4CNq#eL>&=N8; zz!l`54*n8q8W)IR)o<-1g<~dFORP_dB4Oz+fP~XiWsAk z`tO1V?>3<1oaKSs2_*f+KKV7{nx~DtVdD#gM0U3=kgIGNr8(>K5?h~4TK4YpCc2t5gUMAZ zkDGBABr(oEyN){gY--;Sw3}#VhCO-bbYR9EJxq_Zb}Tw~?rVnCelR?GY*#iG5;e4r z6{onDAny4=WnP%~sg+CU*-?cWF89=*$Nmv*Q%y_MF}>U)AP37bbs&uXU;TRX3wQ*I zS+?iT2Sti4P2bkb7RNv-w!t$<|gOL>X~nxO9|!q;3ZOF`vj+(mxtJbqB-gyeY5AwnJ$h;=>Cz(JapZSfkHv&QyLmuY_%W zGkBj(_$guGJtgOx!@6a>aO}Hlcs%{G1^}vtf4zcm{9`=@XZZHl#yXAFjr{u9y}EmW z6tS#QTugw1en}@*&N0;(53fq-_USDSbw$bj=hQczH_(0>+1dDcJWUUZ?Ju>{oEhc7JvV^5g-XMtCYlITYU-e%l@xu<_NVv!~mvzhJPqvuk}q`WtsGERe8Z zh-7?XBf(Leiuqi83-O1EwJkePT{~LTEsL$8n(ZZ6OUTNRrsK6aW&<1!Ytb|>*tf;) z;~OgpY_#1IP@2YB(mAIRN9C)cvH42?a_ieZ^_RA&{s@=sm)d4`g@0!xxmCmv#uUk<{^C@!)^h<3PUA?0i+?9~IhZYjp!Giy{K9p~&T>a0lJ=$F*o! zd_MRQ;ZGf0uBopK|Ggxc>B~xytQD=2+ z`Q)(7p_`mxRaXF4)SnD~F?=fUzlCfx*t`>QuXu_*{%YIl(abVrVuO5*&@R!y?OF@* z>&EsTI@d0IJ@B&f+T!}s&Nj8wa}c(QGdGtb3@8E1upEFqv(}E z>nP^i3&=lq${9<juTKfEQI zl^E_vBD${te$0L;@u!1yE1h@Zp0v6(&6E)G4gML86*9)YSG=)l1k=Y@3V`Ovpzxi#rq@cKMZX2Z?pJ@ z_fv~jz7{4XP>O(pcNP+mdUWEuwf%yABlud}#iRJf>Iid_96`}Z_1cm*x8+@T$B)_P z!=DN4{5>X-sB0G|Qq&gF#q{YQjYuCd;TH#*^>zJ_TE#%pz9jgkPb<;9)ogwXxB1j4 z^GU_PZAC>%Pk$rh?-%?U_`Ber8u)9&FXBt9o85VxV0KwbCfsDci`@DtguzkvQN==am>dXIy&>Eexi>0{p4$_ABgYueLv!thMQQ{L`yuEGsGI;CQ$$l8R^L2R^8XcAB#R9@cyqA zzluCN;)`hP}}-jP)Ez2YE?RG>@en&YSTx{+AoOwSKRaw8*zVJ@<{yo_CsvjR);vTQ?{#!p6d_2(fjU?Sd zP4$G)vv1w;row3d45Bz%-9oNSV zKJMby-rHUA7Kv!9B(u9>Hg*g0Mx`5!w^B$09`*KF@yCv=qfx2+44PmD57;#p1Y`Y? 
zzZLUu?O8l}_2ss=bKq;w313@SBnxS(Gk*9Pn=%4aWPICtcB+jiLe4GU<|ir1JEd=t z=l(qXp{%@r@aFHsmfC!gT*>8Td9?(a1szzHC*~yWHR)Pk?SJ6-bhxaqbj??4z#)(# zL_ySx$J)helpznYgLa%Hw~od+J>Ck6xzhVZnw94rj*J6KYPnGha|# zc(&K!{mtH=@Um&H{59fB2$AMgmQkllF&kW~DzSaZxShQ2=dy$Ghr{0zYubcX+9sRf zDD}I@<0pDtu$EqVCnv8xE2z5g&x}8`h27`Ey?IP=?MUqBmm9N^#DzYC9YH>7N~G+HZvXH>&6!3An%3d^xPzK-X|hX>%Nw*O0m&I0+DOBB{t6WOe`w z`dTlH-Ux<7R`E`nuA}bFIFJ7TLdAY#>z*OjG&ug*p9 zs&V{E@okO1qxQq&n^~SV`#L*3sISfmSKtnvDMEE=-`xqQR7+Fp8}AQ|OX8M;sA`Fy z4r+Q`m8jHVZ``u2#KKTyU&{{Rwb(C#++UbQya zp#+n*IpX1gB=E{H>MP(YUl?nU_%Fd89M$zLMX&XXnDv2mZjs?y<}i>(S2@lh>$j=J zc4xwVH++b-pB75k;2@cQA{QU6Pd~Gr+SfyFS5gWqWPOcs`(s*7W^C;IF?|NVjhn9- zPM;h1V>D#5sO$2sPfjycWbuE5J|aD&pAfuj{jnSk=B9*jb_f^NHV>UO$s?8h_MwD-D|?w@|L2Yp6aqn_8w z!6p7j-O27q#&B!6@DGH1HKS?r>G#@&&aW9VCK96Op!E$WphFIP^c-=%W z#2W%aqa+3AaT)GUeDLBUUhWmDt6%!_Jo$4Yz0`gr+APw?V{dbO+gjaKx7)1Cf(&Xi zfr6RmY3bDFxDOk7ns&8*VTIHi#)=k&gGwUZ$@{#30Q}Frb~?73;j7&?%3VhODX&?4 zv2Ucuxnxc1tsqdqWRB~{%sAkI=e{=8wA-m&?P6PWKviQ^lraGCNhIvx^NjTC+NTW| zTb3#O%+!^<5Rr0pHi~K+wN=<_<3c9)CZ7+0>F-dDd=%g>fa6Yj~&L6 z!7c5stsD;VTgZYFa>iLpZz`Kqbpx;%;Pn+g8`){veT)yLH23=QXi$Y+>JY?+#M>!z|dE4opGm+)VREA<2z2&=uSBt?s&uE4WEUsBpUvuHluv2Huuq>fnrhu{D91* zpC2zc@4zOwJ6#)5_>*}Kr9JQ2;kcB6cG3Lp#z^wi4gl)Pp*bUVc&nu9)Jk>Q zuH4ULZD}78Y%DG&g8k+ibaJ_bM2iqa+{YP=@_t|goZt?G9z74jR&m`~>KeSB9Fi{) zOn%KJ)$G4!Nf&0&$jGPxAck(-b;++Q*1j0(*BY^HHulb26>+xe;&f3S6c^kH`{aXx z)8!a&B&)##cHEqT6NAXAtwx<>zOMC_ z_UOM0dFnUI%c0Wvcg32sc$>s3{wK2O{ zy`{8Xy}m~*li2b<7JMFkQ^gW$mzHg&=oay<+dL68=4|6JsZc>U&&_}bQhHWT_Lqb| z=vuh{0Kb<10Q_#gg-?i9R<@sEw=Hocqxl%RmDwg{`_O=xW|KG=$sC>7=}rFtg`eU- z`VzH|{0Ec&0E(`-2?{?j4jPWZ>}J=k-37=HJ@UJaOU~?&cdnisWS!4xQ0cJLoi^6sUff{A<(Q$3Hb9KQv9%Y1Z~?*31KzUlms+~Dms^6tUq2~Jp|Uv#(DBgy za>LfL^c`AgJVmJ@O3<))ytmj+K?G+!fN|<^*1Omq!q3AoTxwdBlRd<2$rN_`q8oc0 zuq6FI4%OjG)F!zjy@gUzx4Lrr3_6vakDsYq+*_30_BCYv!YK!h`%Xqc?gE?-Okwy& zNw04;7Hi9B;cLy$42Vklu>_tD0mm8nyJoiUyiIDldPdPlaT~h4ax9M1BNLumc6)WAgRCRBw7$C41*11~n# zra3&u5t2?bwYOkkFB^Ie)t&IV(p`VV`ghtcqqtrBt|x`^+Z5*nC^*O+SPw%}K(-TH 
zT8j&4k|7~>`&+WdB7?UB*Nk(?>&U0+u-)3~*IKovm1`U^3<&K*NfZpjjFl~p0q@Di zNzQAUD$tZuwao6~?0Yt`@h;;>y1RueTxso{l6j(AMdd9m0YSt?x^GWhLLYv41@rEE}K_L3pX1QzPs|cXF zeHz+0xQxdPds$ZlB&Y*Co=M2fb3+YHu3bF!H%(}pG(IP5w-RD+BHQgLj7+jDawr)m zmBHYg_U-H|LJuC==_5+ktYWaa)!BdJ<~Lc%=uRUfyhLD(0m%B-mWzw2rjpemlJMR( z$>E+Y!AWo77&$$`AJ)BxNz?Ql7e^K_X%^#OyNo5Rx>=Q!?ie7F21y4aIRG4zdvsQe zD8WUm{{WCS`xJg5{8)b#XmHx@lKyF=R&-?uV*!acR&4XfKArmv4+Qv*En~y_rM9P} zEODzAi4}`rHzc!z>Q4ubam92VG}MySKd^5s?Q9io-Z>5#{DG#*+>2?bR%fxrVifPP|m$>yjot*>2a8l{H0W9DhrF*-CBHwK6`XYn2g27ShV7=zefXAH=S_9_`TLxlJ!dy73ad)E3h` zak90`D-pkQAG?vq9=@K{>3%iPZ>5Da*NJRlxPmyNkL^3wIah>6Z1O_qlibzLF@;WB z@4v`7UdNT`HchB&%jLM*`&n8+g$_U%Z)}`?KaF}`nso$tlHSJd350VYv->fQJdeB? zm=U#GoRj<@ae-6mz5=uOccz=|T0^PckT%+;-z*$5o)6wDfx+V>aaq>BFw#6lhc-3Wli(9`k zR~!CMR9+kIk9qd<7i+qnnzrdR)vcs*N%lDIE{T}o zj^fUKa6WCvI3FQ!`@MJs_=kr4MR%%9 zX49cD3%$0NSCJlq0(%R2(w+zvvazn+3MF&+TYtlZEqV~EyPnWNp_L)68mI51~JrvLB>d< ztlrJ8+KUy1rIIEuAoBw<7s75wU>L7n6dY$ey5p)o?MtVi;=a}HWB7fd()=rFY2p|1 zuC_}gFiZQl$t9E)9YZ$4cV`<|aB*AS57Tu202qI0Ygg(PEk4j%GHxMWTy0OCfyX|i z4mdd#Y2$$Ty2>+K@v zPf1YOw2uVG$#zv;$isO1!#K`3`HnqRT8dDVqV;;{`gJmQZL_@iM}2Q+VW??(q$@6- zizZst)>iVout){|Y~+6d11-T7o38l#!#Yd8xpNiGrltXg295(PNED6vC9=ep1Rckx zUTej^B3WwQ7V#zZq%8V*^p3dGCg6X81C)?$- zla6zN+XUqG>qQK^ny1zyJ?*j42}&(WEbR5)i+ZN9bdyi2+(W08Wl3<`fyuz=dW`fv zy7aDo-8@0#D56V!GgsEHBA7^Lkw}T--f@!MG65LiaD6M-zhiHOiSak%6k1A66|2K( zG^}O?&}{&4JCT##zKrpg!1(k(w(IvEEY$Sv4cZHMEwoE$W0vAGCD=(!kGpQ;1dK2~ z?1|qs^f032uC+SjQl!=7&x`DS8Tiuj&r{Z-va!)EBaN0%w8&*3#8@148?p%W=~CPL zH_#*SY&SZ;jtrB{W+#hEvvGuWUEv*9ckvu^iv0)pfBPi-FxI{a-1x&#()A5@QIgL7 z?kP2mLRkv3JljcCWl~PiPaKkKA|DIs{tnV(i$KwIT{aPAr8_)!w&HwEZwpLG#_~y# zuv?Ly2(8v1l%*T)&l&Ll0PL@%cwfMGUN6(MNhY*{-aDvuSry%IS$49qX7{o3d8{{ZV&syww4J8Ap0C2s!!^tzzb!OKST-|XX{=ynzwt>=nGjiSBeamcpOF_JRs zPD1j#heOHhUen_56!^16*1S1?p!_h>d?|Am+Dr|pT|*j3zD{Imb_FZ3H#bwy9M_TG z-d)M?zr$81Se30?O3;V)XPGY0Op+3Djh{9bq5cj>O8S%bfJKu~@a#Xj{pF^u5EDov zLR!t0L1I7vvjLorqPf*QFX{Q4%BHrrf5`E@Q^ubWJ|JouUYB*_Uk~Zd+LfUD9*to% 
zjFNq(HCNv(U>%NB;5R#QU2NX~b!|uDPMxQCkK#XxHA${C%Zrn7X)v@Xt1K#GP&Z*) zcqDf!wUMS;=~{n`d^u?X>9OiE>voJ%+U!juC!8fLKxV)sXY#Lhy=~tebfwSS@b%Hh z*5VKFt$mtrS91?(B(zrFq4AH2JO|;g1L-;?^~Q@gh%PltBXs8K@zg5`BuovwWA|gO zdYbi5*(*!X{6*qTdPeZRmth^PtP2bg!@MS4&iuYkR%=~G!4ls_ znmNSRs~L^F*@!GY@BlKL;}z8Y&bwjp=ZGUsLfcbR(k)NgEhRFc&Be*a;s;M|D&tNH zF_PSwRH)@7+qe7^8}?hgPY+3<+D^VyJ}1#u>efO)SDxN^mP7;l-do0gi&lOxS|#?q z@pj%xL@OtT^uSz#LjM4yOV1qo)$aiOJeS8Gvp0eUopt@4;w?50w`o`7tTTsl0sjDH zf!aq;V~ke-ekV`jy-VUArF-G?G_dHJeU7`QnY^a9nJ1lG0(y zzL8z4jGx1kUpM~RP8&h+R?L6MX+}ZzucRSgloHs-Y9ahd{Do8aaQUAx{?n|h@x$R^ z=V_6Lrcao^;w#2p%Q1{{V#r z3hwjWmvVwK9SbD_>Iz z+e2QTsn1*ZXT#EX>%arbx(<@tEK!w0t^Sz&G2^~_bT!s~%ep3srT7oyCB~HZQoCqC zNcUFh5kch4aHKnDJ^FMVYqRl}itKzJABHtcuL^1R7dH__ac;0!D!snv-5;RpbHVCr zkAu8en%9E-eQn^+4A|)~HiTx>rf8W*me%f2aOdV3VUJPVR+6_Xb~2~&C3Cs`mg6h% z55mX0o1^~#lTy}a?R7H2@P1NG2ixts{{XyAZ2rrw=6nP2nH;cNkbm$hYUlp|Z0O1O zG(ZO*+BW(0{wn1E0GNM~+`ry_f2r*ff2md@mgc@B{{Vu6-&k1QX?Gf|vc+PWa)GsC z<{tyu_+Q8Rq0_XDUiD{WX#(Bb6i^go^X515uE*iO!ks7LMb*>kdYrem*7BI$(myF5 z1c9-c2t5xx*Mj^N@UE}pZxg1ErA*d|a*Y+;!kl6fGV7mk2srJ>di@Icci}rP1?hHr zR-F>XZ*GBdqXo`6WA1ATm0dY=r&8Q@t5r#ujsc&_cz`TYi4e{H%c(W+#DaMuB+kC?FHcbZwq*TP4TX=6_<(hNm|-e zmN^?_(FcRf+^R>+jC%Jc(zWxs7VL!h<>D`h4dEDcUl{3{3d5@(G)D!X`%1+d#>*>h z`Bhn%DCF`OSFYS>e;DCq+u<(&-X7jtO&LF^uOV*{A0K=__?hEd#%H#>xwg`@CVrn| zhUzl=eVzVqs!&Zn{R%UuYL6P~dW$4K+E8y&VzmDNrYDX# z7|7?YMSUInHpgbawNHqp8h)V-%T1{fEY~XWFy28bf;GqD#Ef^%ZOeCVXZM`G*>sBg zkBxt5=`?GG(sa#w(m>Z1%(i-hKGlL}X9IcZw;@UI*WSKq&~*3YSj&Wjn;=UvGR zaiO1L9PVAe-8+s!_phgaXrBwu;%Tkog4o^91(0zHu2}r;Fb`w;o_bRJG4Kwfqxfzp z^erCB+Tv7+Zm+Jb8*@#xhEzL=We2Ma0DlVbKC=uhEp>0B*vlOM0NN(}&IiFi4*W%? 
z+pOl-OB28EqG)cUhEya0!2paYEuOhQL7z&~f8i{;5yw8Jse2mlORH$ekqJ^6rBi@+ zD&yCuJaGL>;va`&zMjf!dy9h`$7mYZItz&bQ6U2dU~Sk?4hF{WfPCrxC2O84@m8s& z`S%t!7m~bd4YF2CTai3*mBurislfx~9E^ zw~KHgB*9>K+qftr3Pvyr;E>-pacfD?w5Vo^-uqp+wX&LDw4@PBvvTbWA1pEf+^3eo z$iO(r=H*gL+AH6>G^Zq`W=(CbYC2Z4KiVVH1h&zz^6ubQGM%7+rN_&`=YRk_g7 zf>^jJS$LDTLOKJ_ZWmI4{4Z}Fp`lNHx3Ii&M|mQoh^}yUhhD5UpOt~g2b!s2>D6jW zeLoI|Y~dL$Wb`^sd*W1jZofU<)D!8Jg&E;RQ8q+lX=ev;$Sr}+az|X6vGFTgxm&AQ z??1A@K;vvS2pQA@SDZ0X$j2%V9A~dRec(+;Q1FJQt!sLV3zlZMiq)<62}^AZv6gd|V#A!{YT%wQI#Z1*5o&OIX#W7EOgl~&AVAkG&c-n zwY!a8))WOnF(Q@#1N=pfKpk_-;nA*i-41K3n+fHPbrH*Oxw(Sf!v-tY=W~Dw2LQKH zE0c#=n_RoI((iS5(=_Wg3Z86x*%?%}NKhPLoDQWy$2g_$N^V!v`s`sixbAwL#foXR zP`;hx3sI*8Ws^*iCRlD6ipeJD*Z|JZ_#gv}f!446hRgnd9vlAv!iv-UE6VKdB>O(0 zrCfcA?#Ft#xVW-rdDtF{D-h_oKQ=RrjxbGc{{X@VXa4{~{)_(rf1CdRh^#%5j+58= z^+qXoN6h+dpwzVpqMdDEm5(vUHW2gBF*qNeYWiy+U@xVH?H1zR)dX9Vu_2m5vM9$H8yNNDps%a6 zeFMXmHsev$^haHO{q<}y)Zf4UA1aKw&z>Ccz=nPj=wrnWQfS34u!WgKK} z9UMWGJOPrp$von|qK;h$N4mJV@ehNqV}wN=*%y<{66BN058rGGPrPYrl?LDl4mTH5)v*=}HY!g+<2rvCs~0t1bTNo)a}8sNdsP?t4ooXz=vz!x%l zUdPGT+CGTdYR5Fz+FWfiFnHahbj}V|Gn^^hd*lLoj0&4i@a~g+rLtHvOl zyO*=)>t>kIi2Dr7ciapW|##sE=V?ZNi@XwXYw;O+uWM zCC#bTL*Uuy9gT#a=ShVVh0U><@^%PkRl-*REdX>&u!_${#2K=V{zN z@`1U=Ndm5GpAs$n9sQmE00}Mq#g?sc0&>ep>L+G z2Pft=q@?$hb<`be^e$ceFKBL_;_p_n*EKj>dwrHOD2WwNvuF5-Q`2reah#4``{7@M zF7@kKZahsQ8{rzm4UMDjj4%6Zk-7LdAd~zocqeD=Xg|6wq|IMcvtSwQ(QYB!=A?x0hIfc)ZmJp;7Xq zKQIhQ&Ye6|`K1|ITcy9P1g7<}Jyrf1=;5@hD0N>GYJOmi%-U{-lcpG!NhN2x9$pte z>p9!B_sPrgU&8$Z!+QKE{wdTgF5|b2BZZ}mq~j%i(JCqV6|n1}lS;j41@15PTy0;&+GqQQ~VUlIv5_tY^L0zjObK+wKQiFW8oCFxo>mJeG~75%yA~VPB3;4<`c^*C!nrdz$j^5Z-E@BG#>KKeVH|l&U49rP2fixdDr90Q#TTj8{wHJz~YN zt!uXOYSO`O*6AFv??jbH$s^?8fC7V%e?Ucs*X(>zq}*!S<%>zCMSZm{01P5W-I$6D zeca>_KZmzNadM?5+Pm-P<^3a{+H+YhhF6NbC#`8#^Zx*7UR+508+^E8`A|;fQp`9f z8NlH3c>|P(Pln#|3xR5Gp+_5IWc&RIR@#2O@@vt&M`Hrp$7`b57P-_U-mN1LsDq$5 zBOMzoaKA4d>&ou5{a@|KAhw=omxLDil(E1k?*9OuE2j%LI)3xl?ljYL&~=X%+3Hc; z%XtOtSI!k<7VYIl3hl-@9k2i#0gh_2>e`O2Ah*)3C)K{q8_O#+#$CBpQgWp2P(}d< 
zpO@)ehlKoLFNSqjmR6D`cJil9%%pRM&wr@)ub}=H_>)W21>=bkVFe*;h^&G`DzW)f zab4LAdxOa*I5mbR{gyZ7x-h9JN#0Dv_-Ci;l4~~wJUt|4Wk}L!VrK!lV7YcV1g3HU z2X7sD^lujUcKB%*?+uinXj{1ElI5-c0CqA_cLA_vLrCX1Mn7ywm(KrpYdYX&gE{+Y#pZg9X4G5HJBJw{zE;Q>@_| zMP0uC0PyEEoK73WAMle&;w$Tm`(N#AyL(e4rri+&qq{B`H(!~E^~Y>hWSaEaqT4(+ z)5m(Uq&Df~9jK=RZc7g=4$Fc%dJ63xdwps3Gc1!_PbL|RDw0J3ZiSfjY!1MHGlFt! zi`0HNYIa(DUKpAScq~O2!|E^&c7hGS;vMX7OnHvBo-0W}w2RP3= zM*te+b&D}$<1JH7yJ;9%%F&}pShxs!0nihU{0vh(O=8+y5Xk}n7D4I0v&_y8)=&x# zoUU>*PB{Q@F`jYnM-K}{O5DwEm5rP^X45?*Q1ImUa!$7~J&ut)ainoie`lgO7ZyMe>oUOdA63oo92Ll)Yjt>~*5-ThI5{DNL8u{LO?PPX694^o> z4hLSnu}`e|~uT|2{ZNvPW-c3N7)HHEWDm4S5(#z-IrWdwjv zJ6qDe5B-32t!Kp_8mEUwytC7FXsjMvd387*XhSP07(hlyA281Vs69Zhw>}|geksy5 zHMQ|~h@tToq|a|WcNX@GYOzKwHBb;L^}!5sf4%Kql?|+RCHu7R{ao~K2BO*i%9?`4 z&gmYJs%>dT*CTbt2^ly)iLVCMyg>!6j1k@GRxw;iG;0ZN8tzie9?}=LbWj41mLt0- zsO}AQ_8$bMgZ6vXwYTxliG{VkkAHI(l@;dOMF_Hxg>lM~ypH)b%8e%8J!(pSLxy!u|Vre56L*`}X)V-4(W zHr1VEia?;`>^^GaAM)2dtLLBDAHX+0I-A9l+jy5)@Z(y|9$QG9L}uP!8~8Z`XHRop z;rmm3G4Nl*4};SW4ft12v%b5yjw5mte9=S>S%a}H*JAblI^h2RU{%{|8nQ;I4sK=iM%`Go2@#_ zNY!+mZhL(<@??Jc7J0K(3**Q|Uw;(bcr`yNX;;e$=Y zj_oo&;2DNj3DjqjD)yrwlf~W%fu)GZHA@eZc-(f|eB65we=78;m_NtZVb2TUd0QQZ zZjgUUsV9GRlLr*N<$ceeG;a^Vd8~XmyMoRbjn9SeXS@v(?we?r0r=#MDCe-nVSE?y zU&LJ(Sh;TucuwEM_tzR^OLCLmrLtQp+}x-?F&W7X)6%^M!y93`@HTOn{26@Tti$@# zzh&kUc%#JA1M)VFZo>o+zv)+<$GEMze{`*;#9q}T+P?n)nazA0*S}~fei>+1o*eMs zh1Wxl-))|us9X~)skq>kSpXiKu6U|iO^=BozWCv9;~8|xyh$dRroG!~Ac#RNl^lWd zZ7Z7k7xr&q7r?&<<-Nf3n{9y3}^1B}pX_nfoYX1Pi3tmc)0qx|+ z5&TVa#x`)*`mwyFbyGyY;mX?Qfh^a+p9|&NAp0(eCct7c#iy5__z)k3bbkzgW_0+G z@ZZC_mHz<5ZxUT;v)RUDu!=ybY(U!4E^~mS6&M4hPvSdw{{Vysz_NLSy7+BDUN)cH zTsKc%abAV{DZIKzgKj)Ab0KMT<+i+qSagEs1e9af4rL#OX<9Lq;-tHoQIw-jQj6C^ z;IGb~={kIA=z5AepL^sZh=%Cyv%W2w}XXu-wqd}aGY zd;#$1!&@H?%cWiFvK>PFTFSSVOCqYAa=#$K=dF6AKe3LJZOx~RyjO4O{+Aj50NDja z`(Xh!uYhC6-`3+Fs@KtZ8637fsYa9?Nh@{)TJzmD3(Fhp*0)(#%$ihSigHw$!2|FFes%A^w4{D#$B%_9I{B~p z2Or~HXTd!gb&nKGuQ@93FvieuOI;5C09y1ga7oQB=bMg`e$BqC^*evs9&5`B-vruh 
zZPDnKKf}9hfr&qkRevgz;Em(m{4e;l?+20P_-fieyNvC%m4Bvx8rS`zEmr#D!NDdu zTPsxzR1)7S9R8I*fHYt1PlTTo%gPz$@YF59zyR7vN8`z@sxRHPXG{2r9((XR!#^0V z{ut=jUKH_mlc(r%dEaNVyn$Kc#LgQnJqAHv*ZV(0$=N@c8%+cAQC7nq(0+wK1VAH7~Lv;D_4 z-p8=2KX1!rzhg0Pzi5j%)O~}&_sReRYr66Iiuqgi^1SiJq2SW~IMX!EF7Yi8K$={I z(nR}8Wt%S8Ipei{l*Wu9jdAx>Wct^^fACZ&@!`t?f9J>_{FzGHRQXbOd#}vOGv;ks zZ2tfwRSNOcs`}SOl2&B)v7_-@NIwCzT}#3im(x$7>lSvG_KPcI#d!?z39;xMeq;C! z)%TBwJ~#LiS<~(3ZyD$@HUN=dQiX8CuU4N=r4$9in2_{{> zZRFENWdqbIu7Ub)@lxjVHo?4{!(X?~XC($~sn$ zz))fMjd-{iTWPv_2+(s~-p)L&oCG90OW zqj6lg$Q^0kC)G4ryb*V!Sn4s`>ldatEo~D1P0rZ`cLoCp!#j6l?*e^K3;R=DeV)g{ zEP4Iq(FflVel^B;6XO%!>z0xDl4aF2(;t-UaB_;kVOXfa1a-*gk9zoQMha7mYexDl zy$$i#c!{%~@khiRPALVhixRGx8zM(<&d6$$VQjq4N<<`ZnoDF7mw^sxmlYn!Nz;zo_Vf& zNV>I}Xf{q_5=rH|OuK>-KKG^puT~I?igAg`thij}u-^%IsN39H2#FEySkY%Wz!@bmgOoWSjB*L%iuKP6>AF^jtzF*h_xezQh2E3eY6tz-nFYg?v#xk>d9i%=o@Xel& z9gm0J%Uok}8sUD%403E^xD`cifT;{{7-#QeH7k5n@a^TSa@;|0zfWT{P+G!ciP?t- zXi@Al+dVUk=UDh=#NV6CZGJneYiANNqK0uRcFK}BWSr!5&m)>OFA4Z#N4&P*tG1aX z`oxAGAC=0NuC`h z)+s#nwX>d7l2F|3c04lVu__o4Kw$>Y)dXPDOLY(yE5OZCv*Ms!kC7OK#^6Jvs zSc8-?u3Z@AN|nz*SC5;V`_}ZUQ=@uugIDkUT;7F9QmJU3T{nz8OQyD&q+9CN`aYkh z$n7LDY;Tye^M_&182Sv7aop7K>Lurkw%J?R$vaAtTfU+~kDD*GGDzq^7~9Zhx>>vr zs#vs_mR2xoml~kmYaXV^?wBN;Gh=B09tiaMj(#79+TF#g-0B;2y9PW@RiPKi*=`?aK9_xp`-TeJsNwvn@vtR{7Gu^yVy#RwVdsf zy+od2^D;VM^V^aQPyYZ51H~WoA%EpIZ~iX4^G0O6y*73*Osg{p-f6(MDmcopV}b)< zjWxx8-`z4Ki~c`MoKQOYASFpR~AKR?SZ z&RvNL{{T1~9KR36rd?{wCZ=s)R`EnhZ!A_MCU;4l@b6RQ>UMk7CIgMuZuK&Lq@c*@@0lY zZ7a;uO2Ot>{KYM|1SwER3SV@CBnqK%@v~2xQ@OFypuF)`hi=|w!$Yv$+kmAIMdp05 zrsO$dHyzR7f^%MAz9>4^jIFPr@fNG#jZ!9&c3;FUR7jA^$I`?ejj3Miw_YH>RF_e-(QL0;7^9FF{?Q{zvN>X404BiH=mj>QoBoOMS zFe2>BP4gnS#@NOYNdOX9XEi>p<4Cj}H~T^h`>lHB1`r5hvs-%@Rg~i}uwx+eH?wp& zLbwBpaio)vJZ`#M?$oma?@GDxmYj{O_>5^WX}1d)v%I=lOWU22u`K9hafVDCt%Axh zF^aRG+vqp8S6WquiLKp4*7%)nrTa2uD$5*2v6;6lXh!dpd8F_VZpgFo7xwm_r>3Vo zT4Zp)+bvYcSf_>Pr@#rQT{mFwwFS 
z@+8>atl%pC@kb87-5B(zPEpd<{XXx!ex_xWuCX+lwYG+A^c~OST}cU@WsX(`QvomikU?Q{Z{hg&TX_*hnVvvnNjIYIk;!1^f(SwhAPjG;-|;K(-n*&Y$qTIQZt%c~ z0BQbLjb;zELMV&i+V| zcg$Un$sR%d-qKE2Xl_kbn(lpK%0_7H;TTmi9hcz1tx{{WyU-%`e{W2WnJ>NmQ5l$xHKWU#iO zsoXWpliW&($l@>>KbOppAZ{59y}=dI_@R6$r|K8_^e<&?u3CY1-d^9dD&VvcM+~F| z0TZ8)s5?DLY;6x&wzHMfRJ)Qzju|}LJE6YfIo3%EnPu7<0Piqhh)d;3+DYV(#1nXf zREJmDYpP#qwoplH7ndYZSW6tGcB-oe^A6Mjamxd^0a#R~?`=|#^}4A53%OQv9B2Sw$M0Z;$m)9YKk(4nb;pVR z$6(hwww*ld%50X z+aV?Q1w5{DabG^`mVO_5*sb)>w>vzfDVTzrGIs4e5>HM?Zl^V^8WgKu_kMj1X+|=4 zKBw_t_Q%BE48*qDpZ33rBb7_2nj(=0b_I85(1Ev;^8SB6$?z}7nqB4dj|^%PK@7z0 zw}{H_#Nz>1oQ7WA0rVq3Kzu#$lK5A_@Gq6CSzq2-%X(#tPUb6g8=DeGiEzX&33Xmi z9P`jFt$l0ZjSa4?=eD}Jlt*c26vj!IoJzsf+be}`qnvE{fF*0rp^UAD=A}g^8}>b_ zv!GoS~aELhNHOCb;}FudE`yUm z30uo%Hs!75z{wbXLj%CisNjQM)UtSE#m)Z!6T;J$-FEy2~l+wj15o|cx7#0kqx3z}C+Rooh zw@F8t1PhZQal7Xrwl`$*dS{bf?c*;G_;*&bztT0i^&zaftVv;@LlGi1RVYe;Kn;+@ zyNCHl7$UGN{{Uwn3u#wYT8*3Xc-GfGucrw(sMinTsQ+_gAp z^uM#;U+Vhpoq)EtnVQ1g8Ch7el5rXX^O2mKDL5S}O)kl!)}oRd%UfG}*79YxiUA%6 z%efE$a1S7!IPdANf1`MB$18Dgx2>poDg;`5_fr;$*aGLw&H>0=DFJvq43;ryz7EoS zKCw=RQWmaM$2Op3l~(`|RVdIagvOaOy)Ja>n{!IVmruf zt}Y?CmIStD4$^$v6dd3I^8M^*rFq}PEkjiMIDl$QmJ$V1YaPQ%tF)Xh^Xbn6rhb|C zEloZx0d#E&m>PGT{LRWtm~bw+?;z8kV)@dwzuJUtaST16H<ft2P69|H@BUlb6PKk zbsMX#TUNcfv}x}f&6usSTm)=}&T*AE+;9$0*NU57_(!N}_Hs|B`OkRaX7e7@#Ejq) zFu;7O23rF?RG#?k?DZy*p6623-Ux~$aoU@fbtQI&+(G%i>A+RvamndhTAcIVSio#` zOJumX@*^{*Wg*xxY{nC6fOsD^I%N9IOhl>A-HEm$qNgU5H|t~NtxLk%mY=Il9<2=G z_B7iVDoAV)xX-ZuRp>gm!%<`4$QA6PI*TGZ0L5f7GafK|1#FR#_kHWpygzZQ&EspE zYugxg38RKdCDb9eRgAi+DpcSzWC4}P=hqpjb*q)L)FPiywU$J>eUZlr*aDOo8v$+@ zgYx8_o$1!bP0le(%;2paPFW|TJss@}m@b}_AneZQ0xw-h28|iT9_YE$U7J&i@6Ju_V&E1in zM&JfB(z%}#f5K}YhHUI5@w}=u`D2$+vbWK!;af{|c!V+u0&XSz*jYwKcD_m4bJZ%2 zJvx6~PKZ+GvQMeo{@0AUmxKIQoR!h^t5qI_?r#v0^#avD@4E88*bd)zi{nN;P1GNT z8sfG7GJHGM?Y&U-r zZax9{sjxxvq8$GK*JKr^{{RZ#;#@mI{9bRz!*+-I*P8y)UJdcynecDKH#*0}L9c1( zk$ILs*l=6&m}s?-%RWKMC}G0_x`S%U#iC5FJ8Y=0oPfj-wJrjCOzlEHZt2*Qxm9 zz`ilF_ygg+Qs>7aT^`~+IK`w#qgzBE53|c2a;SC@ 
zxYBkoiI^1J5xHD($Tj+c+mG!4?bvusfPMbg{{ZT*%+J|hOS{*;HF$Q$(@~d6xsbZY zdkW>@K2sgaGCAVD)tlg-i>^K?O{SlUy5x6jqJ609NfL>CvpHWU?|s~j+0R;}+;)#{ z#L6mL%WJJ~+~@TF0JYpn@b2m<;agbzCw*-@8-ze4b-59%0&|jC2>k0`_B_z<{5$=* zVX8?Sx67g1Gqh)qn&$Db$8nr}MMv<@!#*jw@WfHvd{EXj9X88AwbkK{(g;;n1`>%m z+n$l$LQdg@K)eifX5*Te zrlh5>^q2V&D5m9i{ap1g59^m2C+v5mJ(n7d=8+DaXU;!=aN-Eh;L-#5RxgS?Hy)ef zZytDePTH#%g*1B$RA)%b1eQ<$E<)#O9;dZ==YxI;>wYTIeii8{@gKyHTIv?Bt(!=t z0xdl)!ue)3^%Bb#4{R~6>%)Ju{;hTKJH?uP*Tp{@d6t&85`DVQQ)_vpSv>y$-MIwh zF5WZKarLcKr0r$>SS1*)ZGWqr*N?m@<9Rd>0)J;}Qw=*r(Hae1W3xvRl}8&(Z6IM7 zg2-{#o`$`X!7e;K;%|o6+nHe3yfm6@qxeLy#*;)(xl<^tABaD*eU6*pUl8Bj_~XX< zvsvla7b_m4t+dfQBy7MkmE;mR6~}5Hvrdg=@MaGc+UnZ$x`OGJ_p(B70gX_qnS8Yi z@(_iJobitQ)@{^MyH_1J!N%O%`5!ob)=RhJw!Byo^F#jtqwY<8^X5ibKX|+-9eu0w z)Ao|lEcB0x*VfiIHy2iM+t|Rf&nEVf6V60pa574$B=;vZ^t6Am?}H?TJiTwl2nvo@ zQXT%W)bmMIpDMn^+AfSaowq3d*9@)y01EVy3jY9ZNA5oI8u}?(E;7etA}hfvYvT4`O?xJLS zXIR%PLlGc5vSfaRamhL0R@cTaf?x2h{CVMhGsAb@Al7vo_~ey;(}Z_nko1O7qhn({ z;F{j>Zi}G!uG>^iOX9c0&j{-}OieZRo2%>6myJ|qiXW7|Jun-doYcmn=4({%(V5o6 zMxun1x?kp5`0e4Zin`G78u&|6wz`{6&@G_UrJ8GUMcSzjNtFEL`y6NUuAkvA{3n)Q z3;r$Jc=~Haxwz2`I$UtcZe)@ut>y#~3lK<9xi}T#{{RmBK{ty2A%9_NUle?Oq-pn( zm~`vewb_`4C--Y8+yLA=gMe^49FC92&8S$-s_7mS_<`{Y#`D?5BH!Fcc_UfCfDrrN zELZ`Y@Bta-tJ}wwySsXcVP_en&!c~381e8M!r22jx&HvtnMGp$+7XxdGV6{TQMdm9 z*{g^6AMj7a{w(-gqH5Z2#NBT}w73%#)2%@;!-<_v-<)+IvG-$C{{U!DgT5*7f5Vxt z^>2w9uZgV%%t`&9c@YR%i2<@?JdFBOIZxPIm!Z0ZKCymnZ{~e(3_s}c z;_)rSma5QA93nFk{h~an01WfaYiUkRML4yp?cB;Risoub===6Or}iUU{7`&<&y~mX zO8pG*Z-OqoWY>RZyS26ZKHUAO%I@8qV}t2mng0N??7k`_ArE+}mLju@eyOGX0_N?Cq{>FL)Iu)7mHJ+cX_=Ss4 zdw$YP1hTu9Jpcp@I$j zt)y%&85z{ZTonLv2?w8QUe-xnM0|;@S)=f;O4K|#@q@y0Y7$#X2c4%!BMQKe9$CnqBiGp@j#QataLE}ZK6PP}(DZJ<-WBs z^^`fKEgK@OIn|6~*!y?IT6@djUk^yJ4eK@4#Fp)y+a5*79kc6JejRC(+IX{4yjYbI z3k%4Z_K~_nwT3_O?ENdvEqqh1-*{HjQj10KRlbN>ng$*#xsD*rLpA`M0!aXzbt5&$ zpN*Hk6>F=VC&OME7wZ!zoo%S<54CoYj4|540&p{qYk6ZS#WeJ}F~QW7lUmsLoA#>t zv|kBA!*q)^$p5q+?4flg5=r?oNA8PwzFNVGV@g=0TTD8TE 
zz17T;5&e^OCCkeg=W-Ot2Pznr2as|;YzOOJ6!6}Ksp*#X?9fzeXa(eK1 z&N2B^t9a32MA-R@V;|4kiuz;3-?Nv6t~6KFY+L&d;?+!&X*UzgJhO6CsUZmr4hRaT zjOPI4c(upC-yB?dtrv$ca8+Iy_z|O8oIwXFRq2Eic0r0G`&;o zHnCe>T&qM|0v3cY3VM)#O7%YiH->dMZS3uS*#-UA?j?{zy&~r&*PeuR9eLup-voZo zJ|)#6cGWDS(>BP-y82@t2~{KI!Q(6jbHf}F*1b#N@9gj5jR#bY`r^*e;wB2Ke4B$G zyUMa;j27#If;p}XUMmf%RZ@Pa&rWR_b5!_IGipL_59``?r3K}>JkZA=k9;UVd1NT~ z+qtvzbHN?4T~)V&;PE3}N2Y3*77<#)A`r)6BS9cmMRbXpM*bg|mK>Gr1!`!11Mv=< zc#r*qZ+{~*M=X%tCA(Zk0&H`Vz_#TigV UcKRcD&9zq&ZLV4!^WzTS~D|(tTOER z0h=QO3&wb_m#vLd(tg(O<@l9tcRV}7dWD~etgc{}N4eByiOMk%1@mK27AKL50l)*~ zT<58-@(&K|wM&4OZE+36Z@L*>WsUHlD=1Y!0-w3K{{X;-4RzLI!@dvFp|G}+y`|S;C%wAn!>Ct z6z%8#01R(B@Nu&^?-_V@IWAziw!WJ0&d-}~muTWUyJQR=IOL4BI2GoDNY-H0E}_(( z?qz72HEEbS`5X3yR3jW<9CCdt>76q2-%^4Bb*kFkPEmZ}Zqh`Di~_iP^Rx^eNd)I7 z6ze|@>AHv6P0iz8S%xyfZZ4#86M{r-xVK@*2b}k=iW#03lw%hC?)qu@51O5qf06Qq z*Ms#fZ4=FnCAZ%3L3JT9OMo-LCoBf;q<8Dmt$)I~)PLu{{y+Z!*w@oqUx)52^@D$L zdnC}?mbbBmX1P~bNf`_o&gK}$%Z>--(*3G0`SsL)@8qBScD)(qm>5}3Y5IS|n*QhQ z9}IXcX7Epouk{!rgGjqs{F`Xv6Hm32mMyrPs*jg|d)IN|E9f+z5+d!XHf9J#1P1d z;yb-+H-a-HvQGqeA=S^wu7&onWjnzGC*(UuX#W7hIj!qjwWg&6me5}Pn&wF&x3&e~ zMh&++uk+aokmVn$#HX@JjAMoINK zJev9YLD0M*q4>)B?oDECI{xUBXLMs8d{L5QlQE)(BW_2S+2yg+8dYMUQ6}&B8p{l$ zC9a2%T*jfGM3V1>Zl$t{8^B|C0m zQj3ezW0Th*52o*9;QxodgCHiGJdU);ejiIE_00OU^?`XV*Y}!+=6kmx$Ce3SlX2cvRF>f9<;7*~ z6kVG!wu)KW#@QsA=&L$8c~KOPnke_2vhu}_2U;E?hRaPi zn$DX&-h-iOvBz%|+C1)x06CP-9x}}%w>S&*R3~&;J@(hg>4wO)G?3D7L)WsdSW!@27Ccq8}Bdd97 zf&k{DUzTb=*Y%-Nk1IJ355nK@iO+1BZM~4xwCk8`W0LLXmPsUyOOhHSV&ohW7$C^t za8E_x--b4SYS6SxD;tUQWt8fgiyM__rC9d?l!1t4-H*OO;Xun471!Q;5p5?#u+`?7 zX1dhjiKMr;62z-B0PAd_NZ1s*yApA<5JAG4tbP>S+uQ2q32yYAYGCgq@Q0RZp_gX! z^0*Qm`^Gr~cO^p8#8z>HmF@V7bmbK8YIv8#7rzcySzS4NJEu==1X9`C-Z49E&;f&o z42D8>g*f6qN#{I&hp#+Wd*S;(6kI`lHmRsL*lnlZ5P~?_omNQp5W_Mz%eQL$#GHUL z%XnA9x)t`lf2uwDPiX`QsxR&l65t0OY?5HM-(SV} z{{Ugq_eFS%5Rw?eq)LICl^89xjyMCqjT&)_vbFTPew!Y2zm)S`Q{pf7T)KycVZ6M* zhT7^lX1=+(j$tjVY)h&{&xg(z-v}T2{g? 
zag33HTTLc`r%ijT-s^gVH#(GTjSa4dEyQ7V;e5q)T!K&%yz(=QkzII&3-WTi>8ifI zN415Ix^zGAomnK78cSR1mwJYyX2#|nKHf`bnn{9~(|4S>Q_fhSUViBtcJqEd_&4Jf zx3|@95<7iGu6&oZxV%SL;y}fJd%Jesihf%rT!OFLo%*8&rFsTTNnE=?~& z@RpqvhB;x>uBY;D7)U~~bw4pv&QOeyazMh@PvXysT1Jz3WqWb{m2WM-*k)TR6-(%r zPDufjjBRAVVt8c7B;!X?ue5xs-{t;i6tvjo{6+B!9|!50YueexJc+2S(rNL|zF~|Y zQzgnWvWbcLffyqr<;N!$PmPxoYF9VO9=UU4Zp|Y?mq~1y5fIx-sU^8Mazky-d2S@o zd>5zqy58*QS`@l$(cdk(xxToPbP_5T+R&KU+CleQVt_d8Ye!W0VSA`}d&G@%uEQ$Y z$19j^V==)sc-T{^<%@gkSaY2mn4 zYg9idUoQa2KnI*R7#}S|ZQ$5^J}=?%9=Vw)3t8_ z-1t}F{JLW6Gh4@ZAeKjp4e$ViHLJs0s5)UVD_n9`os(4?-(CPZ3OC6=gcF7~M%3)wf%d={X0o7OdjXz3{ z#goZrWcMoeRu>N%tcxT|wZMKsCnFp1z+~mJc@@!Id_SJ=#1Tz@XdNa;h2^%5$VLYM z>?D9WVbmU;wadz*cT-f(>QrINcFH!oZ`pJi^vxPem|*)+ksr=t$_i&_1dur6l7Aj+ z%Hr^y{P#lE+WPY2&%{#22?dpcoDq@6)nU`r{vQ7Tq4)<^@gofzO=yh5cFcD-2@(u0 z3ow(f0Q&XETI2NJ3s2!+6Uk|(-bZP?d9|~wy$$>rYPMV(zHzDaRUP3ifN? zkDeI3)S{2=lUZBoQ?{jKuLBP@XlIqJAz(9v^5$T~_Y3u|KlWSDJ|XCS1Guo$^p6ht zR?t{<1Y7{e3Ie zp@@FZ+;>{^J19b$f<*o%{iVDGtb7^ObSO1)H7kuOTU57eNqLcwfV;Ns9QV#E&pdJQ zo5OnFf_y~Qy6YWFU$pxqwygv0wMZF0%E0Zx`Bm9~C+3iPR;2zi@d&!b_rdF|rVIIR zVLo~24$gXG{Nkdz*BbiX^4@=h8u5FGnk9RE8qPq4fdiGtYSrSUQ%}*IaD_N)sGk`= zHh4c={hVz4Go)(&0B+V^-WhHe?DEDMKvkt|vVqIFJ!_Qxjs7P1U*YbL;y88vUL7`D zn~0hvn|I6reaC=$WPWtrL&gz9;B7u<_-(4&v9yGJy63}?n|FiCP0S?ehI|mmsKr?L zIpS{(c(cV#rE1<1(@@O`@)`7)V)=$nK3wErG0F85)-GPrUON5-@N6RZC>&BaCo;v*lm3$L$^P{{Y0F3j9Ig`wtMubE{oN149bi zPql}bkN3)LA1Ei2UU&OXU1|Oa(lrP0)`JW;GkNynD@%2YDq^!FoB@zNbB}ZCYct>{ zhwW}G{w7=ag2uyIzmM#4MI@Gp;t?gOeTkL%K->o&m8`K-v%QSfC@ChcdoPT?X>W!) 
z*T5eScy1pPDb?(4^u(Im>~{le#3SCjjCkyO^V+=s0QTbe_u#*Z-Vf7kyfNY!H6d?s zBoT{Ch+P={?ad+u#x~#%diz(r_>1Acg8a=;{{X@}q^ya3UdoS~(#CxWETfi7JB7@mcYI*S zoNZ<2r#14g?9HM4I#_<);@x7>+VQ0Fe#NL-pUE%|qgMg>JqZ_!7#) z#QIWww@F_N>po@DquW6~(cd_Q5OS%#B&KYnS* z3!bd;srQT8x^E5qc<>B=6g+8v{{RUTO>K8;U?7q^d7pTVW!!M6P62WG*N9sF%K9yx z)sch6I)<3@UtGefAXdRoH|7CfsoFF3t6m!T0Un)kYjbPkol&Qn)_vYw600CbjYN45 zoOARWMQ8QJ4|eNM>$o~Fn{NA_`SCmB$AFvRuBjB>C4)NVk8(8q&`hFm(Yg#0xcl65 zkIK4_hx)#isr)a{G>ezGCgVwh%0*jAPFbx(6$B;^$aj4!^DD<1J^inT{5qF6(_KY! zBx^--SLi;PN7&%V;6}g7 zz76=5@M~M~hP`{QFNieV4#q^WwT-pgrn7swB7^31+@CU?>O*(Q!J^l~pA(M2J}+p^ zA3(#!PmkwA#42@U?R%H(y`?Ct+Q+#5*o+wbJmcm5+EMxJ74&zUJ!|vpR`^%r-w_*c z?O%!>9=%&<62fl0MACV0{pHSZee0(G0E7?XN7>S5_}`@}pOoXoGaNUiIN}#HrOUXb zML5Ph%R|^cE-+8pXW>QO!R&qm_^(U8w!5_PuZAPJw|L}+cy*$zjDrZuagbCD4^Gv) zt9%*wh2o3E)OAlE=vTLP@tC8yy4K!TRBVEwvA{cz8U7>dQyPvhn%b1(86_TV537G< z)(QJR_){mGT%Y`!nz6rZrrYplc>y_F`(ytAj~e;&LGW+I{{RYU@#%U`#0@6bO@nZX z3vFX;X_hQ-F~&(J@vC~b!=H;jI@09Rb)SeDMg5s!A+@>HON_4}hzZ!%uMnLtXKig` zFJo%H)0uB`?vMFmx$BDj{{H}ir7qtN*>R8b5&rGY;*`lMR)o*uPN4r|&ZEqSeEb6!bM4*sYdL!l6or*n0~4EMkz<{n4kT?59S@UgV!v9{3Q)#bU`B)o><#_hXyvIabL=r?Da z=bHKpNzi^U_+}YW+HV8tcJZrEFtlx6)jD#-MT~F&$>!G&yAZL;XJ!>ZKTJfKXFD>;M zG*1cY_ZE&Lj_~-4RTDsS&KZ@8@(BbU+2XifVTFX(B+`Dr)}`Gw$yNFvJnMcv9uC(o zT5Bn8G*bj7cCg$pgqXV6BXBrP+jEjIN}j|VnEY>|_>$gBhP{NW4*=bN#Y2CnTj;u^EGE-oX`+k>yuzX<@)R7L^OS6j+z>hPXGR>}(JS8Ct!{WY z{{VixrEMO?{SrMZOSr$*bgdfl_S;Le-uFT{{>IWX9gxP4=5H+j04{QV>ahOnb{Z7+ z+P8yl^)Iv?IGH@QXl~&buqYX2d3SkXh*l+JTnud-E(R5kh}PZAo+`V7G=JTsHn3bn zYF$*iL=_ZdlFHk1qc|iI1$p)RNgQw7Y+jN#vcCQ5mD*7lK=G4BLjy*#j%vhVU1|^zes=&g&^JwE{yv z$F7l{;Mt{25AaQ^J938mx ztHz6~&ujF*`~#zv^5{{I##&+fH1=AmxwT#Qe{Q#TRRkO(54^5ea>TLc8;56j$VBvuvVYjJuYON(1eNg!l^tlwywb=plc}cfD)lXf+G#4JIHa)!Lfs(&{h#^>=qzvQb>sZ#q(TBqwS$1z#v8$+G zeX@BOq>@W7Lr{p6AhXe!ZkiaRh}4ua|(Q_N;X23ZhDNaIl->u z{u2TJ079If_ryQ^RdSlflclY~+v%}I5>Bd-THBOXpK9PM5>ITAmKn*-W&Z%eKXd;8 zLgml?1X+LaRhp#=$x7dU{5i`c;_P;J@=2;RHu`P#ypr55SMLT)0g`aM0&|QW$2hDl 
zbHf_Hh%N1{9$T1_NmtI6&Qw;y_8-JX20L=y@Ib@<%DuS0VE2aF-0mAyHnw{LK=1u) zYfrtJ+Gu>6_q(|A@v)a^2R%<7{RMSXs;J3Im5Nen-JX%-F9+F;AH$YfZl4vrmgQxZ zUn^-RVSdb2%zk5hiZBj-?$WpYgwj;T5GB0OPs0PuNNmhNdyp2CcQ81N35H({SLpZ zJKZCx_?z*b!^9EkuxPf@$@VMNH(=YYWsIY6bC3>BdG!Z_(~{>`|46))hiad&>KN?y%2B1+5K_$Q;DLu29?CJ! zPdTn8r9I8X)b`g#LvF;k+DR-ii~<2X@&-q0)sIrW{{Vz#UOgu_Z?}n;e4bcb5uf4c z52xMFN|%gsQN65*b$OzrVqX?%;%^N2Ur9*b(mfJ2mf{#9Bug;f>=}qLpa+cn%u5_( zk^<=Z47!Gk4z1+Lb*ac^iYQ*`Bxu#43myc8OCd47PDcnU3+$S;zPU8bV$d}I06g%l zYOuJ-<7xSR@EG870Rz)|PZ9W)wZ~gqMvY`q<}IlaAji>0PFp!7_dQKy@s51cZELo> z585kQOH+>5{8@Qr;=LD6wY7%sSY$CKE!!5588!jtl1SX+Xm!p(I3pOV{xtBmrEB8J zbUQ@T;zYKG&(#{@Pcw4v+!c|*+Cr*?`EnCD138}iP1FHohr`gu(@M7-j* zJWZ-u>wX`!(o)XTEjV@o0`JdOevER!aD$Nl> z!AT!5IQz^&C{T? zx6|-0AX8mfY@XRYEem$-4^2l~pFZXX3)aZR3e#@Ybhs zsZDIL%G0dU%6!HM8MojZH*M_j7NIV2Di62lohoYqunHs<8?NXgXq zypML%bhU=oNu`tR0_A}#$tp-V!2_Ib{CzrB+}eGL>FsfLB%RQz#|b1OHzaT|mgBGC zSpFCBMc3FMgL7Sw*#)x1b_1qQIqT0g)n0gpX;oI{-CkJ6JhzERE&L-swaqEcGP659 zq$O*dCZ4MW)Z#RnG6oj~ff*!Y^7ijZ;aw6I@fMye`~%wjFKa4I8&;Fiw>sm?u5 zYVED{iz}IBk*!2?Fv(?%W4<$wZk3T^V;r+;3?pG1Ur1MAJBAT^9@!mhe`xt-Z4F}i zby||@(CM?@GG9$((#TRKnbFAkxGu5`sygF>M?EqtobkoeMW$V%UNn-&1VQ7EcEgzy z3|WaTLY3t6l6np*X^qVH%#k9rA+zU5q=D=^fBOFbHP z#Bec;Dg8ZasM$&gXc%Ig2&z+>gC72|#`HF@#ZW=hs{l;LamLoiO;CjV}(kF&h;UH4UPi!Kb3Op@Sf`PT)Bh$UORs-MY$Ow zMmvLrAA5ibk5k`{m8UfdiK$N3U*u~)f)HI7>Ut-Kemue981*Evo!;W=Ic>b{A9S=i~TCL&Vi2nx(=Od8o+N5TE>}KHyH^7Bx8={Fg^bOR;yE^Xw#Eu zx?a*Q$(?`f^Ws^b#Sdq2%rr|-?DJeZxgdPXGC>&tHUJsUGI_;&H^7mqTzKQgx3^l& z1)9zb#EiT}rA1dQ+06hmh^Ii!L*tCluD^JtKtnVWtBi?BwOE4MLBW<58fAS6W^S02hjK6@m|idgjSxXZ8;?e2XnlO zUW-lmKWf2}PqHL7I~aCJ<%_006Vssl`S@-jyF|LRg4fJ$Jk>=EKOe@U@rH}wU28$T z)9rjib*0<9?Kq8O4yfhif0zT7UD7`Rho*7Oe6ZFZ7c`$2UPq$cSzl?E_YN&4lKEp< zS0DmnGI7Dq4tgH7=~l%}$yu$t^*ZSCzNbn1O)T-edD{vJre&CIC{LGc2*CWm&MT_; z17{cZm&K`Okx)k-n%Q>70kBd{a=#vJjqExsSE!3~HOu+y1cpdairQo;Dx{B_Jvqpz zz8T$Fe`@?-vQoEkpt4A$C7Lbq+91mSq+}HwH%@zu*Hm4p^EGyBIYmioeW`t++G>(& zmq{vILnZZw)`fvn`^dY4^&f?N>HBM1{gdI2y=J|Cx1riI)Cgh!0OG559~0_2ZN{MA 
zZm^ja>dr*Bx4%>h113h+0Dd^n0KR+uA6DH6%edg=^Jlz!4aJeOAR-@~EiyGdnst@8{B@{#M59A@MxVJGlp4{Y<&zORL>B-C}-{PdFXHOmRrvK%O7vrOlw zKR0g1qLWza4J4J!DosiAM`Oq#_(ySXeW%>ReW_VnNq;;R>l~h0#>t55)2DHflUaJ_ zf+6s(mt%i<@e5SYtsGvE?Q|7TtiuR4kjHmYp!0#i73}^b)xN`jrdmv@Ad^+So+-AI zwm}ui*nOLhG3)JK74aKZ(0(oWGA$ERaUw?^p%ToCf-Vio^4SR(3{xks@ey3K@|->y z-}?TwCN(2<7PUTe@dHaWo|^&Hrj_(=o$!C+SBNxC z15Kae6j%0l{{UwSx0+l`sNPoPBr_fOP|Nt{zH8L2?DRkE>)7QNpJ|jyc^bA1QU{d+ z62O7`wT-wP8osmfC&Y;?JSX9)KeS`FySw``+D8|euz8kp!nja4AiB3a0^JDCeT+I& zf_KpAt!S=umH4rz{6L#UpHI(JlI@h5nCnqbOa+9)0#*5_LV)K%Ga zkBoDoMz)%E_Hs6`CPaX*&%8<)$QJ8}RR!*N=1>sGQz!)(e_ zbNkc&G^0-X+-=#|_=U9(jPcFpMw*`fM%S{8pI`aqirdmWMe(Bk5RiXpKbYG|xU;tl z-++9q_cit$S6W=#S&j<*7|rS2A6iw`TqbY zAU*-e7~pi|(&XjR{{V?x$(}Q(>faTvUNrkhm|N#DrKDxJ^#U$G`t{UU+(JTLZoDm^>rz?S*jriM!{x&(TU*4ZYn9p#26p8A zGAoAo(eXP=(5>RKj#zblMg|gMNpTIUAG*)9jGhn6?laV9#Qrk;ezlXtg8IVRJDYoi z4!0j{Sp>HDUCSSrouP_ka&XFc$vE*%bI13(ezkIz@AgXxWDz7ggWS0XaCsR)G3AdW z?aoJBaI0haT%k>9a>hzBPJ90VT~5o!-xT#r>&wfrcXO%gQb;3tt`zSK-4-g{R)HE85s?ad{sXN z{9F5VxUtnP?SHf_Cw0G)Sqc*u@VscL&T=p?M+cnOgLv!3nuM0N*4`hrC2b;(=Hlbe zNTYTjZYdbS9600*F+B5(d7p#)L9KYBUDa-1H?gQyvmi8T@{WA1sM~pB$8+QsPQN{P zs~L)KnQhbl7|x}7Y5S{l^fyn}EOqEhY4dsBTM+8tt0z&Cr;+zgGw5ojwHo;{Gkl=_ zb;W!S()CLp3Ek=%y!PpPe$vRyIZMj?Or)9e#0k~GbwNzV|F+_ zeXCc+GI(!T@LKqj!Co-&4yC0`n02|-og{@wK42h$xpA`~l_2yj&~hIgyc@1+R=U;1 z5jCVrgZRDHnC4@kH z{G#%ONZPJB$(Q>d)tea_GR7B$?PG<&O=t+hB|#5#du(axZfgFKS? 
z2P3J&eJh!|_}IP$(ysKudvU00Gfb@_+h^sIIT4KU^7PMq3=DHu{B7a=H^q>_toTjk zTZXxU?XB8P(pg|I-5CRDR>2{MCxAfOD>uS_3G8OqC%cNzQhj>XHk?^sNDk)nb{2Uu zo4E`^s3eYZKq9_EI1D#6o~yT?pOL`n$-VSFU&A`28s~)V;MZrmSym%-vB?9pc8=)E zs?5xwvW8-F>-blcUwjPEH6Ig8aRrpJ>VYFmt2ym{&1+{K^bIN;fw+zMRU{JpjB<6p zD*nsfCh*Odh4o1Sz;5pDCAhW|sdQ3Nz$b((eqaFT2+ldgjjn38x){@J^!-OqxwXy0 z$*5dQaVe5UD4AWjW0x5#oGP4q^wPsr!AYuhJ}#SB#~!HwhCa>Rvaag*DgDc&ORSBtfe73oWEVSf$A(q`~&*{$yGo>`)`CSF3vBx(y{LQ8SLIHeZbvfuAszNZyBa*MxnJ5bQ#hgVBW zk26lbnm2fy!yLBEnIL6A+`t4FV~&TOIeoK1sWslOWq%6K8!GLyvJtW!&4yV+qm?Cb zf(~~9&Uy4N2VeMuTDQMGBe(l*n`;x=e(Y^T2&~(N3YG*F`G`3L1NW(A`mtV|G*Uou7jBORuxo{hmgR{Fx)L zM-VfNu?UJ6emNuW75T7EIq5F73yVFrdVZA*7Mr}4iJl*qAn$c_AaB6_EVrh6Z|Sp} zc_Ov3@ZG%7%^03b=2t|Cq{c(YMst?s94j%Mq@cLx4#D&J6=YsQ4jYvzcejv%At zT*jFh9_It~@4Xc~O z=5WLi${D!~&NI`e@bgzKd_fMNC%4hyiIOtFh~ywf5c9@J!6O`SNF4fL=zG#sqkiq1 zQljS0oBk5ZuQEG%tmL@7lzi~6*j>pT`jH^#k-+1Tn&muI@d>7eEjvvOr_1K~D{}G? zHHg-~)vOoehkGgJnX5wv2Qi}5K*UpL= zi8Id$Uzh>{?gu5e5->(SZ1%3B{t_s^=n1EP;6VQX<2BA`*W%ISy^W;4+ZxWZ%@GeA zkHK(Dfx!a+^~o6TU1!?|{Pev40Kf&Tt5aOpYX1Ov%-X()=4@_n8tY{Ge38f-Eb^VP z{s+HN(zWE$<+_b-5=h<_M#R2ysK#&sCy}4?n(D`gEuq}v$IQkGGfE1abDR_F?NVI- z0Ay%!+}+;VF2VsI0^cw_6$VCs3i=u44m$Isdy&9lqP^?={{Zk0IM%#L{em5l1k9lA zZTrS^i~c2 zfVo~VT>k)wzS(6lZ97G4wT*I(1IC1P;|$q72ORaT3Y6hP-O^0Xre8zijQ(4mblSz_ zRzg>e;u$2D9)qbr=cPk$uUs{}h)iS5kYY^z)#y%h>IWTpuD;@EE|sk>VMwP%9%r2> zC<*8T4Z}S<_o;3?JsdV?$hU^#KR082nngQuah3r6bC2m<>PxLN&7KZd_a28M4be|K zX6czpkgV7QE(yr`<27c-T7=5aAln0}%*)fE`Fi_w_CB@I$KiXKE?zivS)nk+xKAR2 zBx93+xzDF0bmpkq_+M36%H8QOwcB8|y0B8Y`L`;Q+jl;`)uX26_8Aet)j@SMa4JNl zCCeDtt7BmpJd@j#(z5>5KRZ)u?jJXpA0Yn#EM!)#mxOiAO6jej(waXr7GtOBN(k|{v2xdqzfxZroiEth8sb{XXYID;C3~d!%w6cUsbEQh5f3ks^(V%IAORR zoMYCQd_`v*W(UATAQ?a`GI-}c#Mfc}01Bs3@_hC-j=&5<983YpAzN`c&JKExp7^KT zd>_;=ZZ4u1_H7lx`}3Wo^&7xYaB_NmeT``x4NL15Y4uu&;u`Z#eN-ei@l31)oGOFR z7xf#}Dopuw?}CkTO_*z(-#}UXeG$Eo%BEj^;=VF{nSe zmO`fh1J2R6l5vdZoYKSaV(o3;b?=($pSzMqDYPC)I9%WpkHWd04-}hRj4I({f8A_( z&Gwt&{aVH;Zgj|`k$6XB;eh8D{72`gcmYGh%c5nVd>=`z6b-8j-31Aw2obkmWnU$FnD=( 
zJgVx}+xKrYQYcpe9P#qtkOArkT8HeqM0W003y936m6|yY)(i+BamGeRu&-p3;H~Y< zF~ZkM@hfFX$iO-001kQYNWTbg0_Eqkp4c!OWnu^;A%Gxy`uqJWU+m0ee#869juQL3 z9wC3G_C*;R-*kRpNxNH{%3dt~1PZZ0Hd zj@rgEu%KP+$M}dNBZ3ca@TepBH+OAf@kyuJ-!uxKst_b&w>&W*V4qwZk4iqLi`l|f zE9x=3_m3Ky+eW#!XqMQCE*mC1;1E9c2l#aSD=Obh(V5*X;__pcE9Y&?5y0!){{Yus z;cxI%+{Xp?RYM1g z+m-z7{$_H)J<~iU^TQU+YbB+n_1&t=AD1N7t+eBIPki;wVngt^O-qMw?MH*{Dx%?> zFfWD$LwEb5x4*4@CZ7P{jxg}vMy^}tW^VmB`M*kS_ra5BjP|;;a!#x^uaoAe=O>Ky zJ-PJY*H_f>wEiE>8GTAej%ofE)AWUt?6*<8KoVFM+)vkcvBv|ESM?7HeYV{pxA7xc z3^84fa(50v$@KpKBE4V!6**{ap>0AEk*4A0z)-OnB?6O{<8j7w!0S|QJ_l-7*O6RB zb#D}Ws8qL=8wUdio(RGG`L0P|@toUV?qz*l#NvP99l5i$duVky5jweP|fgm zTZ@K=&YA-o2J-D^3$q75Fd*=Gu4O|T&8#2XZ|tPe;WBt%#4=qNCh}y7ZhXn4j_73L zI8vG8wn!QF^wQC0@hldt_IL9niS{9BFgZEhf#0ruax2m`Pub%3X~Bx_>PSly@~Rwj z(#2>Q?eFTbqT9f~97KNax%SUVfG6{aX`KHs$;on!{9!x_^ns-duRY?OtJu zM_4wnc#tOUoPt|87{{sf^vY)OCHA7G=H5t3g6kU0pEhtu1D*=|_peKn;J1lHrQ*~d z2c!)b%hRDdy?f%Ep8@qaV*((pD-*ecV<6=6d-IN%tbVJCg8m);OsP_>-|;;9>ss*& z-AZG$Gsh%^7k3Y}cAh}=Q3OE=YI-dOcS8ETz{{Rp| zz$EsdCU6=Sec2cupO$+X(Tm|HiQ%?(HxN9y$Z)bQ8=bs!oM-f=@Hoi)H2(mvytvY; zx|}`Nh_yx0<+m1Z1=M3KRX{>P=dmY&(0W%bYvT=LQnQI>-xML*GPv4UAvs>0;Ed<5 zKcBB%d?)cg?%do%JLfVnEQ2KAh02@^RY|@VYu{; zAn}Y=Ps1H9=i%LtjT+Za*C(~qqqdsa^v72c$Zf%s%q53MbNF#x-m&mo#PQmZJ)hdu zS8B$)NF$~I-^Wwb{{T9dMfhvuPY&C>R?tUgjT(TydP}%t7-a_4gv>WJNbt zffWD?w(G^`u#*5z>FfWTMe zjo+cKH1TJ{uNruhMu8*IC7bNGv9U2t8x>M9_d^zJ`haWR8%XhWjn%Ao4}2hnceBQX zmcsPh20QiRzpYzqtJw1LKCf<)5O>H!L@}N@jhhE7c{Ma;JUrBt68p}|e#O3Rtaz`7 z{5j)mJ!(j!@Z-z56pQu~u>nn{;ZH>z;cG08c`W$ZS$ie7)R4`a-mzuOU5o(%?!yYz? 
z$YqQ+bGQNr9eM9m>bX1e>*NZZ6=_F*U-%)fc(cZSCev)*NgajQoOXf)Jf8(mDxr-6JFj= zXS}G1KGRZ(twQ;02{EZ4=kJl!4ClTH zt(N_q^!Vf#H#$mR-&?nx2$pQTY(_H}B<;@}jAY|EIIfx)iuC0dC^nV)ea9lDQcl$x zW+b_exHH^&s>%T2fK}&=5m}!P{w;}oQE@J(aAlOV7704W z-@gF6Gh=UG-mZIR+PzCq_-hO4mbSB6x7ls&)I(~{cPb!1DzD6aagSeWrs4AA z1GlI&&kg)Xi&nasBe`2>{Jqi3KH#oF#?gg5@@u1S563FQacAL+p%;{Vu{E-Jxjp`3 zqyjxXbHzGM4^-3{V~@hxjj;@>^Gs9%fZ(nUGm+_>d*Y|-xQ!fB)7Sij)fWCEp1Amd zb8f9Qt(@0+I3^gSAaVZgc{%*6obmPNiTrV6q%MW1+UT=GI~jkpppYt(;>SC3ya)6;J*U4Q5DZ5$RV^OXe1jb<|rx9E{*|*mlWN zRPp%7@W=3fzfPhIa*}KWS3()gU3_PQQN(E71otAYF3FhgM0&Le9_&+k*66@Sg7YX;|H}#{{RSn zt8mKp_6+bct46}*4pfdoBOK?P@mtc&@ihMc<=T_^{{WGc;Yz1gkEEm4qSc^|LH28a z!+8+QImSJWd`t0TL%I0te)j$$^Cf8Hk?idLV{dMOn`)yNCsPQ8;awu zD?7&4r^OF@a*uOiZ4qXewXt)!{KIQ52qXp00Kgc}di5O_;SY*@A!^cT_r^Qx)cZ8s z&^E{ivnjeTGVd0xwIWMM_(paX3*#zBA3}H$6lyoPa`8X!4c!S`cp=;vh zOS{|4U0TdDX*zzTVq=a5mPTS?360+|atO{F>0Z;}zkvQE@UEzrmU^7uX+`r;%;1D0 z19u$tEIRTJpsihd;V!N53&a-M--s?&{{T|7wpi```j zb-xtty3=P_+zrniwY<`Z&OkC?9)};r)0~r&U3>fu)b(g#jrE)R+XFsU-s@E+6=1$u z0x$*$89V{W9Zo9NpYRX+J%p(|%R2?b#eDc(yskHN&mfRSLB$fF^^$G;zoc*MV54o% zKk*lbZG2a9jRc=)oW%h!%zVUY5-^No8Tkn(867~zGB~}a>kn(GX_{_uaM z5T#hwOLOMzWegwY3KZn$JoY5|ns59ndmD1> z9NEsnb~?UXIdPnmkaN#I=chI0RHajv&tn;3oKef&_!Td;tGRUemd58|?=l304oS`= zBXKG3JWqgMWC{7;ZOB@g<(A z_Bih@tZt=2B+;F#91%$Q0H|a+Bz0`$Zeh+%XvePUI#r#w@2!=#xbobEQlx@ZZQvi5 zuTOr}i*MoEOB87&k}O8ShnF4++Cb;mumYrACc@pP!4De|$x^)Jaxf1}=RVcY@%2+r zt%oR6TAY>dh5R{ubo|*$L;ogyFr_XN`KW2qwVq}2q^8q>J4CD+C!ndRFotS75?IF3ANc^@ks=07S zeb~nX{P9|m3pttMN8K95mJP=nc>ojJnl1kTvf3DAYnI>VZ4qxjB9$1EcOVUpsJbE>oT49p^a5OmH*av)jV~q~f?CC6WZjYfxUD?W>Mlfn) zE?pwXQjOB+X6s%9veva>sLtm#ww5Q}SbkcYr`-X!q{*i5$R_CK6l46DZZR#wJv z#H%J$`ulynT5+w0>2jmaP!C1|?o_a!a=#DD8~o<3HnB;w0~D+Q*wWN=ol@ow@jT zCB~@`^3Wf&N4;KFe1!xOSoO~37{_5-pYVpo{{TL%{{VkK`t?bCM`I?Swk;0nt>h>b zv${qse)k+_gYTN@e$glV^N;@k0$2QNj&66edl|-5`yG^eT$3!DCPu^KG2ns5c_55+ z&wu4m+<02skV+#k#(L#@gO=w!53i+Kp7L2%B!XDhEFH|U9CgUr008ymjyUO4=~vR~ z5=rGNU5&pZ!Z5CTkTJmbJ#+Y<*WM0;-5$!hMn8wJv{A%&BS>)`31uI|KpcU<_3g(> 
zyZ-Zow)FTl*{%qV28?r&CudP(0PX6fV_x8Z!I0A2Q}Z6!Xvy-qgBp!wqNc#%ZoJ*nq&U> z{{Uz}$+QR*KwoYNg?N;BnKO;+Z$Y z>D^-Ac^G_r@@!+CclirbN)vZd1YmI))g!Uz++tx|y`mEV7gkfJtq*$G0cAtf@RmtXPPkETbSR z9(etDtz9^d#m}%_DJlYaeM1Bp%}6x+5+D+APv? z>GH7Mjt^swJO2Rn>mnbDH*2wOZ2XsyHo(ZGcLVpjf;c_>zaBO}hWD^+mp79#k0_P7 z+FWjC!v;Sv`J3~`DorcFdi9Bh=`CcQ>464mWD*8FNqn|OIL98|)#d*H!mOotVm+Lj z>~#@*P*&Ll*ULQ4faVmBD#IJ{0pJXQ)SsnUgW@bV7Wa!VkyJ4G2pg2J>Onm}!ng2IaHsd`PsO^!|)bRWz@ekQdCiF-p0S?weLiPZW$OoRhXT5F9 zn07*2~o^m)*rLJ?9=}Mgq`iJq4QR2c1|+iIq8w#r9$2+)E-tSjC{BM09GX_ zpU84BNIBxDU3??)CB$gcY6&AQK>?XbJbxBH-t<4879sdss(B@@wQ`aa!Hzq9{{S}} zhAp0>r5{(!EQ{n_#9tK~8J$wml*WhV^Ih;c;kd>*$31Gye-SQWg*HUdZoocbzkZ|; zFgoM>DgOZQt74KgZ|ytAnRB_ej5i&!bKBJa09us%Fkd7Fzqy-@r_31#+>G*iQu?kw z_lx|AaO}j3#@F&jj3$m1ZdObmm)r5{SyB8-)b$H+@J)pds&_Wf-!b((a0UmtsV_bc z!F0Cc`&RDKDA+lU7?3Mq=M9x78OMIre%IlQF)~Xgp)`arb=?TSZ(bMt=A-JlpYwhu ze2=R#qxgxa%_A&V5`0L%V)=*gTN$NQrI@DG_3Tz{Jo9&agCA`;-K3wDxjB+qQ`T^LT4&9rO#Tar%dBFPr08d)eUx=5{B&{9VO&ScD2p=fh$vkxb00T~MgSL|0 zU0ua{aF-Gjxq>JdC!iSGqdtQsxf_oPS=rt!aJAHy_QBa^d5T12eZb{ZoQ$61y=6Lh z{4`UkJ73H_oUfxg`2Hkn4Y8J3WQdTWNfaI3^T$!!Bl(Jw$e4?~09lA2YNQ-P%uluz#rFqXy@m2_SwT za(Jv>D;FI(n-6C+v~2C9_>ZYVo@;5q#BfJmgr0GqYK~8c!I{n7lmxQw+mb*TI0uv1 zeulX{Kg0J|6Gbt$TXihRAf1D#!TCwrak+W?{)U;QcqOCL-gXLA-I0TVwU~BL*&t_j z0O`#iSjEj*wk}GY8#@K@WDDn99U);VK2sgck(bY20QdcAZGIpXDIfM*Z(tsD( zNY5AB@J5`I=+2JU;#JkaSq_;TkF@3Y@Hrig5&TBfm5Mfv7#YH{wpG6zU>-5au4-K# z=S|DM`lSu>guo-7Mt+B{IjJPj?q)=mJG10GiidJCbUlYZT#RQJ&wA@++20 zbEN+Og@LKg>Z_)}MuzJ!#fDU!4uJ4N6Ofl{j=_k(21X$;tj+lznR#EgOGC6LdXCq@(h#eK5j9J!_#z`ZlPv6bHH*)hHb_| zcqOm_$0yf6O1B=KB#rVeqX+;y5Qbjla3WtCfXhHc*Ji%-BcA zxg79onzO!_>?l0_;M`8KX$03KC>o_llY zpQURYcQ&>gjBRsf-ugC=bb5Nn$pxX2;%=Gaq3Q2Z$E`yoepHcPeC@*hq{;^z5J|#< zK9uVp4~AWn#cvWUURl{8ZR4Ioo|xmVani0YhHYkz1W+k~SIQx|+&W|qo}If_N{w2n zJ1@Kb0iQHc2iqdLXMZ;SIb@8hH=Kl`k^uk@ps45ZI7Eqs*OwUU`OK1kqpTzO>^fq8acQ8p-JPXJwF=K(R_WRtY$mApRx=Pv55B!4nF~(Z~nd5 zbP>7~0b{qg<~5V4KijKk z$@YkrKNe`Ef}7@e_vL;@P%=3uAY^{M>NoMa#!AH>hvqI-W6Xwq4W 
zi(7*wu^e2kHe?=CImS*1{3;u-7HHw2nnv5@Hg^@v$B<4=KJOh*A6l<*q3GItsF7KD zVnLnp9iu(C9m((gX|P9Q405rvx&gjdMsF|?&#CvR_EkO>-Dz`T(^}GQf_;hM znH&itRdqaPuk-po{McOklY#y{Dzw%X{#r8I zsB%Z|5H45H2LWaeFn?>m8#-7(hYX3Bk@7k4`E$f3%~7GhC=moUtQt$8vG$ zSx%%mt;ou^nbs%St>KbZwYY!zXi&Cra!vuyuW?mc{>t9)$8%)y`OD>rFy32kAY+dF z^dp|5zBqkXQq**PJ5XlSq?IkRDl~|txaS0c+n>OXt#Upn_*_>?~dXbUIQ`6tANgk=E z#dC6#G)W;!}7GU0MKBN*@B z+upfW@dGxVe9MU(f^p_a_W;|_HA~ zG6&GAvvB)-p zu4YrTslmYm9;ESHy~m2n+F3YdcZoKp=E!4#*Pbv(>sbq}8}A}m(6nU;2mw$|PSSJV zC*1U|V)fMIdXaJrQ$z0P{LiCaUpO=?Xq#1^X?eCo>K_Z|o( z$o0V(9^<8N-FT8sLg`DWjH=+Q0Ao2VxE?>r?_Ljm;^{1;Y3>oE7S|jI*0dX7K3K*w?WM3TOTD#WyU$M*D> z(aSQ~1m6( z!Pip1bI?B3aLm!LQ5z_9jxs=F)Su-_%vE4nH_GjR%1%1xpUS-byk{rbt>c&Hnh`2V z6l_(qklRNbk_iJHYg*sN0y|hf*KI6Lz(Rbqb|~Bc&M-*EdCzX3R&JeJ*`Ybv9n5;G zz|#b2VKV;iIaHCx+%V6kKb=0|Oq1X^5Vy@AKQf$i`sDurjbQz{-L6{ET*k_zj2^_` zes}}(s8(sMZDwCF0*Kv);uj^0WQ>8%3H>V<8LbAYT;H&~4K?H^9%K-SL0n;m(hf21 zr|>7zoY&UAT;~2}jV8HM<_5=7f5dV1u75j8vi0IFd7!#0R2jwA$Us~Al zC5*7?HkV#vtP#r4v}^&8F=k=7XOER}>C>kaa^0FKJ6#daA<5jiEXt%Y=RHb?{{Y9UMHiw*mrl%`ZYR{OR4(Xag_u02 zg~8ZUk6vH$6=K^-ztgU7;JmYtD?%<{j^Ro&mkM`|GyDe!2cYz-cUsg*6N`IEnppsx z0SOWI?n4x*D$1GNyl&%wy$a+4ed^Wbwf(W7n54Ox=2LBC43hH^K4Ab6(Lp0UhI7)o z<9r2#!LSI4{yh) z~|-aX>!+HK&M{>KdJeThV*q7$pUQaK1!Tz)Hb_!Q?VhLH*E^px zx-;d~HaAd)=KA|mNbT)o0h(!NZ<;{cvyPngIUTcAzqe!m0HFT>{!1V6tJ;0)=~{-O zA&F&ccHcM*Hsfekj2@W?&5ZXwDz4ja`TqdC{M-KkimtESzm41a^k$Qdm=*)k*= zG;eOa`O2&E@$bp0to4a*Nr-8$2lp9N5ZrKb4hQ2=t)p(0w@CY&Cvy@yV}aOkI2?EH zPmVbPY0~vj&Noxde95P6qN0z|n@CTpDv%ldWyYmETrrl$%90fS< z(DwHAs}fyD=7866DysvvNcq1%!k7DE!Bp*&=Jf|WeqUOPGOj^ytLie_@utlXWA}>! z-FZJsY&Q~JATsH@ISb|FdJ~h^>(JEQZE!SU)E+kd;-bfms-z$}9e`Zb zElb2#7A+^3ut63=EUhP&#F5Au9fmr3pGwNI8ol(d9p<>I1ex;eRL7C%SdvISsu!Otr>k4bJenI5{;Vu+yPOlr z9%7&?)24HtE1i8a#TWM%VB5!KXv^}$B>5v5{{Ws~FggDKXpCoy^kBA8!?EE3_Yx8? 
zyR*qS>CaAm>RWVQET+PnP8FSi$oCkjbez?tCI_GBF?h<}@kQL)3R(ip&|2F%G?~c+ zxjk{zCLPicswoZMsP|0;TRuM!8U}Vn?_*T!^gKTd$J*0not8+lNO@!_a zbBvC0ngHjbn^I*3qkw`)ISg{y{{Z#V*X?Z{3|8Uy0_3X?et52plnD?Fu+9#6{&b4F znC)Ij`MITQbpsJ?ZrWoa7(#KK$V)#Rat&HoZxH>+l}GTla(ydJV_5*mR2#ALAAjY= zIhH2iy0#8L-JjPq?dBsgODmhD2@#4p4m%z_@s71Z4KG!eZc-*iICJwUTpvzzTkCid z=>g(3>Y;LadYZ9$scG@a8yPlm4t9;ZvCy1Q<9?#~9O~&;Yjj4~OgnJ7%CT(w=Z>GP zLq3q@h8UG|{3H&2KhLdazlf6J)?2G}cZZNXfx9Eu zfs^zftwf@lw>7?F`5fM-74vEGTVKg*7-bGp-QPT5<2hgH!RTv~O(r<);*KNq`n_N|LsZI-8HkqeF? zz}@Yw2-6@GR?~(ws4EfLB z=yEc9WOelER;k53gez$_*LM-kEDBv$!&n8I~ZjG2B36y*uzZH4A9= zTcq;gjGXU3I8Za!r$hZ}#A-IK{BKT1+~+*{lk3u;Xi2wFyNWRR)x=wTUC1q-XWTTDOrBMnu`hcalLMp5M-)wU5iS5wx;PCQcdLF&~Zx zYE@q*Sk$b{P;D&I6Wg!9<5Hn*QADEN5#=+Ys9%%?8v^H!0H-ay5WxWZGAfQiRV&YW ziSDNJO6^<@NiEO4BD8yq_yy13>&V9$^{VAvg<;~>C18B4eA|P2pIm!Xa#&i-#I=-c zUpttf>VMDcQtkzR)PlLm3~)ViDE`pc6}+V=y~#Xx&S|*Esuh?voDmgcf*&jlY*hU{ zsf(wgTt_Ne#qNmOkhWE=zTN9ZYJ@eFFpy9@5a4X2;Vm-~7)w`Wo|sR4)|Fa!?y z>CH=2o`K6Q!5)ik2G&?&NrP|Pi}zc(A6!z+VvXy~23W964rbk;LZ6|_p)h17|h4>9q%dS@W|W4~&P z`$FnS*oa#Ijm)pf`MUMzr|DS>;@eo*?hG9hb8b_P_!-7CfyflA;(Kp7kzGu`#D$9O zKAkzOd8yjUL-ud6p$?%t&nUS@h_4CQGYz$*-=Wy?zLED<- z^{*Qn39jOvAmBD5-P0RX?ay#dMRB^n#ldf)%Y3bFj;`+Ib~jXIqmiIs4ff^?6MST(1i?=F;G|dx^+1B z?O#7%d~RP7S?Pi9V-sawK^lP-#A-=o%Mf}gJoi!9*Fk&n2+gTItk)!%&gbLzsm3;L z=mvZHbvUmsrfEVmvyY*iaWwB^*4xIGQOY-MwuO{~BMhSBDh68wbGy`W+N$1sN!@Bz z-L%&TQ_J&WE(ilSI6U?rk9zTKOT?aRa!ni};%5w!A_B#T7z6>HyZ!oRsm-oj+p9t3 zh@+82tg(h1f_TSV{Q33%Rm(inncA71OhqWaBhx%#@nYgef*CDsB{9fBmevz*y8!&I zq_AK?0~~XVbAmiGRq+tkbxD}{prc4h1gffs8-Vo~#yzlWTUON~pHZ@ad6C*p8I;CQ z{H#L-C!U;j`g_+o945*!Zf)mlTa{pfLxax;8ST#>?$;!-Qmw4FN0VN+w02t#(&C!t zKrYet;FUz(fEceWjNlMC>+AK_n^g;?#SB1+*}^G}hVE4FBmu`h{XYuDvAB;GnsOG8 zQIb8+5Rj9zIXn)z1%Mdt2Nh#jxSAa(t00`%U(Lp2Er)qwa zXLG07tWP5EnNQ3}J%J}_&pi5b_|#gn4MO=OEFMBPmOKziDhUAm z!ypsWyN<^}@JI4BxeG7Z-DZttRE=ERZ!M9Z;p6V$ ziX|I-NS#`;CDNK}7*tAG9YQRK2Ia=>$DZB#e@-8Ha1Mcahi@-e|2m_+flwZ(oU}Th8u9cOn1Tq5<%l>Bj2{1ohbY33(M+t|V3Jws1RhMwl?QyT#^o@y0R+sV(}+raE{ 
zqmNkOjz!YvzP`JbXjV}-*jyPRM(7C1B=g5O=QzzxphJ0aEc4BGG@6C2nVQ|wGZM<7 z)U*t&IvB&Uoc{m-_N=sWa+}lo^e^JIi!j@xX);QYn~ODRSS)Bl5CSR|J%}f+7<2E6 z)PJ$frCCb_oYEu?%PIRd!X((Gm6LZseq63OQ|xOp>&20-lW_==>fT8ra|}$OP!>Cw z_vbwOVS3g70Qg9iX0=gyB(uvaMR3u%aS}5hkYm1AJQ2=m&OEVh*Ihs0{{T#_9>ZY_ z(@d~WAuA&jBdPn$ia^?O25?)RM?88@r`=rH?}qy7ONqS348|q}i9d9Vl>lw+)8^z8 zOQy>9Z6ZdsDesa0#>-UJpI0H4Cs$I!>HSAN!BWN%TImt&~n>Yuaqk-Q) zTEe6iMWLGaOy~K*DUFBL!Fu_TSKNX#=1<+Wfir|vtEZRa*D*JK+iZN zs^>k)>zd2B4;}TzwZw4kXW57O)t}}d3@GP6rDtkd-PeTl>2*yp?e6XEW{i`0AWo%6 z8*&Q{aBu>UYTt=9`z(amn^n+ljPfO|^wLfH zlyMP|22G8yhC&I-fH?>Kug13TC%2O7>eAj7SiI3GXu}QY11{j?o^VJw;<&wIQ?S%D z-9|;bh3)3@mE(+nrP+XPK;VT^NF6cIRXBW1b%lSkJZW=xyh|Or7KorHkl5-$UOx_^ zw|8qrp`7ab*GF-t>b6($$2=l*dt80u0>ibrPyoRlIrqhB>K9hGntEJCa%Z@B5=K<$ z$6#T`GoP6KHjd`Fp9I~DO-gpTja5?MluLv=a6shx{odZb)uA4)d_iHQHIxYyGn@r- z6_k9+8;*F%J^FOy)^0FYE;Sy8_K|nw-!e^^?j*A%*f!P$V_>76{Nt19)};Q)*T3i5 zZ~Oy4;aujQsK*uLvqJ3ls=!K)p|W?JVD{s`psjB?Kj*uD;6v5L^ldN1B|d#m|Jg2X B_m}_x literal 0 HcmV?d00001 diff --git a/python/packages/foundry/tests/conftest.py b/python/packages/foundry/tests/foundry/conftest.py similarity index 100% rename from python/packages/foundry/tests/conftest.py rename to python/packages/foundry/tests/foundry/conftest.py diff --git a/python/packages/foundry/tests/foundry/test_foundry_agent.py b/python/packages/foundry/tests/foundry/test_foundry_agent.py new file mode 100644 index 0000000000..2eb992d1a2 --- /dev/null +++ b/python/packages/foundry/tests/foundry/test_foundry_agent.py @@ -0,0 +1,413 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +from __future__ import annotations + +import os +import sys +from typing import Any +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest +from agent_framework import AgentResponse, ChatContext, ChatMiddleware, Message, tool +from azure.core.exceptions import ResourceNotFoundError +from azure.identity import AzureCliCredential + +from agent_framework_foundry._agent import ( + FoundryAgent, + RawFoundryAgent, + RawFoundryAgentChatClient, + _FoundryAgentChatClient, +) + +skip_if_foundry_agent_integration_tests_disabled = pytest.mark.skipif( + os.getenv("FOUNDRY_PROJECT_ENDPOINT", "") in ("", "https://test-project.services.ai.azure.com/") + or os.getenv("FOUNDRY_AGENT_NAME", "") == "", + reason="No real FOUNDRY_PROJECT_ENDPOINT or FOUNDRY_AGENT_NAME provided; skipping integration tests.", +) + +_FOUNDRY_AGENT_ENV_VARS = ( + "FOUNDRY_PROJECT_ENDPOINT", + "FOUNDRY_AGENT_NAME", + "FOUNDRY_AGENT_VERSION", +) + + +@pytest.fixture(autouse=True) +def clear_foundry_agent_settings_env(monkeypatch: pytest.MonkeyPatch, request: pytest.FixtureRequest) -> None: + """Prevent unit tests from inheriting Foundry agent settings from the shell.""" + + if request.node.get_closest_marker("integration") is not None: + return + + for env_var in _FOUNDRY_AGENT_ENV_VARS: + monkeypatch.delenv(env_var, raising=False) + + +def test_raw_foundry_agent_chat_client_init_requires_agent_name() -> None: + """Test that agent_name is required.""" + + with pytest.raises(ValueError, match="Agent name is required"): + RawFoundryAgentChatClient( + project_client=MagicMock(), + ) + + +def test_raw_foundry_agent_chat_client_init_with_agent_name() -> None: + """Test construction with agent_name and project_client.""" + + mock_project = MagicMock() + mock_project.get_openai_client.return_value = MagicMock() + + client = RawFoundryAgentChatClient( + project_client=mock_project, + agent_name="test-agent", + agent_version="1.0", + ) + + assert client.agent_name == "test-agent" + assert 
client.agent_version == "1.0" + + +def test_raw_foundry_agent_chat_client_get_agent_reference_with_version() -> None: + """Test agent reference includes version when provided.""" + + mock_project = MagicMock() + mock_project.get_openai_client.return_value = MagicMock() + + client = RawFoundryAgentChatClient( + project_client=mock_project, + agent_name="my-agent", + agent_version="2.0", + ) + + ref = client._get_agent_reference() + assert ref == {"name": "my-agent", "version": "2.0", "type": "agent_reference"} + + +def test_raw_foundry_agent_chat_client_get_agent_reference_without_version() -> None: + """Test agent reference omits version for HostedAgents.""" + + mock_project = MagicMock() + mock_project.get_openai_client.return_value = MagicMock() + + client = RawFoundryAgentChatClient( + project_client=mock_project, + agent_name="hosted-agent", + ) + + ref = client._get_agent_reference() + assert ref == {"name": "hosted-agent", "type": "agent_reference"} + assert "version" not in ref + + +def test_raw_foundry_agent_chat_client_as_agent_preserves_client_type() -> None: + """Test that as_agent() wraps the client in FoundryAgent using the same client class.""" + + class CustomClient(RawFoundryAgentChatClient): + pass + + mock_project = MagicMock() + mock_project.get_openai_client.return_value = MagicMock() + + client = CustomClient( + project_client=mock_project, + agent_name="test-agent", + agent_version="1.0", + ) + + agent = client.as_agent(instructions="You are helpful.") + + assert isinstance(agent, FoundryAgent) + assert agent.name == "test-agent" + assert isinstance(agent.client, CustomClient) + assert agent.client.project_client is mock_project + assert agent.client.agent_name == "test-agent" + assert agent.client.agent_version == "1.0" + + named_agent = client.as_agent(name="display-name", instructions="You are helpful.") + assert named_agent.name == "display-name" + assert named_agent.client.agent_name == "test-agent" + + +async def 
test_raw_foundry_agent_chat_client_prepare_options_validates_tools() -> None: + """Test that _prepare_options rejects non-FunctionTool objects.""" + + mock_project = MagicMock() + mock_project.get_openai_client.return_value = MagicMock() + + client = RawFoundryAgentChatClient( + project_client=mock_project, + agent_name="test-agent", + ) + + with pytest.raises(TypeError, match="Only FunctionTool objects are accepted"): + await client._prepare_options( + messages=[Message(role="user", contents="hi")], + options={"tools": [{"type": "function", "function": {"name": "bad"}}]}, + ) + + +async def test_raw_foundry_agent_chat_client_prepare_options_accepts_function_tools() -> None: + """Test that _prepare_options accepts FunctionTool objects.""" + + mock_project = MagicMock() + mock_openai = MagicMock() + mock_project.get_openai_client.return_value = mock_openai + + client = RawFoundryAgentChatClient( + project_client=mock_project, + agent_name="test-agent", + ) + + @tool(approval_mode="never_require") + def my_func() -> str: + """A test function.""" + + return "ok" + + with patch( + "agent_framework_openai._chat_client.RawOpenAIChatClient._prepare_options", + new_callable=AsyncMock, + return_value={}, + ): + result = await client._prepare_options( + messages=[Message(role="user", contents="hi")], + options={"tools": [my_func]}, + ) + + assert "extra_body" in result + assert result["extra_body"]["agent_reference"]["name"] == "test-agent" + + +def test_raw_foundry_agent_chat_client_check_model_presence_is_noop() -> None: + """Test that _check_model_presence does nothing (model is on service).""" + + mock_project = MagicMock() + mock_project.get_openai_client.return_value = MagicMock() + + client = RawFoundryAgentChatClient( + project_client=mock_project, + agent_name="test-agent", + ) + + options: dict[str, Any] = {} + client._check_model_presence(options) + assert "model" not in options + + +def test_foundry_agent_chat_client_init() -> None: + """Test construction of the 
full-middleware client.""" + + mock_project = MagicMock() + mock_project.get_openai_client.return_value = MagicMock() + + client = _FoundryAgentChatClient( + project_client=mock_project, + agent_name="test-agent", + agent_version="1.0", + ) + + assert client.agent_name == "test-agent" + + +def test_raw_foundry_agent_init_creates_client() -> None: + """Test that RawFoundryAgent creates a client internally.""" + + mock_project = MagicMock() + mock_project.get_openai_client.return_value = MagicMock() + + agent = RawFoundryAgent( + project_client=mock_project, + agent_name="test-agent", + agent_version="1.0", + ) + + assert agent.client is not None + assert agent.client.agent_name == "test-agent" + + +def test_raw_foundry_agent_init_with_custom_client_type() -> None: + """Test that client_type parameter is respected.""" + + mock_project = MagicMock() + mock_project.get_openai_client.return_value = MagicMock() + + agent = RawFoundryAgent( + project_client=mock_project, + agent_name="test-agent", + client_type=RawFoundryAgentChatClient, + ) + + assert isinstance(agent.client, RawFoundryAgentChatClient) + + +def test_raw_foundry_agent_init_rejects_invalid_client_type() -> None: + """Test that invalid client_type raises TypeError.""" + + with pytest.raises(TypeError, match="must be a subclass of RawFoundryAgentChatClient"): + RawFoundryAgent( + project_client=MagicMock(), + agent_name="test-agent", + client_type=object, # type: ignore[arg-type] + ) + + +def test_raw_foundry_agent_init_with_function_tools() -> None: + """Test that FunctionTool and callables are accepted.""" + + mock_project = MagicMock() + mock_project.get_openai_client.return_value = MagicMock() + + @tool(approval_mode="never_require") + def my_func() -> str: + """A test function.""" + + return "ok" + + agent = RawFoundryAgent( + project_client=mock_project, + agent_name="test-agent", + tools=[my_func], + ) + + assert agent.default_options.get("tools") is not None + + +def test_foundry_agent_init() -> 
None: + """Test construction of the full-middleware agent.""" + + mock_project = MagicMock() + mock_project.get_openai_client.return_value = MagicMock() + + agent = FoundryAgent( + project_client=mock_project, + agent_name="test-agent", + agent_version="1.0", + ) + + assert agent.client is not None + assert agent.client.agent_name == "test-agent" + + +def test_foundry_agent_init_with_middleware() -> None: + """Test that agent-level middleware is accepted.""" + + mock_project = MagicMock() + mock_project.get_openai_client.return_value = MagicMock() + + class MyMiddleware(ChatMiddleware): + async def process(self, context: ChatContext) -> None: + pass + + agent = FoundryAgent( + project_client=mock_project, + agent_name="test-agent", + middleware=[MyMiddleware()], + ) + + assert agent.client is not None + + +async def test_foundry_agent_configure_azure_monitor() -> None: + """Test configure_azure_monitor delegates through the underlying client.""" + + mock_project = MagicMock() + mock_project.get_openai_client.return_value = MagicMock() + mock_project.telemetry.get_application_insights_connection_string = AsyncMock( + return_value="InstrumentationKey=test-key;IngestionEndpoint=https://test.endpoint" + ) + agent = FoundryAgent(project_client=mock_project, agent_name="test-agent") + + mock_configure = MagicMock() + mock_views = MagicMock(return_value=[]) + mock_resource = MagicMock() + mock_enable = MagicMock() + + with ( + patch.dict( + "sys.modules", + {"azure.monitor.opentelemetry": MagicMock(configure_azure_monitor=mock_configure)}, + ), + patch("agent_framework.observability.create_metric_views", mock_views), + patch("agent_framework.observability.create_resource", return_value=mock_resource), + patch("agent_framework.observability.enable_instrumentation", mock_enable), + ): + await agent.configure_azure_monitor(enable_sensitive_data=True) + + mock_project.telemetry.get_application_insights_connection_string.assert_called_once() + call_kwargs = 
mock_configure.call_args.kwargs + assert call_kwargs["connection_string"] == "InstrumentationKey=test-key;IngestionEndpoint=https://test.endpoint" + assert call_kwargs["views"] == [] + assert call_kwargs["resource"] is mock_resource + mock_enable.assert_called_once_with(enable_sensitive_data=True) + + +async def test_foundry_agent_configure_azure_monitor_resource_not_found() -> None: + """Test configure_azure_monitor handles ResourceNotFoundError gracefully.""" + + mock_project = MagicMock() + mock_project.get_openai_client.return_value = MagicMock() + mock_project.telemetry.get_application_insights_connection_string = AsyncMock( + side_effect=ResourceNotFoundError("No Application Insights found") + ) + agent = FoundryAgent(project_client=mock_project, agent_name="test-agent") + + await agent.configure_azure_monitor() + + mock_project.telemetry.get_application_insights_connection_string.assert_called_once() + + +async def test_foundry_agent_configure_azure_monitor_import_error() -> None: + """Test configure_azure_monitor raises ImportError when Azure Monitor is unavailable.""" + + mock_project = MagicMock() + mock_project.get_openai_client.return_value = MagicMock() + mock_project.telemetry.get_application_insights_connection_string = AsyncMock( + return_value="InstrumentationKey=test-key" + ) + agent = FoundryAgent(project_client=mock_project, agent_name="test-agent") + original_import = __import__ + + def _import_with_missing_azure_monitor( + name: str, + globals: dict[str, Any] | None = None, + locals: dict[str, Any] | None = None, + fromlist: tuple[str, ...] 
+async def test_foundry_agent_custom_client_run() -> None:
+    """Smoke-test FoundryAgent with a custom client_type against a real configured agent."""
+ +from __future__ import annotations + +import json +import os +import sys +from functools import wraps +from pathlib import Path +from typing import Annotated, Any +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest +from agent_framework import ChatResponse, Content, Message, SupportsChatGetResponse, tool +from agent_framework._telemetry import AGENT_FRAMEWORK_USER_AGENT +from agent_framework.exceptions import ChatClientException, ChatClientInvalidRequestException +from agent_framework_openai import OpenAIContentFilterException +from azure.core.exceptions import ResourceNotFoundError +from azure.identity import AzureCliCredential +from openai import BadRequestError +from pydantic import BaseModel +from pytest import param + +from agent_framework_foundry import FoundryChatClient, RawFoundryChatClient + + +class OutputStruct(BaseModel): + """A structured output for testing purposes.""" + + location: str + weather: str | None = None + + +@tool(approval_mode="never_require") +async def get_weather(location: Annotated[str, "The location as a city name"]) -> str: + """Get the current weather in a given location.""" + return f"The current weather in {location} is sunny." 
+ + +skip_if_foundry_integration_tests_disabled = pytest.mark.skipif( + os.getenv("FOUNDRY_PROJECT_ENDPOINT", "") in ("", "https://test-project.services.ai.azure.com/") + or os.getenv("FOUNDRY_MODEL", "") == "", + reason="No real FOUNDRY_PROJECT_ENDPOINT or FOUNDRY_MODEL provided; skipping integration tests.", +) + +_TEST_FOUNDRY_PROJECT_ENDPOINT = "https://test-project.services.ai.azure.com/" +_TEST_FOUNDRY_MODEL = "test-gpt-4o" +_FOUNDRY_CHAT_ENV_VARS = ("FOUNDRY_PROJECT_ENDPOINT", "FOUNDRY_MODEL") + + +@pytest.fixture(autouse=True) +def clear_foundry_chat_settings_env(monkeypatch: pytest.MonkeyPatch, request: pytest.FixtureRequest) -> None: + """Prevent unit tests from inheriting Foundry chat settings from the shell.""" + + if request.node.get_closest_marker("integration") is not None: + return + + for env_var in _FOUNDRY_CHAT_ENV_VARS: + monkeypatch.delenv(env_var, raising=False) + + +def _with_foundry_debug() -> Any: + def decorator(func: Any) -> Any: + @wraps(func) + async def wrapper(*args: Any, **kwargs: Any) -> Any: + try: + return await func(*args, **kwargs) + except Exception as exc: + debug_message = ( + "Foundry debug: " + f"project_endpoint={os.getenv('FOUNDRY_PROJECT_ENDPOINT', '')}, " + f"model={os.getenv('FOUNDRY_MODEL', '')}" + ) + if hasattr(exc, "add_note"): + exc.add_note(debug_message) + elif exc.args: + exc.args = (f"{exc.args[0]}\n{debug_message}", *exc.args[1:]) + else: + exc.args = (debug_message,) + raise + + return wrapper + + return decorator + + +def _make_mock_openai_client() -> MagicMock: + client = MagicMock() + client.default_headers = {} + client.responses = MagicMock() + client.responses.create = AsyncMock() + client.responses.parse = AsyncMock() + client.files = MagicMock() + client.files.create = AsyncMock() + client.files.delete = AsyncMock() + client.vector_stores = MagicMock() + client.vector_stores.create = AsyncMock() + client.vector_stores.delete = AsyncMock() + client.vector_stores.files = MagicMock() + 
client.vector_stores.files.create_and_poll = AsyncMock() + return client + + +async def create_vector_store(client: FoundryChatClient) -> tuple[str, Content]: + """Create a vector store with sample documents for testing.""" + file = await client.client.files.create( + file=("todays_weather.txt", b"The weather today is sunny with a high of 75F."), + purpose="user_data", + ) + vector_store = await client.client.vector_stores.create( + name="knowledge_base", + expires_after={"anchor": "last_active_at", "days": 1}, + ) + result = await client.client.vector_stores.files.create_and_poll( + vector_store_id=vector_store.id, + file_id=file.id, + poll_interval_ms=1000, + ) + if result.last_error is not None: + raise RuntimeError(f"Vector store file processing failed with status: {result.last_error.message}") + + return file.id, Content.from_hosted_vector_store(vector_store_id=vector_store.id) + + +async def delete_vector_store(client: FoundryChatClient, file_id: str, vector_store_id: str) -> None: + """Delete the vector store after tests.""" + await client.client.vector_stores.delete(vector_store_id=vector_store_id) + await client.client.files.delete(file_id=file_id) + + +def test_init() -> None: + mock_openai_client = _make_mock_openai_client() + mock_project_client = MagicMock() + mock_project_client.get_openai_client.return_value = mock_openai_client + + client = FoundryChatClient(project_client=mock_project_client, model=_TEST_FOUNDRY_MODEL) + + assert client.model == _TEST_FOUNDRY_MODEL + assert isinstance(client, SupportsChatGetResponse) + assert client.project_client is mock_project_client + + +def test_init_with_default_header() -> None: + default_headers = {"X-Unit-Test": "test-guid"} + mock_openai_client = _make_mock_openai_client() + project_client = MagicMock() + project_client.get_openai_client.return_value = mock_openai_client + + client = FoundryChatClient( + project_client=project_client, + model=_TEST_FOUNDRY_MODEL, + default_headers=default_headers, + ) + + 
assert client.model == _TEST_FOUNDRY_MODEL + for key, value in default_headers.items(): + assert client.default_headers is not None + assert key in client.default_headers + assert client.default_headers[key] == value + + +def test_init_with_project_endpoint_creates_project_client() -> None: + credential = MagicMock() + mock_openai_client = _make_mock_openai_client() + project_client = MagicMock() + project_client.get_openai_client.return_value = mock_openai_client + + with patch("agent_framework_foundry._chat_client.AIProjectClient", return_value=project_client) as factory: + client = FoundryChatClient( + project_endpoint=_TEST_FOUNDRY_PROJECT_ENDPOINT, + model=_TEST_FOUNDRY_MODEL, + credential=credential, + allow_preview=True, + ) + + assert client.project_client is project_client + assert client.model == _TEST_FOUNDRY_MODEL + assert factory.call_args.kwargs["endpoint"] == _TEST_FOUNDRY_PROJECT_ENDPOINT + assert factory.call_args.kwargs["credential"] is credential + assert factory.call_args.kwargs["allow_preview"] is True + assert factory.call_args.kwargs["user_agent"] == AGENT_FRAMEWORK_USER_AGENT + + +def test_init_with_empty_model_raises(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.delenv("FOUNDRY_MODEL", raising=False) + mock_openai_client = _make_mock_openai_client() + mock_project_client = MagicMock() + mock_project_client.get_openai_client.return_value = mock_openai_client + + with pytest.raises(ValueError, match="Model is required"): + FoundryChatClient(project_client=mock_project_client) + + +def test_init_with_empty_project_source_raises(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.delenv("FOUNDRY_PROJECT_ENDPOINT", raising=False) + + with pytest.raises(ValueError, match="Either 'project_endpoint' or 'project_client' is required"): + FoundryChatClient(model=_TEST_FOUNDRY_MODEL) + + +def test_init_with_project_endpoint_requires_credential() -> None: + with pytest.raises(ValueError, match="Azure credential is required"): + 
FoundryChatClient( + project_endpoint=_TEST_FOUNDRY_PROJECT_ENDPOINT, + model=_TEST_FOUNDRY_MODEL, + ) + + +async def test_configure_azure_monitor() -> None: + mock_openai_client = _make_mock_openai_client() + project_client = MagicMock() + project_client.get_openai_client.return_value = mock_openai_client + project_client.telemetry.get_application_insights_connection_string = AsyncMock( + return_value="InstrumentationKey=test-key;IngestionEndpoint=https://test.endpoint" + ) + client = FoundryChatClient(project_client=project_client, model=_TEST_FOUNDRY_MODEL) + + mock_configure = MagicMock() + mock_views = MagicMock(return_value=[]) + mock_resource = MagicMock() + mock_enable = MagicMock() + + with ( + patch.dict( + "sys.modules", + {"azure.monitor.opentelemetry": MagicMock(configure_azure_monitor=mock_configure)}, + ), + patch("agent_framework.observability.create_metric_views", mock_views), + patch("agent_framework.observability.create_resource", return_value=mock_resource), + patch("agent_framework.observability.enable_instrumentation", mock_enable), + ): + await client.configure_azure_monitor(enable_sensitive_data=True) + + project_client.telemetry.get_application_insights_connection_string.assert_called_once() + mock_configure.assert_called_once() + call_kwargs = mock_configure.call_args.kwargs + assert call_kwargs["connection_string"] == "InstrumentationKey=test-key;IngestionEndpoint=https://test.endpoint" + assert call_kwargs["views"] == [] + assert call_kwargs["resource"] is mock_resource + mock_enable.assert_called_once_with(enable_sensitive_data=True) + + +async def test_configure_azure_monitor_resource_not_found() -> None: + mock_openai_client = _make_mock_openai_client() + project_client = MagicMock() + project_client.get_openai_client.return_value = mock_openai_client + project_client.telemetry.get_application_insights_connection_string = AsyncMock( + side_effect=ResourceNotFoundError("No Application Insights found") + ) + client = 
FoundryChatClient(project_client=project_client, model=_TEST_FOUNDRY_MODEL) + + await client.configure_azure_monitor() + + project_client.telemetry.get_application_insights_connection_string.assert_called_once() + + +async def test_configure_azure_monitor_import_error() -> None: + mock_openai_client = _make_mock_openai_client() + project_client = MagicMock() + project_client.get_openai_client.return_value = mock_openai_client + project_client.telemetry.get_application_insights_connection_string = AsyncMock( + return_value="InstrumentationKey=test-key" + ) + client = FoundryChatClient(project_client=project_client, model=_TEST_FOUNDRY_MODEL) + original_import = __import__ + + def _import_with_missing_azure_monitor( + name: str, + globals: dict[str, Any] | None = None, + locals: dict[str, Any] | None = None, + fromlist: tuple[str, ...] = (), + level: int = 0, + ) -> Any: + if name == "azure.monitor.opentelemetry": + raise ImportError("No module named 'azure.monitor.opentelemetry'") + return original_import(name, globals, locals, fromlist, level) + + with ( + patch.dict(sys.modules, {"azure.monitor.opentelemetry": None}), + patch("builtins.__import__", side_effect=_import_with_missing_azure_monitor), + pytest.raises(ImportError, match="azure-monitor-opentelemetry is required"), + ): + await client.configure_azure_monitor() + + +async def test_configure_azure_monitor_with_custom_resource() -> None: + mock_openai_client = _make_mock_openai_client() + project_client = MagicMock() + project_client.get_openai_client.return_value = mock_openai_client + project_client.telemetry.get_application_insights_connection_string = AsyncMock( + return_value="InstrumentationKey=test-key" + ) + client = FoundryChatClient(project_client=project_client, model=_TEST_FOUNDRY_MODEL) + + custom_resource = MagicMock() + mock_configure = MagicMock() + + with ( + patch.dict( + "sys.modules", + {"azure.monitor.opentelemetry": MagicMock(configure_azure_monitor=mock_configure)}, + ), + 
patch("agent_framework.observability.create_metric_views", return_value=[]), + patch("agent_framework.observability.create_resource") as mock_create_resource, + patch("agent_framework.observability.enable_instrumentation"), + ): + await client.configure_azure_monitor(resource=custom_resource) + + mock_create_resource.assert_not_called() + call_kwargs = mock_configure.call_args.kwargs + assert call_kwargs["resource"] is custom_resource + + +async def test_get_response_with_invalid_input() -> None: + mock_openai_client = _make_mock_openai_client() + project_client = MagicMock() + project_client.get_openai_client.return_value = mock_openai_client + client = FoundryChatClient(project_client=project_client, model="test-model") + + with pytest.raises(ChatClientInvalidRequestException, match="Messages are required"): + await client.get_response(messages=[]) + + +async def test_web_search_tool_with_location() -> None: + mock_openai_client = _make_mock_openai_client() + project_client = MagicMock() + project_client.get_openai_client.return_value = mock_openai_client + client = FoundryChatClient(project_client=project_client, model="test-model") + + web_search_tool = FoundryChatClient.get_web_search_tool( + user_location={ + "city": "Seattle", + "country": "US", + "region": "WA", + "timezone": "America/Los_Angeles", + } + ) + + assert web_search_tool.user_location.city == "Seattle" + assert web_search_tool.user_location.country == "US" + _, run_options, _ = await client._prepare_request( + messages=[Message(role="user", text="What's the weather?")], + options={"tools": [web_search_tool], "tool_choice": "auto"}, + ) + + assert run_options["tools"] == [web_search_tool] + assert run_options["tool_choice"] == "auto" + + +async def test_code_interpreter_tool_variations() -> None: + mock_openai_client = _make_mock_openai_client() + project_client = MagicMock() + project_client.get_openai_client.return_value = mock_openai_client + client = 
FoundryChatClient(project_client=project_client, model="test-model") + + code_tool = FoundryChatClient.get_code_interpreter_tool() + assert code_tool.container["type"] == "auto" + + _, run_options, _ = await client._prepare_request( + messages=[Message("user", ["Run some code"])], + options={"tools": [code_tool]}, + ) + + assert run_options["tools"] == [code_tool] + + code_tool_with_files = FoundryChatClient.get_code_interpreter_tool(file_ids=["file1", "file2"]) + assert code_tool_with_files.container.file_ids == ["file1", "file2"] + + _, run_options, _ = await client._prepare_request( + messages=[Message(role="user", text="Process these files")], + options={"tools": [code_tool_with_files]}, + ) + + assert run_options["tools"] == [code_tool_with_files] + + +async def test_hosted_file_search_tool_validation() -> None: + mock_openai_client = _make_mock_openai_client() + project_client = MagicMock() + project_client.get_openai_client.return_value = mock_openai_client + client = FoundryChatClient(project_client=project_client, model="test-model") + + with pytest.raises(ValueError, match="vector_store_ids"): + FoundryChatClient.get_file_search_tool(vector_store_ids=[]) + + file_search_tool = FoundryChatClient.get_file_search_tool(vector_store_ids=["vs_123"]) + assert file_search_tool.vector_store_ids == ["vs_123"] + + _, run_options, _ = await client._prepare_request( + messages=[Message("user", ["Test"])], + options={"tools": [file_search_tool]}, + ) + + assert run_options["tools"] == [file_search_tool] + + +async def test_chat_message_parsing_with_function_calls() -> None: + mock_openai_client = _make_mock_openai_client() + project_client = MagicMock() + project_client.get_openai_client.return_value = mock_openai_client + client = FoundryChatClient(project_client=project_client, model="test-model") + + function_call = Content.from_function_call( + call_id="test-call-id", + name="test_function", + arguments='{"param": "value"}', + additional_properties={"fc_id": 
"test-fc-id"}, + ) + function_result = Content.from_function_result(call_id="test-call-id", result="Function executed successfully") + messages = [ + Message(role="user", text="Call a function"), + Message(role="assistant", contents=[function_call]), + Message(role="tool", contents=[function_result]), + ] + + prepared_messages = client._prepare_messages_for_openai(messages) + + assert prepared_messages == [ + { + "type": "message", + "role": "user", + "content": [{"type": "input_text", "text": "Call a function"}], + }, + { + "call_id": "test-call-id", + "id": "fc_test-fc-id", + "type": "function_call", + "name": "test_function", + "arguments": '{"param": "value"}', + }, + { + "call_id": "test-call-id", + "type": "function_call_output", + "output": "Function executed successfully", + }, + ] + + +async def test_content_filter_exception() -> None: + mock_openai_client = _make_mock_openai_client() + project_client = MagicMock() + project_client.get_openai_client.return_value = mock_openai_client + client = FoundryChatClient(project_client=project_client, model="test-model") + + mock_error = BadRequestError( + message="Content filter error", + response=MagicMock(), + body={"error": {"code": "content_filter", "message": "Content filter error"}}, + ) + mock_error.code = "content_filter" + client.client.responses.create.side_effect = mock_error + + with pytest.raises(OpenAIContentFilterException) as exc_info: + await client.get_response(messages=[Message(role="user", text="Test message")]) + + assert "content error" in str(exc_info.value) + + +async def test_response_format_parse_path() -> None: + mock_openai_client = _make_mock_openai_client() + project_client = MagicMock() + project_client.get_openai_client.return_value = mock_openai_client + client = FoundryChatClient(project_client=project_client, model="test-model") + + mock_parsed_response = MagicMock() + mock_parsed_response.id = "parsed_response_123" + mock_parsed_response.text = "Parsed response" + 
mock_parsed_response.model = "test-model" + mock_parsed_response.created_at = 1000000000 + mock_parsed_response.metadata = {} + mock_parsed_response.output_parsed = None + mock_parsed_response.usage = None + mock_parsed_response.finish_reason = None + mock_parsed_response.conversation = None + client.client.responses.parse = AsyncMock(return_value=mock_parsed_response) + + response = await client.get_response( + messages=[Message(role="user", text="Test message")], + options={"response_format": OutputStruct, "store": True}, + ) + assert response.response_id == "parsed_response_123" + assert response.conversation_id == "parsed_response_123" + assert response.model == "test-model" + + +async def test_response_format_parse_path_with_conversation_id() -> None: + mock_openai_client = _make_mock_openai_client() + project_client = MagicMock() + project_client.get_openai_client.return_value = mock_openai_client + client = FoundryChatClient(project_client=project_client, model="test-model") + + mock_parsed_response = MagicMock() + mock_parsed_response.id = "parsed_response_123" + mock_parsed_response.text = "Parsed response" + mock_parsed_response.model = "test-model" + mock_parsed_response.created_at = 1000000000 + mock_parsed_response.metadata = {} + mock_parsed_response.output_parsed = None + mock_parsed_response.usage = None + mock_parsed_response.finish_reason = None + mock_parsed_response.conversation = MagicMock() + mock_parsed_response.conversation.id = "conversation_456" + client.client.responses.parse = AsyncMock(return_value=mock_parsed_response) + + response = await client.get_response( + messages=[Message(role="user", text="Test message")], + options={"response_format": OutputStruct, "store": True}, + ) + assert response.response_id == "parsed_response_123" + assert response.conversation_id == "conversation_456" + assert response.model == "test-model" + + +async def test_bad_request_error_non_content_filter() -> None: + mock_openai_client = 
_make_mock_openai_client() + project_client = MagicMock() + project_client.get_openai_client.return_value = mock_openai_client + client = FoundryChatClient(project_client=project_client, model="test-model") + + mock_error = BadRequestError( + message="Invalid request", + response=MagicMock(), + body={"error": {"code": "invalid_request", "message": "Invalid request"}}, + ) + mock_error.code = "invalid_request" + client.client.responses.parse = AsyncMock(side_effect=mock_error) + + with pytest.raises(ChatClientException) as exc_info: + await client.get_response( + messages=[Message(role="user", text="Test message")], + options={"response_format": OutputStruct}, + ) + + assert "failed to complete the prompt" in str(exc_info.value) + + +def test_get_mcp_tool_with_project_connection_id() -> None: + tool_config = FoundryChatClient.get_mcp_tool( + name="Docs MCP", + project_connection_id="conn-123", + allowed_tools=["search_docs"], + ) + + assert tool_config["project_connection_id"] == "conn-123" + assert tool_config["allowed_tools"] == ["search_docs"] + assert tool_config["server_label"] == "Docs_MCP" + + +@pytest.mark.flaky +@pytest.mark.integration +@skip_if_foundry_integration_tests_disabled +@pytest.mark.parametrize( + "option_name,option_value,needs_validation", + [ + param("max_tokens", 500, False, id="max_tokens"), + param("seed", 123, False, id="seed"), + param("user", "test-user-id", False, id="user"), + param("metadata", {"test_key": "test_value"}, False, id="metadata"), + param("tool_choice", "none", True, id="tool_choice_none"), + param("tools", [get_weather], True, id="tools_function"), + param("tool_choice", "auto", True, id="tool_choice_auto"), + param("response_format", OutputStruct, True, id="response_format_pydantic"), + param( + "response_format", + { + "type": "json_schema", + "json_schema": { + "name": "WeatherDigest", + "strict": True, + "schema": { + "title": "WeatherDigest", + "type": "object", + "properties": { + "location": {"type": "string"}, + 
"conditions": {"type": "string"}, + }, + "required": ["location", "conditions"], + "additionalProperties": False, + }, + }, + }, + True, + id="response_format_runtime_json_schema", + ), + ], +) +@_with_foundry_debug() +async def test_integration_options( + option_name: str, + option_value: Any, + needs_validation: bool, +) -> None: + client = FoundryChatClient(credential=AzureCliCredential()) + client.function_invocation_configuration["max_iterations"] = 2 + + for streaming in [False, True]: + if option_name.startswith("tools") or option_name.startswith("tool_choice"): + messages = [Message(role="user", text="What is the weather in Seattle?")] + elif option_name.startswith("response_format"): + messages = [Message(role="user", text="The weather in Seattle is sunny")] + messages.append(Message(role="user", text="What is the weather in Seattle?")) + else: + messages = [Message(role="user", text="Say 'Hello World' briefly.")] + + options: dict[str, Any] = {option_name: option_value} + if option_name.startswith("tool_choice"): + options["tools"] = [get_weather] + + if streaming: + response = await client.get_response(messages=messages, options=options, stream=True).get_final_response() + else: + response = await client.get_response(messages=messages, options=options) + + assert isinstance(response, ChatResponse) + assert response.text is not None + assert len(response.text) > 0 + + if needs_validation: + if option_name.startswith("tools") or option_name.startswith("tool_choice"): + text = response.text.lower() + assert "sunny" in text or "seattle" in text + elif option_name.startswith("response_format"): + if option_value == OutputStruct: + assert response.value is not None + assert isinstance(response.value, OutputStruct) + assert "seattle" in response.value.location.lower() + else: + assert response.value is None + response_value = json.loads(response.text) + assert isinstance(response_value, dict) + assert "location" in response_value + + +@pytest.mark.flaky 
+@pytest.mark.integration +@skip_if_foundry_integration_tests_disabled +@_with_foundry_debug() +async def test_integration_web_search() -> None: + client = FoundryChatClient(credential=AzureCliCredential()) + + for streaming in [False, True]: + web_search_tool = FoundryChatClient.get_web_search_tool() + content = { + "messages": [ + Message( + role="user", + text="Who are the main characters of Kpop Demon Hunters? Do a web search to find the answer.", + ) + ], + "options": {"tool_choice": "auto", "tools": [web_search_tool]}, + } + if streaming: + response = await client.get_response(stream=True, **content).get_final_response() + else: + response = await client.get_response(**content) + + assert isinstance(response, ChatResponse) + assert "Rumi" in response.text + assert "Mira" in response.text + assert "Zoey" in response.text + + +@pytest.mark.flaky +@pytest.mark.integration +@skip_if_foundry_integration_tests_disabled +@_with_foundry_debug() +async def test_integration_tool_rich_content_image() -> None: + image_path = Path(__file__).parent.parent / "assets" / "sample_image.jpg" + image_bytes = image_path.read_bytes() + + @tool(approval_mode="never_require") + def get_test_image() -> Content: + return Content.from_data(data=image_bytes, media_type="image/jpeg") + + client = FoundryChatClient(credential=AzureCliCredential()) + client.function_invocation_configuration["max_iterations"] = 2 + + for streaming in [False, True]: + messages = [Message(role="user", text="Call the get_test_image tool and describe what you see.")] + options: dict[str, Any] = {"tools": [get_test_image], "tool_choice": "auto"} + + if streaming: + response = await client.get_response(messages=messages, options=options, stream=True).get_final_response() + else: + response = await client.get_response(messages=messages, options=options) + + assert isinstance(response, ChatResponse) + assert response.text is not None + assert len(response.text) > 0 + assert "house" in response.text.lower(), f"Model 
did not describe the house image. Response: {response.text}" + + +def test_get_code_interpreter_tool() -> None: + """Test code interpreter tool creation.""" + + tool_obj = RawFoundryChatClient.get_code_interpreter_tool() + assert tool_obj is not None + + +def test_get_code_interpreter_tool_with_file_ids() -> None: + """Test code interpreter tool with file IDs.""" + + tool_obj = RawFoundryChatClient.get_code_interpreter_tool(file_ids=["file-abc123"]) + assert tool_obj is not None + + +def test_get_file_search_tool() -> None: + """Test file search tool creation.""" + + tool_obj = RawFoundryChatClient.get_file_search_tool(vector_store_ids=["vs_abc123"]) + assert tool_obj is not None + + +def test_get_file_search_tool_requires_vector_store_ids() -> None: + """Test that empty vector_store_ids raises ValueError.""" + + with pytest.raises(ValueError, match="vector_store_ids"): + RawFoundryChatClient.get_file_search_tool(vector_store_ids=[]) + + +def test_get_web_search_tool() -> None: + """Test web search tool creation.""" + + tool_obj = RawFoundryChatClient.get_web_search_tool() + assert tool_obj is not None + + +def test_get_web_search_tool_with_location() -> None: + """Test web search tool with user location.""" + + tool_obj = RawFoundryChatClient.get_web_search_tool( + user_location={"city": "Seattle", "country": "US"}, + search_context_size="high", + ) + assert tool_obj is not None + + +def test_get_image_generation_tool() -> None: + """Test image generation tool creation.""" + + tool_obj = RawFoundryChatClient.get_image_generation_tool() + assert tool_obj is not None + + +def test_get_mcp_tool() -> None: + """Test MCP tool creation.""" + + tool_obj = RawFoundryChatClient.get_mcp_tool( + name="my_mcp", + url="https://mcp.example.com", + ) + assert tool_obj is not None + + +def test_get_mcp_tool_with_connection_id() -> None: + """Test MCP tool with project connection ID.""" + + tool_obj = RawFoundryChatClient.get_mcp_tool( + name="github_mcp", + 
project_connection_id="conn_abc123", + description="GitHub MCP via Foundry", + ) + assert tool_obj is not None diff --git a/python/packages/foundry/tests/foundry/test_foundry_memory_provider.py b/python/packages/foundry/tests/foundry/test_foundry_memory_provider.py new file mode 100644 index 0000000000..005eed29ce --- /dev/null +++ b/python/packages/foundry/tests/foundry/test_foundry_memory_provider.py @@ -0,0 +1,501 @@ +# Copyright (c) Microsoft. All rights reserved. +# pyright: reportPrivateUsage=false + +from __future__ import annotations + +import os +from unittest.mock import AsyncMock, Mock, patch + +import pytest +from agent_framework import AGENT_FRAMEWORK_USER_AGENT, AgentResponse, Message +from agent_framework._sessions import AgentSession, SessionContext + +from agent_framework_foundry._memory_provider import FoundryMemoryProvider + + +@pytest.fixture +def mock_project_client() -> AsyncMock: + """Create a mock AIProjectClient.""" + mock_client = AsyncMock() + mock_client.beta = AsyncMock() + mock_client.beta.memory_stores = AsyncMock() + mock_client.beta.memory_stores.search_memories = AsyncMock() + mock_client.beta.memory_stores.begin_update_memories = AsyncMock() + mock_client.__aenter__ = AsyncMock(return_value=mock_client) + mock_client.__aexit__ = AsyncMock() + return mock_client + + +@pytest.fixture +def mock_credential() -> Mock: + """Create a mock Azure credential.""" + return Mock() + + +# -- Initialization tests ------------------------------------------------------ + + +def test_init_with_all_params(mock_project_client: AsyncMock) -> None: + provider = FoundryMemoryProvider( + source_id="custom_source", + project_client=mock_project_client, + memory_store_name="test_store", + scope="user_123", + context_prompt="Custom prompt", + update_delay=60, + ) + assert provider.source_id == "custom_source" + assert provider.project_client is mock_project_client + assert provider.memory_store_name == "test_store" + assert provider.scope == "user_123" + 
assert provider.context_prompt == "Custom prompt" + assert provider.update_delay == 60 + + +def test_init_default_source_id(mock_project_client: AsyncMock) -> None: + provider = FoundryMemoryProvider( + project_client=mock_project_client, + memory_store_name="test_store", + scope="user_123", + ) + assert provider.source_id == FoundryMemoryProvider.DEFAULT_SOURCE_ID + + +def test_init_default_context_prompt(mock_project_client: AsyncMock) -> None: + provider = FoundryMemoryProvider( + project_client=mock_project_client, + memory_store_name="test_store", + scope="user_123", + ) + assert provider.context_prompt == FoundryMemoryProvider.DEFAULT_CONTEXT_PROMPT + + +def test_init_default_update_delay(mock_project_client: AsyncMock) -> None: + provider = FoundryMemoryProvider( + project_client=mock_project_client, + memory_store_name="test_store", + scope="user_123", + ) + assert provider.update_delay == 300 + + +def test_init_with_project_endpoint_and_credential(mock_project_client: AsyncMock, mock_credential: Mock) -> None: + with patch("agent_framework_foundry._memory_provider.AIProjectClient") as mock_ai_project_client: + mock_ai_project_client.return_value = mock_project_client + provider = FoundryMemoryProvider( + project_endpoint="https://test.project.endpoint", + credential=mock_credential, # type: ignore[arg-type] + allow_preview=True, + memory_store_name="test_store", + scope="user_123", + ) + assert provider.project_client is mock_project_client + mock_ai_project_client.assert_called_once_with( + endpoint="https://test.project.endpoint", + credential=mock_credential, + allow_preview=True, + user_agent=AGENT_FRAMEWORK_USER_AGENT, + ) + + +def test_init_requires_project_endpoint_without_project_client() -> None: + with ( + patch("agent_framework_foundry._memory_provider.load_settings") as mock_load_settings, + patch.dict(os.environ, {}, clear=True), + pytest.raises(ValueError, match="project endpoint is required"), + ): + mock_load_settings.return_value = 
{"project_endpoint": None} + FoundryMemoryProvider( + memory_store_name="test_store", + scope="user_123", + ) + + +def test_init_requires_credential_without_project_client() -> None: + with pytest.raises(ValueError, match="Azure credential is required"): + FoundryMemoryProvider( + project_endpoint="https://test.project.endpoint", + memory_store_name="test_store", + scope="user_123", + ) + + +def test_init_requires_memory_store_name(mock_project_client: AsyncMock) -> None: + with pytest.raises(ValueError, match="memory_store_name is required"): + FoundryMemoryProvider( + project_client=mock_project_client, + memory_store_name="", + scope="user_123", + ) + + +def test_init_requires_scope(mock_project_client: AsyncMock) -> None: + with pytest.raises(ValueError, match="scope is required"): + FoundryMemoryProvider( + project_client=mock_project_client, + memory_store_name="test_store", + scope="", + ) + + +# -- before_run tests ---------------------------------------------------------- + + +async def test_retrieves_static_memories_on_first_run(mock_project_client: AsyncMock) -> None: + mem1 = Mock() + mem1.memory_item.content = "User prefers Python" + mem2 = Mock() + mem2.memory_item.content = "User is based in Seattle" + mock_search_result = Mock() + mock_search_result.memories = [mem1, mem2] + mock_project_client.beta.memory_stores.search_memories.return_value = mock_search_result + + provider = FoundryMemoryProvider( + project_client=mock_project_client, + memory_store_name="test_store", + scope="user_123", + ) + session = AgentSession(session_id="test-session") + ctx = SessionContext(input_messages=[Message(role="user", text="Hello")], session_id="s1") + + await provider.before_run( # type: ignore[arg-type] + agent=None, session=session, context=ctx, state=session.state.setdefault(provider.source_id, {}) + ) + + # Should call search_memories twice: once for static, once for contextual + assert mock_project_client.beta.memory_stores.search_memories.call_count == 2 + 
# Static memories should be cached + assert len(session.state[provider.source_id]["static_memories"]) == 2 + assert session.state[provider.source_id]["initialized"] is True + + +async def test_contextual_memories_added_to_context(mock_project_client: AsyncMock) -> None: + # Mock static search (first call) + static_mem = Mock() + static_mem.memory_item.content = "User prefers Python" + static_result = Mock() + static_result.memories = [static_mem] + + # Mock contextual search (second call) + contextual_mem = Mock() + contextual_mem.memory_item.content = "Last discussed async patterns" + contextual_result = Mock() + contextual_result.memories = [contextual_mem] + contextual_result.search_id = "search-123" + + mock_project_client.beta.memory_stores.search_memories.side_effect = [static_result, contextual_result] + + provider = FoundryMemoryProvider( + project_client=mock_project_client, + memory_store_name="test_store", + scope="user_123", + ) + session = AgentSession(session_id="test-session") + ctx = SessionContext(input_messages=[Message(role="user", text="Hello")], session_id="s1") + + await provider.before_run( # type: ignore[arg-type] + agent=None, session=session, context=ctx, state=session.state.setdefault(provider.source_id, {}) + ) + + # Check that memories were added to context + assert provider.source_id in ctx.context_messages + added = ctx.context_messages[provider.source_id] + assert len(added) == 1 + assert "User prefers Python" in added[0].text # type: ignore[operator] + assert "Last discussed async patterns" in added[0].text # type: ignore[operator] + assert provider.context_prompt in added[0].text # type: ignore[operator] + assert session.state[provider.source_id]["previous_search_id"] == "search-123" + + +async def test_empty_input_skips_contextual_search(mock_project_client: AsyncMock) -> None: + static_result = Mock() + static_result.memories = [] + mock_project_client.beta.memory_stores.search_memories.return_value = static_result + + provider = 
FoundryMemoryProvider( + project_client=mock_project_client, + memory_store_name="test_store", + scope="user_123", + ) + session = AgentSession(session_id="test-session") + ctx = SessionContext(input_messages=[Message(role="user", text="")], session_id="s1") + + await provider.before_run( # type: ignore[arg-type] + agent=None, session=session, context=ctx, state=session.state.setdefault(provider.source_id, {}) + ) + + # Should only call search_memories once for static memories + assert mock_project_client.beta.memory_stores.search_memories.call_count == 1 + assert provider.source_id not in ctx.context_messages + + +async def test_empty_search_results_no_messages(mock_project_client: AsyncMock) -> None: + mock_search_result = Mock() + mock_search_result.memories = [] + mock_project_client.beta.memory_stores.search_memories.return_value = mock_search_result + + provider = FoundryMemoryProvider( + project_client=mock_project_client, + memory_store_name="test_store", + scope="user_123", + ) + session = AgentSession(session_id="test-session") + ctx = SessionContext(input_messages=[Message(role="user", text="test")], session_id="s1") + + await provider.before_run( # type: ignore[arg-type] + agent=None, session=session, context=ctx, state=session.state.setdefault(provider.source_id, {}) + ) + + assert provider.source_id not in ctx.context_messages + + +async def test_static_memories_only_retrieved_once(mock_project_client: AsyncMock) -> None: + static_mem = Mock() + static_mem.memory_item.content = "Static memory" + static_result = Mock() + static_result.memories = [static_mem] + contextual_result = Mock() + contextual_result.memories = [] + + mock_project_client.beta.memory_stores.search_memories.side_effect = [static_result, contextual_result] + + provider = FoundryMemoryProvider( + project_client=mock_project_client, + memory_store_name="test_store", + scope="user_123", + ) + session = AgentSession(session_id="test-session") + ctx = 
SessionContext(input_messages=[Message(role="user", text="Hello")], session_id="s1") + + # First call + await provider.before_run( # type: ignore[arg-type] + agent=None, session=session, context=ctx, state=session.state.setdefault(provider.source_id, {}) + ) + assert mock_project_client.beta.memory_stores.search_memories.call_count == 2 + + # Reset mock for second call + mock_project_client.beta.memory_stores.search_memories.reset_mock() + contextual_result2 = Mock() + contextual_result2.memories = [] + mock_project_client.beta.memory_stores.search_memories.return_value = contextual_result2 + + # Second call - should only search contextual, not static + ctx2 = SessionContext(input_messages=[Message(role="user", text="World")], session_id="s1") + await provider.before_run( # type: ignore[arg-type] + agent=None, session=session, context=ctx2, state=session.state.setdefault(provider.source_id, {}) + ) + assert mock_project_client.beta.memory_stores.search_memories.call_count == 1 + + +async def test_handles_search_exception_gracefully(mock_project_client: AsyncMock) -> None: + mock_project_client.beta.memory_stores.search_memories.side_effect = Exception("API error") + + provider = FoundryMemoryProvider( + project_client=mock_project_client, + memory_store_name="test_store", + scope="user_123", + ) + session = AgentSession(session_id="test-session") + ctx = SessionContext(input_messages=[Message(role="user", text="Hello")], session_id="s1") + + # Should not raise exception + await provider.before_run( # type: ignore[arg-type] + agent=None, session=session, context=ctx, state=session.state.setdefault(provider.source_id, {}) + ) + + # No memories added + assert provider.source_id not in ctx.context_messages + + +# -- after_run tests ----------------------------------------------------------- + + +async def test_stores_input_and_response(mock_project_client: AsyncMock) -> None: + mock_poller = Mock() + mock_poller.update_id = "update-456" + 
mock_project_client.beta.memory_stores.begin_update_memories.return_value = mock_poller + + provider = FoundryMemoryProvider( + project_client=mock_project_client, + memory_store_name="test_store", + scope="user_123", + ) + session = AgentSession(session_id="test-session") + ctx = SessionContext(input_messages=[Message(role="user", text="question")], session_id="s1") + ctx._response = AgentResponse(messages=[Message(role="assistant", text="answer")]) + + await provider.after_run( # type: ignore[arg-type] + agent=None, session=session, context=ctx, state=session.state.setdefault(provider.source_id, {}) + ) + + mock_project_client.beta.memory_stores.begin_update_memories.assert_awaited_once() + call_kwargs = mock_project_client.beta.memory_stores.begin_update_memories.call_args.kwargs + assert call_kwargs["name"] == "test_store" + assert call_kwargs["scope"] == "user_123" + assert len(call_kwargs["items"]) == 2 + assert call_kwargs["items"][0]["content"] == "question" + assert call_kwargs["items"][1]["content"] == "answer" + assert session.state[provider.source_id]["previous_update_id"] == "update-456" + + +async def test_only_stores_user_assistant_system(mock_project_client: AsyncMock) -> None: + mock_poller = Mock() + mock_project_client.beta.memory_stores.begin_update_memories.return_value = mock_poller + + provider = FoundryMemoryProvider( + project_client=mock_project_client, + memory_store_name="test_store", + scope="user_123", + ) + session = AgentSession(session_id="test-session") + ctx = SessionContext( + input_messages=[ + Message(role="user", text="hello"), + Message(role="tool", text="tool output"), + ], + session_id="s1", + ) + ctx._response = AgentResponse(messages=[Message(role="assistant", text="reply")]) + + await provider.after_run( # type: ignore[arg-type] + agent=None, session=session, context=ctx, state=session.state.setdefault(provider.source_id, {}) + ) + + call_kwargs = 
mock_project_client.beta.memory_stores.begin_update_memories.call_args.kwargs + items = call_kwargs["items"] + assert len(items) == 2 + assert items[0]["content"] == "hello" + assert items[1]["content"] == "reply" + + +async def test_skips_empty_messages(mock_project_client: AsyncMock) -> None: + provider = FoundryMemoryProvider( + project_client=mock_project_client, + memory_store_name="test_store", + scope="user_123", + ) + session = AgentSession(session_id="test-session") + ctx = SessionContext( + input_messages=[ + Message(role="user", text=""), + Message(role="user", text=" "), + ], + session_id="s1", + ) + ctx._response = AgentResponse(messages=[]) + + await provider.after_run( # type: ignore[arg-type] + agent=None, session=session, context=ctx, state=session.state.setdefault(provider.source_id, {}) + ) + + mock_project_client.beta.memory_stores.begin_update_memories.assert_not_awaited() + + +async def test_uses_configured_update_delay(mock_project_client: AsyncMock) -> None: + mock_poller = Mock() + mock_project_client.beta.memory_stores.begin_update_memories.return_value = mock_poller + + provider = FoundryMemoryProvider( + project_client=mock_project_client, + memory_store_name="test_store", + scope="user_123", + update_delay=60, + ) + session = AgentSession(session_id="test-session") + ctx = SessionContext(input_messages=[Message(role="user", text="hi")], session_id="s1") + ctx._response = AgentResponse(messages=[Message(role="assistant", text="hey")]) + + await provider.after_run( # type: ignore[arg-type] + agent=None, session=session, context=ctx, state=session.state.setdefault(provider.source_id, {}) + ) + + call_kwargs = mock_project_client.beta.memory_stores.begin_update_memories.call_args.kwargs + assert call_kwargs["update_delay"] == 60 + + +async def test_uses_previous_update_id_for_incremental_updates(mock_project_client: AsyncMock) -> None: + mock_poller1 = Mock() + mock_poller1.update_id = "update-1" + mock_poller2 = Mock() + 
mock_poller2.update_id = "update-2" + + mock_project_client.beta.memory_stores.begin_update_memories.side_effect = [mock_poller1, mock_poller2] + + provider = FoundryMemoryProvider( + project_client=mock_project_client, + memory_store_name="test_store", + scope="user_123", + ) + session = AgentSession(session_id="test-session") + ctx1 = SessionContext(input_messages=[Message(role="user", text="first")], session_id="s1") + ctx1._response = AgentResponse(messages=[Message(role="assistant", text="response1")]) + + # First update + await provider.after_run( # type: ignore[arg-type] + agent=None, session=session, context=ctx1, state=session.state.setdefault(provider.source_id, {}) + ) + assert session.state[provider.source_id]["previous_update_id"] == "update-1" + + # Second update should use previous_update_id + ctx2 = SessionContext(input_messages=[Message(role="user", text="second")], session_id="s1") + ctx2._response = AgentResponse(messages=[Message(role="assistant", text="response2")]) + + await provider.after_run( # type: ignore[arg-type] + agent=None, session=session, context=ctx2, state=session.state.setdefault(provider.source_id, {}) + ) + + call_kwargs = mock_project_client.beta.memory_stores.begin_update_memories.call_args.kwargs + assert call_kwargs["previous_update_id"] == "update-1" + assert session.state[provider.source_id]["previous_update_id"] == "update-2" + + +async def test_handles_update_exception_gracefully(mock_project_client: AsyncMock) -> None: + mock_project_client.beta.memory_stores.begin_update_memories.side_effect = Exception("API error") + + provider = FoundryMemoryProvider( + project_client=mock_project_client, + memory_store_name="test_store", + scope="user_123", + ) + session = AgentSession(session_id="test-session") + ctx = SessionContext(input_messages=[Message(role="user", text="hi")], session_id="s1") + ctx._response = AgentResponse(messages=[Message(role="assistant", text="hey")]) + + # Should not raise exception + await 
provider.after_run( # type: ignore[arg-type] + agent=None, session=session, context=ctx, state=session.state.setdefault(provider.source_id, {}) + ) + + +# -- Context manager tests ----------------------------------------------------- + + +async def test_aenter_delegates_to_client(mock_project_client: AsyncMock) -> None: + provider = FoundryMemoryProvider( + project_client=mock_project_client, + memory_store_name="test_store", + scope="user_123", + ) + result = await provider.__aenter__() + assert result is provider + mock_project_client.__aenter__.assert_awaited_once() + + +async def test_aexit_delegates_to_client(mock_project_client: AsyncMock) -> None: + provider = FoundryMemoryProvider( + project_client=mock_project_client, + memory_store_name="test_store", + scope="user_123", + ) + await provider.__aexit__(None, None, None) + mock_project_client.__aexit__.assert_awaited_once() + + +async def test_async_with_syntax(mock_project_client: AsyncMock) -> None: + provider = FoundryMemoryProvider( + project_client=mock_project_client, + memory_store_name="test_store", + scope="user_123", + ) + async with provider as p: + assert p is provider diff --git a/python/packages/foundry/tests/test_foundry_agent.py b/python/packages/foundry/tests/test_foundry_agent.py deleted file mode 100644 index 549d922ff9..0000000000 --- a/python/packages/foundry/tests/test_foundry_agent.py +++ /dev/null @@ -1,374 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
- -"""Tests for FoundryAgentClient and FoundryAgent classes.""" - -from typing import Any -from unittest.mock import AsyncMock, MagicMock, patch - -import pytest -from agent_framework._tools import tool - - -class TestRawFoundryAgentChatClient: - """Tests for RawFoundryAgentChatClient.""" - - def test_init_requires_agent_name(self) -> None: - """Test that agent_name is required.""" - from agent_framework_foundry._foundry_agent_client import RawFoundryAgentChatClient - - with pytest.raises(ValueError, match="Agent name is required"): - RawFoundryAgentChatClient( - project_client=MagicMock(), - ) - - def test_init_with_agent_name(self) -> None: - """Test construction with agent_name and project_client.""" - from agent_framework_foundry._foundry_agent_client import RawFoundryAgentChatClient - - mock_project = MagicMock() - mock_project.get_openai_client.return_value = MagicMock() - - client = RawFoundryAgentChatClient( - project_client=mock_project, - agent_name="test-agent", - agent_version="1.0", - ) - - assert client.agent_name == "test-agent" - assert client.agent_version == "1.0" - - def test_get_agent_reference_with_version(self) -> None: - """Test agent reference includes version when provided.""" - from agent_framework_foundry._foundry_agent_client import RawFoundryAgentChatClient - - mock_project = MagicMock() - mock_project.get_openai_client.return_value = MagicMock() - - client = RawFoundryAgentChatClient( - project_client=mock_project, - agent_name="my-agent", - agent_version="2.0", - ) - - ref = client._get_agent_reference() - assert ref == {"name": "my-agent", "version": "2.0", "type": "agent_reference"} - - def test_get_agent_reference_without_version(self) -> None: - """Test agent reference omits version for HostedAgents.""" - from agent_framework_foundry._foundry_agent_client import RawFoundryAgentChatClient - - mock_project = MagicMock() - mock_project.get_openai_client.return_value = MagicMock() - - client = RawFoundryAgentChatClient( - 
project_client=mock_project, - agent_name="hosted-agent", - ) - - ref = client._get_agent_reference() - assert ref == {"name": "hosted-agent", "type": "agent_reference"} - assert "version" not in ref - - def test_as_agent_returns_foundry_agent_and_preserves_client_type(self) -> None: - """Test that as_agent() wraps the client in FoundryAgent using the same client class.""" - from agent_framework_foundry._foundry_agent import FoundryAgent - from agent_framework_foundry._foundry_agent_client import RawFoundryAgentChatClient - - class CustomClient(RawFoundryAgentChatClient): - pass - - mock_project = MagicMock() - mock_project.get_openai_client.return_value = MagicMock() - - client = CustomClient( - project_client=mock_project, - agent_name="test-agent", - agent_version="1.0", - ) - - agent = client.as_agent(instructions="You are helpful.") - - assert isinstance(agent, FoundryAgent) - assert agent.name == "test-agent" - assert isinstance(agent.client, CustomClient) - assert agent.client.project_client is mock_project - assert agent.client.agent_name == "test-agent" - assert agent.client.agent_version == "1.0" - - named_agent = client.as_agent(name="display-name", instructions="You are helpful.") - assert named_agent.name == "display-name" - assert named_agent.client.agent_name == "test-agent" - - async def test_prepare_options_validates_tools(self) -> None: - """Test that _prepare_options rejects non-FunctionTool objects.""" - from agent_framework import Message - - from agent_framework_foundry._foundry_agent_client import RawFoundryAgentChatClient - - mock_project = MagicMock() - mock_project.get_openai_client.return_value = MagicMock() - - client = RawFoundryAgentChatClient( - project_client=mock_project, - agent_name="test-agent", - ) - - # A dict tool should be rejected - with pytest.raises(TypeError, match="Only FunctionTool objects are accepted"): - await client._prepare_options( - messages=[Message(role="user", contents="hi")], - options={"tools": [{"type": 
"function", "function": {"name": "bad"}}]}, - ) - - async def test_prepare_options_accepts_function_tools(self) -> None: - """Test that _prepare_options accepts FunctionTool objects.""" - from agent_framework import Message - - from agent_framework_foundry._foundry_agent_client import RawFoundryAgentChatClient - - mock_project = MagicMock() - mock_openai = MagicMock() - mock_project.get_openai_client.return_value = mock_openai - - client = RawFoundryAgentChatClient( - project_client=mock_project, - agent_name="test-agent", - ) - - @tool(approval_mode="never_require") - def my_func() -> str: - """A test function.""" - return "ok" - - # Should not raise — patch the parent's _prepare_options - with patch( - "agent_framework_openai._chat_client.RawOpenAIChatClient._prepare_options", - new_callable=AsyncMock, - return_value={}, - ): - result = await client._prepare_options( - messages=[Message(role="user", contents="hi")], - options={"tools": [my_func]}, - ) - assert "extra_body" in result - assert result["extra_body"]["agent_reference"]["name"] == "test-agent" - - def test_check_model_presence_is_noop(self) -> None: - """Test that _check_model_presence does nothing (model is on service).""" - from agent_framework_foundry._foundry_agent_client import RawFoundryAgentChatClient - - mock_project = MagicMock() - mock_project.get_openai_client.return_value = MagicMock() - - client = RawFoundryAgentChatClient( - project_client=mock_project, - agent_name="test-agent", - ) - - options: dict[str, Any] = {} - client._check_model_presence(options) - assert "model" not in options - - -class TestFoundryAgentChatClient: - """Tests for _FoundryAgentChatClient (full middleware).""" - - def test_init(self) -> None: - """Test construction of the full-middleware client.""" - from agent_framework_foundry._foundry_agent_client import _FoundryAgentChatClient - - mock_project = MagicMock() - mock_project.get_openai_client.return_value = MagicMock() - - client = _FoundryAgentChatClient( - 
project_client=mock_project, - agent_name="test-agent", - agent_version="1.0", - ) - - assert client.agent_name == "test-agent" - - -class TestRawFoundryAgent: - """Tests for RawFoundryAgent.""" - - def test_init_creates_client(self) -> None: - """Test that RawFoundryAgent creates a client internally.""" - from agent_framework_foundry._foundry_agent import RawFoundryAgent - - mock_project = MagicMock() - mock_project.get_openai_client.return_value = MagicMock() - - agent = RawFoundryAgent( - project_client=mock_project, - agent_name="test-agent", - agent_version="1.0", - ) - - assert agent.client is not None - assert agent.client.agent_name == "test-agent" - - def test_init_with_custom_client_type(self) -> None: - """Test that client_type parameter is respected.""" - from agent_framework_foundry._foundry_agent import RawFoundryAgent - from agent_framework_foundry._foundry_agent_client import RawFoundryAgentChatClient - - mock_project = MagicMock() - mock_project.get_openai_client.return_value = MagicMock() - - agent = RawFoundryAgent( - project_client=mock_project, - agent_name="test-agent", - client_type=RawFoundryAgentChatClient, - ) - - assert isinstance(agent.client, RawFoundryAgentChatClient) - - def test_init_rejects_invalid_client_type(self) -> None: - """Test that invalid client_type raises TypeError.""" - from agent_framework_foundry._foundry_agent import RawFoundryAgent - - with pytest.raises(TypeError, match="must be a subclass of RawFoundryAgentChatClient"): - RawFoundryAgent( - project_client=MagicMock(), - agent_name="test-agent", - client_type=object, # type: ignore[arg-type] - ) - - def test_init_with_function_tools(self) -> None: - """Test that FunctionTool and callables are accepted.""" - from agent_framework_foundry._foundry_agent import RawFoundryAgent - - mock_project = MagicMock() - mock_project.get_openai_client.return_value = MagicMock() - - @tool(approval_mode="never_require") - def my_func() -> str: - """A test function.""" - return "ok" - 
- agent = RawFoundryAgent( - project_client=mock_project, - agent_name="test-agent", - tools=[my_func], - ) - - assert agent.default_options.get("tools") is not None - - -class TestFoundryAgent: - """Tests for FoundryAgent (full middleware).""" - - def test_init(self) -> None: - """Test construction of the full-middleware agent.""" - from agent_framework_foundry._foundry_agent import FoundryAgent - - mock_project = MagicMock() - mock_project.get_openai_client.return_value = MagicMock() - - agent = FoundryAgent( - project_client=mock_project, - agent_name="test-agent", - agent_version="1.0", - ) - - assert agent.client is not None - assert agent.client.agent_name == "test-agent" - - def test_init_with_middleware(self) -> None: - """Test that agent-level middleware is accepted.""" - from agent_framework import ChatContext, ChatMiddleware - - from agent_framework_foundry._foundry_agent import FoundryAgent - - mock_project = MagicMock() - mock_project.get_openai_client.return_value = MagicMock() - - class MyMiddleware(ChatMiddleware): - async def process(self, context: ChatContext) -> None: - pass - - agent = FoundryAgent( - project_client=mock_project, - agent_name="test-agent", - middleware=[MyMiddleware()], - ) - - assert agent.client is not None - - -class TestFoundryChatClientToolMethods: - """Tests for RawFoundryChatClient tool factory methods.""" - - def test_get_code_interpreter_tool(self) -> None: - """Test code interpreter tool creation.""" - from agent_framework_foundry._foundry_chat_client import RawFoundryChatClient - - tool_obj = RawFoundryChatClient.get_code_interpreter_tool() - assert tool_obj is not None - - def test_get_code_interpreter_tool_with_file_ids(self) -> None: - """Test code interpreter tool with file IDs.""" - from agent_framework_foundry._foundry_chat_client import RawFoundryChatClient - - tool_obj = RawFoundryChatClient.get_code_interpreter_tool(file_ids=["file-abc123"]) - assert tool_obj is not None - - def 
test_get_file_search_tool(self) -> None: - """Test file search tool creation.""" - from agent_framework_foundry._foundry_chat_client import RawFoundryChatClient - - tool_obj = RawFoundryChatClient.get_file_search_tool(vector_store_ids=["vs_abc123"]) - assert tool_obj is not None - - def test_get_file_search_tool_requires_vector_store_ids(self) -> None: - """Test that empty vector_store_ids raises ValueError.""" - from agent_framework_foundry._foundry_chat_client import RawFoundryChatClient - - with pytest.raises(ValueError, match="vector_store_ids"): - RawFoundryChatClient.get_file_search_tool(vector_store_ids=[]) - - def test_get_web_search_tool(self) -> None: - """Test web search tool creation.""" - from agent_framework_foundry._foundry_chat_client import RawFoundryChatClient - - tool_obj = RawFoundryChatClient.get_web_search_tool() - assert tool_obj is not None - - def test_get_web_search_tool_with_location(self) -> None: - """Test web search tool with user location.""" - from agent_framework_foundry._foundry_chat_client import RawFoundryChatClient - - tool_obj = RawFoundryChatClient.get_web_search_tool( - user_location={"city": "Seattle", "country": "US"}, - search_context_size="high", - ) - assert tool_obj is not None - - def test_get_image_generation_tool(self) -> None: - """Test image generation tool creation.""" - from agent_framework_foundry._foundry_chat_client import RawFoundryChatClient - - tool_obj = RawFoundryChatClient.get_image_generation_tool() - assert tool_obj is not None - - def test_get_mcp_tool(self) -> None: - """Test MCP tool creation.""" - from agent_framework_foundry._foundry_chat_client import RawFoundryChatClient - - tool_obj = RawFoundryChatClient.get_mcp_tool( - name="my_mcp", - url="https://mcp.example.com", - ) - assert tool_obj is not None - - def test_get_mcp_tool_with_connection_id(self) -> None: - """Test MCP tool with project connection ID.""" - from agent_framework_foundry._foundry_chat_client import RawFoundryChatClient - - 
tool_obj = RawFoundryChatClient.get_mcp_tool( - name="github_mcp", - project_connection_id="conn_abc123", - description="GitHub MCP via Foundry", - ) - assert tool_obj is not None diff --git a/python/packages/foundry/tests/test_foundry_memory_provider.py b/python/packages/foundry/tests/test_foundry_memory_provider.py deleted file mode 100644 index f7e02f8a89..0000000000 --- a/python/packages/foundry/tests/test_foundry_memory_provider.py +++ /dev/null @@ -1,507 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. -# pyright: reportPrivateUsage=false - -from __future__ import annotations - -import os -from unittest.mock import AsyncMock, Mock, patch - -import pytest -from agent_framework import AGENT_FRAMEWORK_USER_AGENT, AgentResponse, Message -from agent_framework._sessions import AgentSession, SessionContext - -from agent_framework_foundry._foundry_memory_provider import FoundryMemoryProvider - - -@pytest.fixture -def mock_project_client() -> AsyncMock: - """Create a mock AIProjectClient.""" - mock_client = AsyncMock() - mock_client.beta = AsyncMock() - mock_client.beta.memory_stores = AsyncMock() - mock_client.beta.memory_stores.search_memories = AsyncMock() - mock_client.beta.memory_stores.begin_update_memories = AsyncMock() - mock_client.__aenter__ = AsyncMock(return_value=mock_client) - mock_client.__aexit__ = AsyncMock() - return mock_client - - -@pytest.fixture -def mock_credential() -> Mock: - """Create a mock Azure credential.""" - return Mock() - - -# -- Initialization tests ------------------------------------------------------ - - -class TestInit: - """Test FoundryMemoryProvider initialization.""" - - def test_init_with_all_params(self, mock_project_client: AsyncMock) -> None: - provider = FoundryMemoryProvider( - source_id="custom_source", - project_client=mock_project_client, - memory_store_name="test_store", - scope="user_123", - context_prompt="Custom prompt", - update_delay=60, - ) - assert provider.source_id == "custom_source" - assert 
provider.project_client is mock_project_client - assert provider.memory_store_name == "test_store" - assert provider.scope == "user_123" - assert provider.context_prompt == "Custom prompt" - assert provider.update_delay == 60 - - def test_init_default_source_id(self, mock_project_client: AsyncMock) -> None: - provider = FoundryMemoryProvider( - project_client=mock_project_client, - memory_store_name="test_store", - scope="user_123", - ) - assert provider.source_id == FoundryMemoryProvider.DEFAULT_SOURCE_ID - - def test_init_default_context_prompt(self, mock_project_client: AsyncMock) -> None: - provider = FoundryMemoryProvider( - project_client=mock_project_client, - memory_store_name="test_store", - scope="user_123", - ) - assert provider.context_prompt == FoundryMemoryProvider.DEFAULT_CONTEXT_PROMPT - - def test_init_default_update_delay(self, mock_project_client: AsyncMock) -> None: - provider = FoundryMemoryProvider( - project_client=mock_project_client, - memory_store_name="test_store", - scope="user_123", - ) - assert provider.update_delay == 300 - - def test_init_with_project_endpoint_and_credential( - self, mock_project_client: AsyncMock, mock_credential: Mock - ) -> None: - with patch("agent_framework_foundry._foundry_memory_provider.AIProjectClient") as mock_ai_project_client: - mock_ai_project_client.return_value = mock_project_client - provider = FoundryMemoryProvider( - project_endpoint="https://test.project.endpoint", - credential=mock_credential, # type: ignore[arg-type] - allow_preview=True, - memory_store_name="test_store", - scope="user_123", - ) - assert provider.project_client is mock_project_client - mock_ai_project_client.assert_called_once_with( - endpoint="https://test.project.endpoint", - credential=mock_credential, - allow_preview=True, - user_agent=AGENT_FRAMEWORK_USER_AGENT, - ) - - def test_init_requires_project_endpoint_without_project_client(self) -> None: - with ( - 
patch("agent_framework_foundry._foundry_memory_provider.load_settings") as mock_load_settings, - patch.dict(os.environ, {}, clear=True), - pytest.raises(ValueError, match="project endpoint is required"), - ): - mock_load_settings.return_value = {"project_endpoint": None} - FoundryMemoryProvider( - memory_store_name="test_store", - scope="user_123", - ) - - def test_init_requires_credential_without_project_client(self) -> None: - with pytest.raises(ValueError, match="Azure credential is required"): - FoundryMemoryProvider( - project_endpoint="https://test.project.endpoint", - memory_store_name="test_store", - scope="user_123", - ) - - def test_init_requires_memory_store_name(self, mock_project_client: AsyncMock) -> None: - with pytest.raises(ValueError, match="memory_store_name is required"): - FoundryMemoryProvider( - project_client=mock_project_client, - memory_store_name="", - scope="user_123", - ) - - def test_init_requires_scope(self, mock_project_client: AsyncMock) -> None: - with pytest.raises(ValueError, match="scope is required"): - FoundryMemoryProvider( - project_client=mock_project_client, - memory_store_name="test_store", - scope="", - ) - - -# -- before_run tests ---------------------------------------------------------- - - -class TestBeforeRun: - """Test before_run hook.""" - - async def test_retrieves_static_memories_on_first_run(self, mock_project_client: AsyncMock) -> None: - """First call retrieves static (user profile) memories.""" - mem1 = Mock() - mem1.memory_item.content = "User prefers Python" - mem2 = Mock() - mem2.memory_item.content = "User is based in Seattle" - mock_search_result = Mock() - mock_search_result.memories = [mem1, mem2] - mock_project_client.beta.memory_stores.search_memories.return_value = mock_search_result - - provider = FoundryMemoryProvider( - project_client=mock_project_client, - memory_store_name="test_store", - scope="user_123", - ) - session = AgentSession(session_id="test-session") - ctx = 
SessionContext(input_messages=[Message(role="user", text="Hello")], session_id="s1") - - await provider.before_run( # type: ignore[arg-type] - agent=None, session=session, context=ctx, state=session.state.setdefault(provider.source_id, {}) - ) - - # Should call search_memories twice: once for static, once for contextual - assert mock_project_client.beta.memory_stores.search_memories.call_count == 2 - # Static memories should be cached - assert len(session.state[provider.source_id]["static_memories"]) == 2 - assert session.state[provider.source_id]["initialized"] is True - - async def test_contextual_memories_added_to_context(self, mock_project_client: AsyncMock) -> None: - """Contextual search returns memories → messages added to context with prompt.""" - # Mock static search (first call) - static_mem = Mock() - static_mem.memory_item.content = "User prefers Python" - static_result = Mock() - static_result.memories = [static_mem] - - # Mock contextual search (second call) - contextual_mem = Mock() - contextual_mem.memory_item.content = "Last discussed async patterns" - contextual_result = Mock() - contextual_result.memories = [contextual_mem] - contextual_result.search_id = "search-123" - - mock_project_client.beta.memory_stores.search_memories.side_effect = [static_result, contextual_result] - - provider = FoundryMemoryProvider( - project_client=mock_project_client, - memory_store_name="test_store", - scope="user_123", - ) - session = AgentSession(session_id="test-session") - ctx = SessionContext(input_messages=[Message(role="user", text="Hello")], session_id="s1") - - await provider.before_run( # type: ignore[arg-type] - agent=None, session=session, context=ctx, state=session.state.setdefault(provider.source_id, {}) - ) - - # Check that memories were added to context - assert provider.source_id in ctx.context_messages - added = ctx.context_messages[provider.source_id] - assert len(added) == 1 - assert "User prefers Python" in added[0].text # type: 
ignore[operator] - assert "Last discussed async patterns" in added[0].text # type: ignore[operator] - assert provider.context_prompt in added[0].text # type: ignore[operator] - assert session.state[provider.source_id]["previous_search_id"] == "search-123" - - async def test_empty_input_skips_contextual_search(self, mock_project_client: AsyncMock) -> None: - """Empty input messages → only static search performed, no contextual search.""" - static_result = Mock() - static_result.memories = [] - mock_project_client.beta.memory_stores.search_memories.return_value = static_result - - provider = FoundryMemoryProvider( - project_client=mock_project_client, - memory_store_name="test_store", - scope="user_123", - ) - session = AgentSession(session_id="test-session") - ctx = SessionContext(input_messages=[Message(role="user", text="")], session_id="s1") - - await provider.before_run( # type: ignore[arg-type] - agent=None, session=session, context=ctx, state=session.state.setdefault(provider.source_id, {}) - ) - - # Should only call search_memories once for static memories - assert mock_project_client.beta.memory_stores.search_memories.call_count == 1 - assert provider.source_id not in ctx.context_messages - - async def test_empty_search_results_no_messages(self, mock_project_client: AsyncMock) -> None: - """Empty search results → no messages added.""" - mock_search_result = Mock() - mock_search_result.memories = [] - mock_project_client.beta.memory_stores.search_memories.return_value = mock_search_result - - provider = FoundryMemoryProvider( - project_client=mock_project_client, - memory_store_name="test_store", - scope="user_123", - ) - session = AgentSession(session_id="test-session") - ctx = SessionContext(input_messages=[Message(role="user", text="test")], session_id="s1") - - await provider.before_run( # type: ignore[arg-type] - agent=None, session=session, context=ctx, state=session.state.setdefault(provider.source_id, {}) - ) - - assert provider.source_id not in 
ctx.context_messages - - async def test_static_memories_only_retrieved_once(self, mock_project_client: AsyncMock) -> None: - """Static memories are only retrieved on the first call.""" - static_mem = Mock() - static_mem.memory_item.content = "Static memory" - static_result = Mock() - static_result.memories = [static_mem] - contextual_result = Mock() - contextual_result.memories = [] - - mock_project_client.beta.memory_stores.search_memories.side_effect = [static_result, contextual_result] - - provider = FoundryMemoryProvider( - project_client=mock_project_client, - memory_store_name="test_store", - scope="user_123", - ) - session = AgentSession(session_id="test-session") - ctx = SessionContext(input_messages=[Message(role="user", text="Hello")], session_id="s1") - - # First call - await provider.before_run( # type: ignore[arg-type] - agent=None, session=session, context=ctx, state=session.state.setdefault(provider.source_id, {}) - ) - assert mock_project_client.beta.memory_stores.search_memories.call_count == 2 - - # Reset mock for second call - mock_project_client.beta.memory_stores.search_memories.reset_mock() - contextual_result2 = Mock() - contextual_result2.memories = [] - mock_project_client.beta.memory_stores.search_memories.return_value = contextual_result2 - - # Second call - should only search contextual, not static - ctx2 = SessionContext(input_messages=[Message(role="user", text="World")], session_id="s1") - await provider.before_run( # type: ignore[arg-type] - agent=None, session=session, context=ctx2, state=session.state.setdefault(provider.source_id, {}) - ) - assert mock_project_client.beta.memory_stores.search_memories.call_count == 1 - - async def test_handles_search_exception_gracefully(self, mock_project_client: AsyncMock) -> None: - """Search exception is logged but doesn't fail the operation.""" - mock_project_client.beta.memory_stores.search_memories.side_effect = Exception("API error") - - provider = FoundryMemoryProvider( - 
project_client=mock_project_client, - memory_store_name="test_store", - scope="user_123", - ) - session = AgentSession(session_id="test-session") - ctx = SessionContext(input_messages=[Message(role="user", text="Hello")], session_id="s1") - - # Should not raise exception - await provider.before_run( # type: ignore[arg-type] - agent=None, session=session, context=ctx, state=session.state.setdefault(provider.source_id, {}) - ) - - # No memories added - assert provider.source_id not in ctx.context_messages - - -# -- after_run tests ----------------------------------------------------------- - - -class TestAfterRun: - """Test after_run hook.""" - - async def test_stores_input_and_response(self, mock_project_client: AsyncMock) -> None: - """Stores input+response messages via begin_update_memories.""" - mock_poller = Mock() - mock_poller.update_id = "update-456" - mock_project_client.beta.memory_stores.begin_update_memories.return_value = mock_poller - - provider = FoundryMemoryProvider( - project_client=mock_project_client, - memory_store_name="test_store", - scope="user_123", - ) - session = AgentSession(session_id="test-session") - ctx = SessionContext(input_messages=[Message(role="user", text="question")], session_id="s1") - ctx._response = AgentResponse(messages=[Message(role="assistant", text="answer")]) - - await provider.after_run( # type: ignore[arg-type] - agent=None, session=session, context=ctx, state=session.state.setdefault(provider.source_id, {}) - ) - - mock_project_client.beta.memory_stores.begin_update_memories.assert_awaited_once() - call_kwargs = mock_project_client.beta.memory_stores.begin_update_memories.call_args.kwargs - assert call_kwargs["name"] == "test_store" - assert call_kwargs["scope"] == "user_123" - assert len(call_kwargs["items"]) == 2 - assert call_kwargs["items"][0]["content"] == "question" - assert call_kwargs["items"][1]["content"] == "answer" - assert session.state[provider.source_id]["previous_update_id"] == "update-456" - - async 
def test_only_stores_user_assistant_system(self, mock_project_client: AsyncMock) -> None: - """Only stores user/assistant/system messages with text.""" - mock_poller = Mock() - mock_project_client.beta.memory_stores.begin_update_memories.return_value = mock_poller - - provider = FoundryMemoryProvider( - project_client=mock_project_client, - memory_store_name="test_store", - scope="user_123", - ) - session = AgentSession(session_id="test-session") - ctx = SessionContext( - input_messages=[ - Message(role="user", text="hello"), - Message(role="tool", text="tool output"), - ], - session_id="s1", - ) - ctx._response = AgentResponse(messages=[Message(role="assistant", text="reply")]) - - await provider.after_run( # type: ignore[arg-type] - agent=None, session=session, context=ctx, state=session.state.setdefault(provider.source_id, {}) - ) - - call_kwargs = mock_project_client.beta.memory_stores.begin_update_memories.call_args.kwargs - items = call_kwargs["items"] - assert len(items) == 2 - assert items[0]["content"] == "hello" - assert items[1]["content"] == "reply" - - async def test_skips_empty_messages(self, mock_project_client: AsyncMock) -> None: - """Skips messages with empty text.""" - provider = FoundryMemoryProvider( - project_client=mock_project_client, - memory_store_name="test_store", - scope="user_123", - ) - session = AgentSession(session_id="test-session") - ctx = SessionContext( - input_messages=[ - Message(role="user", text=""), - Message(role="user", text=" "), - ], - session_id="s1", - ) - ctx._response = AgentResponse(messages=[]) - - await provider.after_run( # type: ignore[arg-type] - agent=None, session=session, context=ctx, state=session.state.setdefault(provider.source_id, {}) - ) - - mock_project_client.beta.memory_stores.begin_update_memories.assert_not_awaited() - - async def test_uses_configured_update_delay(self, mock_project_client: AsyncMock) -> None: - """Uses the configured update_delay parameter.""" - mock_poller = Mock() - 
mock_project_client.beta.memory_stores.begin_update_memories.return_value = mock_poller - - provider = FoundryMemoryProvider( - project_client=mock_project_client, - memory_store_name="test_store", - scope="user_123", - update_delay=60, - ) - session = AgentSession(session_id="test-session") - ctx = SessionContext(input_messages=[Message(role="user", text="hi")], session_id="s1") - ctx._response = AgentResponse(messages=[Message(role="assistant", text="hey")]) - - await provider.after_run( # type: ignore[arg-type] - agent=None, session=session, context=ctx, state=session.state.setdefault(provider.source_id, {}) - ) - - call_kwargs = mock_project_client.beta.memory_stores.begin_update_memories.call_args.kwargs - assert call_kwargs["update_delay"] == 60 - - async def test_uses_previous_update_id_for_incremental_updates(self, mock_project_client: AsyncMock) -> None: - """Uses previous_update_id for incremental updates.""" - mock_poller1 = Mock() - mock_poller1.update_id = "update-1" - mock_poller2 = Mock() - mock_poller2.update_id = "update-2" - - mock_project_client.beta.memory_stores.begin_update_memories.side_effect = [mock_poller1, mock_poller2] - - provider = FoundryMemoryProvider( - project_client=mock_project_client, - memory_store_name="test_store", - scope="user_123", - ) - session = AgentSession(session_id="test-session") - ctx1 = SessionContext(input_messages=[Message(role="user", text="first")], session_id="s1") - ctx1._response = AgentResponse(messages=[Message(role="assistant", text="response1")]) - - # First update - await provider.after_run( # type: ignore[arg-type] - agent=None, session=session, context=ctx1, state=session.state.setdefault(provider.source_id, {}) - ) - assert session.state[provider.source_id]["previous_update_id"] == "update-1" - - # Second update should use previous_update_id - ctx2 = SessionContext(input_messages=[Message(role="user", text="second")], session_id="s1") - ctx2._response = 
AgentResponse(messages=[Message(role="assistant", text="response2")]) - - await provider.after_run( # type: ignore[arg-type] - agent=None, session=session, context=ctx2, state=session.state.setdefault(provider.source_id, {}) - ) - - call_kwargs = mock_project_client.beta.memory_stores.begin_update_memories.call_args.kwargs - assert call_kwargs["previous_update_id"] == "update-1" - assert session.state[provider.source_id]["previous_update_id"] == "update-2" - - async def test_handles_update_exception_gracefully(self, mock_project_client: AsyncMock) -> None: - """Update exception is logged but doesn't fail the operation.""" - mock_project_client.beta.memory_stores.begin_update_memories.side_effect = Exception("API error") - - provider = FoundryMemoryProvider( - project_client=mock_project_client, - memory_store_name="test_store", - scope="user_123", - ) - session = AgentSession(session_id="test-session") - ctx = SessionContext(input_messages=[Message(role="user", text="hi")], session_id="s1") - ctx._response = AgentResponse(messages=[Message(role="assistant", text="hey")]) - - # Should not raise exception - await provider.after_run( # type: ignore[arg-type] - agent=None, session=session, context=ctx, state=session.state.setdefault(provider.source_id, {}) - ) - - -# -- Context manager tests ----------------------------------------------------- - - -class TestContextManager: - """Test __aenter__/__aexit__ delegation.""" - - async def test_aenter_delegates_to_client(self, mock_project_client: AsyncMock) -> None: - provider = FoundryMemoryProvider( - project_client=mock_project_client, - memory_store_name="test_store", - scope="user_123", - ) - result = await provider.__aenter__() - assert result is provider - mock_project_client.__aenter__.assert_awaited_once() - - async def test_aexit_delegates_to_client(self, mock_project_client: AsyncMock) -> None: - provider = FoundryMemoryProvider( - project_client=mock_project_client, - memory_store_name="test_store", - 
scope="user_123", - ) - await provider.__aexit__(None, None, None) - mock_project_client.__aexit__.assert_awaited_once() - - async def test_async_with_syntax(self, mock_project_client: AsyncMock) -> None: - provider = FoundryMemoryProvider( - project_client=mock_project_client, - memory_store_name="test_store", - scope="user_123", - ) - async with provider as p: - assert p is provider diff --git a/python/packages/lab/gaia/samples/openai_agent.py b/python/packages/lab/gaia/samples/openai_agent.py index a5709ecf2a..227b12c03c 100644 --- a/python/packages/lab/gaia/samples/openai_agent.py +++ b/python/packages/lab/gaia/samples/openai_agent.py @@ -7,7 +7,7 @@ Required Environment Variables: OPENAI_API_KEY: Your OpenAI API key - OPENAI_RESPONSES_MODEL_ID: Model to use with Responses API (e.g., gpt-4o, gpt-4o-mini) + OPENAI_RESPONSES_MODEL: Model to use with Responses API (e.g., gpt-4o, gpt-4o-mini) Optional Environment Variables: OPENAI_BASE_URL: Custom API base URL if using a proxy or compatible service @@ -19,7 +19,7 @@ Example: export OPENAI_API_KEY="sk-..." - export OPENAI_RESPONSES_MODEL_ID="gpt-4o" + export OPENAI_RESPONSES_MODEL="gpt-4o" """ from collections.abc import AsyncIterator diff --git a/python/packages/openai/agent_framework_openai/_chat_client.py b/python/packages/openai/agent_framework_openai/_chat_client.py index 9448142847..b0d56ee26f 100644 --- a/python/packages/openai/agent_framework_openai/_chat_client.py +++ b/python/packages/openai/agent_framework_openai/_chat_client.py @@ -137,7 +137,7 @@ class ReasoningOptions(TypedDict, total=False): See: https://platform.openai.com/docs/guides/reasoning """ - effort: Literal["low", "medium", "high"] + effort: Literal["none", "low", "medium", "high", "xhigh"] """The effort level for reasoning. 
Higher effort means more reasoning tokens.""" summary: Literal["auto", "concise", "detailed"] diff --git a/python/packages/openai/tests/openai/test_openai_chat_client.py b/python/packages/openai/tests/openai/test_openai_chat_client.py index cfba5b57a4..1f7a5fffce 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_client.py +++ b/python/packages/openai/tests/openai/test_openai_chat_client.py @@ -212,34 +212,56 @@ async def test_get_response_with_invalid_input() -> None: async def test_get_response_with_all_parameters() -> None: - """Test get_response with all possible parameters to cover parameter handling logic.""" + """Test request preparation with a comprehensive parameter set.""" client = OpenAIChatClient(model="test-model", api_key="test-key") - # Test with comprehensive parameter set - should fail due to invalid API key - with pytest.raises(ChatClientException): - await client.get_response( - messages=[Message(role="user", text="Test message")], - options={ - "include": ["message.output_text.logprobs"], - "instructions": "You are a helpful assistant", - "max_tokens": 100, - "parallel_tool_calls": True, - "model": "gpt-4", - "previous_response_id": "prev-123", - "reasoning": {"chain_of_thought": "enabled"}, - "service_tier": "auto", - "response_format": OutputStruct, - "seed": 42, - "store": True, - "temperature": 0.7, - "tool_choice": "auto", - "tools": [get_weather], - "top_p": 0.9, - "user": "test-user", - "truncation": "auto", - "timeout": 30.0, - "additional_properties": {"custom": "value"}, - }, - ) + _, run_options, _ = await client._prepare_request( + messages=[Message(role="user", text="Test message")], + options={ + "include": ["message.output_text.logprobs"], + "instructions": "You are a helpful assistant", + "max_tokens": 100, + "parallel_tool_calls": True, + "model": "gpt-4", + "previous_response_id": "prev-123", + "reasoning": {"chain_of_thought": "enabled"}, + "service_tier": "auto", + "response_format": OutputStruct, + "seed": 42, 
+ "store": True, + "temperature": 0.7, + "tool_choice": "auto", + "tools": [get_weather], + "top_p": 0.9, + "user": "test-user", + "truncation": "auto", + "timeout": 30.0, + "additional_properties": {"custom": "value"}, + }, + ) + + assert run_options["include"] == ["message.output_text.logprobs"] + assert run_options["max_output_tokens"] == 100 + assert run_options["parallel_tool_calls"] is True + assert run_options["model"] == "gpt-4" + assert run_options["previous_response_id"] == "prev-123" + assert run_options["reasoning"] == {"chain_of_thought": "enabled"} + assert run_options["service_tier"] == "auto" + assert run_options["text_format"] is OutputStruct + assert run_options["store"] is True + assert run_options["temperature"] == 0.7 + assert run_options["tool_choice"] == "auto" + assert run_options["top_p"] == 0.9 + assert run_options["user"] == "test-user" + assert run_options["truncation"] == "auto" + assert run_options["timeout"] == 30.0 + assert run_options["additional_properties"] == {"custom": "value"} + assert len(run_options["tools"]) == 1 + assert run_options["tools"][0]["type"] == "function" + assert run_options["tools"][0]["name"] == "get_weather" + assert run_options["input"][0]["role"] == "system" + assert run_options["input"][0]["content"][0]["text"] == "You are a helpful assistant" + assert run_options["input"][1]["role"] == "user" + assert run_options["input"][1]["content"][0]["text"] == "Test message" @pytest.mark.asyncio @@ -257,12 +279,13 @@ async def test_web_search_tool_with_location() -> None: } ) - # Should raise an authentication error due to invalid API key - with pytest.raises(ChatClientException): - await client.get_response( - messages=[Message(role="user", text="What's the weather?")], - options={"tools": [web_search_tool], "tool_choice": "auto"}, - ) + _, run_options, _ = await client._prepare_request( + messages=[Message(role="user", text="What's the weather?")], + options={"tools": [web_search_tool], "tool_choice": "auto"}, + ) 
+ + assert run_options["tools"] == [web_search_tool] + assert run_options["tool_choice"] == "auto" async def test_code_interpreter_tool_variations() -> None: @@ -272,20 +295,22 @@ async def test_code_interpreter_tool_variations() -> None: # Test code interpreter using static method code_tool = OpenAIChatClient.get_code_interpreter_tool() - with pytest.raises(ChatClientException): - await client.get_response( - messages=[Message("user", ["Run some code"])], - options={"tools": [code_tool]}, - ) + _, run_options, _ = await client._prepare_request( + messages=[Message("user", ["Run some code"])], + options={"tools": [code_tool]}, + ) + + assert run_options["tools"] == [code_tool] # Test code interpreter with files using static method code_tool_with_files = OpenAIChatClient.get_code_interpreter_tool(file_ids=["file1", "file2"]) - with pytest.raises(ChatClientException): - await client.get_response( - messages=[Message(role="user", text="Process these files")], - options={"tools": [code_tool_with_files]}, - ) + _, run_options, _ = await client._prepare_request( + messages=[Message(role="user", text="Process these files")], + options={"tools": [code_tool_with_files]}, + ) + + assert run_options["tools"] == [code_tool_with_files] async def test_content_filter_exception() -> None: @@ -309,23 +334,23 @@ async def test_content_filter_exception() -> None: @pytest.mark.asyncio async def test_hosted_file_search_tool_validation() -> None: - """Test get_response HostedFileSearchTool validation.""" + """Test HostedFileSearchTool validation and request preparation.""" client = OpenAIChatClient(model="test-model", api_key="test-key") # Test file search tool with vector store IDs file_search_tool = OpenAIChatClient.get_file_search_tool(vector_store_ids=["vs_123"]) - # Test using file search tool - may raise various exceptions depending on API response - with pytest.raises((ValueError, ChatClientInvalidRequestException, ChatClientException)): - await client.get_response( - 
messages=[Message("user", ["Test"])], - options={"tools": [file_search_tool]}, - ) + _, run_options, _ = await client._prepare_request( + messages=[Message("user", ["Test"])], + options={"tools": [file_search_tool]}, + ) + + assert run_options["tools"] == [file_search_tool] async def test_chat_message_parsing_with_function_calls() -> None: - """Test get_response message preparation with function call and result content types in conversation flow.""" + """Test message preparation with function call and function result content.""" client = OpenAIChatClient(model="test-model", api_key="test-key") # Create messages with function call and result content @@ -344,9 +369,27 @@ async def test_chat_message_parsing_with_function_calls() -> None: Message(role="tool", contents=[function_result]), ] - # This should exercise the message parsing logic - will fail due to invalid API key - with pytest.raises(ChatClientException): - await client.get_response(messages=messages) + prepared_messages = client._prepare_messages_for_openai(messages) + + assert prepared_messages == [ + { + "type": "message", + "role": "user", + "content": [{"type": "input_text", "text": "Call a function"}], + }, + { + "call_id": "test-call-id", + "id": "fc_test-fc-id", + "type": "function_call", + "name": "test_function", + "arguments": '{"param": "value"}', + }, + { + "call_id": "test-call-id", + "type": "function_call_output", + "output": "Function executed successfully", + }, + ] async def test_response_format_parse_path() -> None: @@ -3052,8 +3095,6 @@ async def get_api_key() -> str: "option_name,option_value,needs_validation", [ # Simple ChatOptions - just verify they don't fail - param("temperature", 0.7, False, id="temperature"), - param("top_p", 0.9, False, id="top_p"), param("max_tokens", 500, False, id="max_tokens"), param("seed", 123, False, id="seed"), param("user", "test-user-id", False, id="user"), @@ -3066,7 +3107,6 @@ async def get_api_key() -> str: # OpenAIChatOptions - just verify they 
don't fail param("safety_identifier", "user-hash-abc123", False, id="safety_identifier"), param("truncation", "auto", False, id="truncation"), - param("top_logprobs", 5, False, id="top_logprobs"), param("prompt_cache_key", "test-cache-key", False, id="prompt_cache_key"), param("max_tool_calls", 3, False, id="max_tool_calls"), # Complex options requiring output validation @@ -3360,7 +3400,6 @@ def get_test_image() -> Content: assert "house" in response.text.lower(), f"Model did not describe the house image. Response: {response.text}" -@pytest.mark.timeout(300) @pytest.mark.flaky @pytest.mark.integration @skip_if_openai_integration_tests_disabled @@ -3372,14 +3411,11 @@ async def test_integration_agent_replays_local_tool_history_without_stale_fc_id( async def search_hotels(city: Annotated[str, "The city to search for hotels in"]) -> str: return f"The only hotel option in {city} is {hotel_code}." - client = OpenAIChatClient() + # override with model that does not do reasoning by default + client = OpenAIChatClient(model="gpt-5.4") client.function_invocation_configuration["max_iterations"] = 2 - agent = Agent( - client=client, - tools=[search_hotels], - default_options={"store": False}, - ) + agent = Agent(client=client, tools=[search_hotels], default_options={"store": False}) session = agent.create_session() first_response = await agent.run( diff --git a/python/packages/openai/tests/openai/test_openai_chat_client_azure.py b/python/packages/openai/tests/openai/test_openai_chat_client_azure.py index 6a62f0b41a..5f3e7a740b 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_chat_client_azure.py @@ -4,6 +4,7 @@ import json import os +from functools import wraps from pathlib import Path from typing import Any from unittest.mock import MagicMock, patch @@ -31,6 +32,32 @@ ) +def _with_azure_openai_debug() -> Any: + def decorator(func: Any) -> Any: + @wraps(func) + async def wrapper(*args: 
Any, **kwargs: Any) -> Any: + try: + return await func(*args, **kwargs) + except Exception as exc: + model = os.getenv("AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME") or os.getenv( + "AZURE_OPENAI_DEPLOYMENT_NAME", "" + ) + api_version = os.getenv("AZURE_OPENAI_API_VERSION") or "preview" + endpoint = os.getenv("AZURE_OPENAI_ENDPOINT", "") + debug_message = f"Azure OpenAI debug: endpoint={endpoint}, model={model}, api_version={api_version}" + if hasattr(exc, "add_note"): + exc.add_note(debug_message) + elif exc.args: + exc.args = (f"{exc.args[0]}\n{debug_message}", *exc.args[1:]) + else: + exc.args = (debug_message,) + raise + + return wrapper + + return decorator + + class OutputStruct(BaseModel): """A structured output for testing purposes.""" @@ -38,20 +65,6 @@ class OutputStruct(BaseModel): weather: str | None = None -def _create_azure_openai_chat_client( - *, - api_key: Any = None, - credential: AsyncTokenCredential | None = None, -) -> OpenAIChatClient: - resolved_api_key = ( - api_key if api_key is not None else None if credential is not None else os.environ["AZURE_OPENAI_API_KEY"] - ) - return OpenAIChatClient( - api_key=resolved_api_key, - credential=credential, - ) - - async def create_vector_store(client: OpenAIChatClient) -> tuple[str, Content]: """Create a vector store with sample documents for testing.""" file = await client.client.files.create( @@ -87,7 +100,7 @@ async def get_weather(location: str) -> str: def test_init_with_azure_endpoint(azure_openai_unit_test_env: dict[str, str]) -> None: - client = _create_azure_openai_chat_client(credential=AzureCliCredential()) + client = OpenAIChatClient(credential=AzureCliCredential()) assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME"] assert isinstance(client, SupportsChatGetResponse) @@ -194,7 +207,7 @@ async def get_token(self, *scopes: str, **kwargs: object): @pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_API_VERSION"]], indirect=True) def 
test_init_uses_default_azure_api_version(azure_openai_unit_test_env: dict[str, str]) -> None: - client = _create_azure_openai_chat_client(credential=AzureCliCredential()) + client = OpenAIChatClient(credential=AzureCliCredential()) assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME"] assert client.api_version is not None @@ -218,8 +231,6 @@ def test_openai_base_url_wins_over_azure_aliases(monkeypatch, azure_openai_unit_ @pytest.mark.parametrize( "option_name,option_value,needs_validation", [ - param("temperature", 0.7, False, id="temperature"), - param("top_p", 0.9, False, id="top_p"), param("max_tokens", 500, False, id="max_tokens"), param("seed", 123, False, id="seed"), param("user", "test-user-id", False, id="user"), @@ -231,7 +242,6 @@ def test_openai_base_url_wins_over_azure_aliases(monkeypatch, azure_openai_unit_ param("tool_choice", "none", True, id="tool_choice_none"), param("safety_identifier", "user-hash-abc123", False, id="safety_identifier"), param("truncation", "auto", False, id="truncation"), - param("top_logprobs", 5, False, id="top_logprobs"), param("prompt_cache_key", "test-cache-key", False, id="prompt_cache_key"), param("max_tool_calls", 3, False, id="max_tool_calls"), param("tools", [get_weather], True, id="tools_function"), @@ -269,13 +279,14 @@ def test_openai_base_url_wins_over_azure_aliases(monkeypatch, azure_openai_unit_ ), ], ) +@_with_azure_openai_debug() async def test_integration_options( option_name: str, option_value: Any, needs_validation: bool, ) -> None: async with AzureCliCredential() as credential: - client = _create_azure_openai_chat_client(credential=credential) + client = OpenAIChatClient(credential=credential) client.function_invocation_configuration["max_iterations"] = 2 for streaming in [False, True]: @@ -326,34 +337,12 @@ async def test_integration_options( @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_openai_integration_tests_disabled +@_with_azure_openai_debug() async 
def test_integration_web_search() -> None: async with AzureCliCredential() as credential: - client = _create_azure_openai_chat_client(credential=credential) + client = OpenAIChatClient(credential=credential) for streaming in [False, True]: - content = { - "messages": [ - Message( - role="user", - text="Who are the main characters of Kpop Demon Hunters? Do a web search to find the answer.", - ) - ], - "options": { - "tool_choice": "auto", - "tools": [OpenAIChatClient.get_web_search_tool()], - }, - "stream": streaming, - } - if streaming: - response = await client.get_response(**content).get_final_response() - else: - response = await client.get_response(**content) - - assert isinstance(response, ChatResponse) - assert "Rumi" in response.text - assert "Mira" in response.text - assert "Zoey" in response.text - content = { "messages": [ Message( @@ -377,9 +366,10 @@ async def test_integration_web_search() -> None: @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_openai_integration_tests_disabled +@_with_azure_openai_debug() async def test_integration_client_file_search() -> None: async with AzureCliCredential() as credential: - client = _create_azure_openai_chat_client(credential=credential) + client = OpenAIChatClient(credential=credential) file_id, vector_store = await create_vector_store(client) try: response = await client.get_response( @@ -399,9 +389,10 @@ async def test_integration_client_file_search() -> None: @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_openai_integration_tests_disabled +@_with_azure_openai_debug() async def test_integration_client_file_search_streaming() -> None: async with AzureCliCredential() as credential: - client = _create_azure_openai_chat_client(credential=credential) + client = OpenAIChatClient(credential=credential) file_id, vector_store = await create_vector_store(client) try: response_stream = client.get_response( @@ -423,9 +414,10 @@ async def test_integration_client_file_search_streaming() -> None: 
@pytest.mark.flaky @pytest.mark.integration @skip_if_azure_openai_integration_tests_disabled +@_with_azure_openai_debug() async def test_integration_client_agent_hosted_mcp_tool() -> None: async with AzureCliCredential() as credential: - client = _create_azure_openai_chat_client(credential=credential) + client = OpenAIChatClient(credential=credential) response = await client.get_response( messages=[Message(role="user", text="How to create an Azure storage account using az cli?")], options={ @@ -446,9 +438,10 @@ async def test_integration_client_agent_hosted_mcp_tool() -> None: @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_openai_integration_tests_disabled +@_with_azure_openai_debug() async def test_integration_client_agent_hosted_code_interpreter_tool() -> None: async with AzureCliCredential() as credential: - client = _create_azure_openai_chat_client(credential=credential) + client = OpenAIChatClient(credential=credential) response = await client.get_response( messages=[Message(role="user", text="Calculate the sum of numbers from 1 to 10 using Python code.")], @@ -464,12 +457,13 @@ async def test_integration_client_agent_hosted_code_interpreter_tool() -> None: @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_openai_integration_tests_disabled +@_with_azure_openai_debug() async def test_integration_client_agent_existing_session() -> None: async with AzureCliCredential() as credential: preserved_session = None async with Agent( - client=_create_azure_openai_chat_client(credential=credential), + client=OpenAIChatClient(credential=credential), instructions="You are a helpful assistant with good memory.", ) as first_agent: session = first_agent.create_session() @@ -484,7 +478,7 @@ async def test_integration_client_agent_existing_session() -> None: if preserved_session: async with Agent( - client=_create_azure_openai_chat_client(credential=credential), + client=OpenAIChatClient(credential=credential), instructions="You are a helpful assistant with 
good memory.", ) as second_agent: second_response = await second_agent.run("What is my hobby?", session=preserved_session) @@ -497,6 +491,7 @@ async def test_integration_client_agent_existing_session() -> None: @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_openai_integration_tests_disabled +@_with_azure_openai_debug() async def test_azure_openai_chat_client_tool_rich_content_image() -> None: image_path = Path(__file__).parent.parent / "assets" / "sample_image.jpg" image_bytes = image_path.read_bytes() @@ -507,7 +502,7 @@ def get_test_image() -> Content: return Content.from_data(data=image_bytes, media_type="image/jpeg") async with AzureCliCredential() as credential: - client = _create_azure_openai_chat_client(credential=credential) + client = OpenAIChatClient(credential=credential) client.function_invocation_configuration["max_iterations"] = 2 for streaming in [False, True]: diff --git a/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py b/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py index 22787f8092..3e37606a65 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py @@ -3,7 +3,8 @@ from __future__ import annotations import os -from collections.abc import Awaitable, Callable +from functools import wraps +from typing import Any from unittest.mock import MagicMock, patch import pytest @@ -35,25 +36,30 @@ ) -def _get_azure_chat_deployment_name() -> str: - return os.getenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME") or os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"] - - -def _create_azure_chat_completion_client( - *, - api_key: str | Callable[[], str | Awaitable[str]] | None = None, - credential: AsyncTokenCredential | None = None, -) -> OpenAIChatCompletionClient: - resolved_api_key = ( - api_key if api_key is not None else None if credential is not None else 
os.environ["AZURE_OPENAI_API_KEY"] - ) - return OpenAIChatCompletionClient( - model=_get_azure_chat_deployment_name(), - api_key=resolved_api_key, - azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], - api_version=os.getenv("AZURE_OPENAI_API_VERSION"), - credential=credential, - ) +def _with_azure_openai_debug() -> Any: + def decorator(func: Any) -> Any: + @wraps(func) + async def wrapper(*args: Any, **kwargs: Any) -> Any: + try: + return await func(*args, **kwargs) + except Exception as exc: + model = os.getenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME") or os.getenv( + "AZURE_OPENAI_DEPLOYMENT_NAME", "" + ) + api_version = os.getenv("AZURE_OPENAI_API_VERSION", "") + endpoint = os.getenv("AZURE_OPENAI_ENDPOINT", "") + debug_message = f"Azure OpenAI debug: endpoint={endpoint}, model={model}, api_version={api_version}" + if hasattr(exc, "add_note"): + exc.add_note(debug_message) + elif exc.args: + exc.args = (f"{exc.args[0]}\n{debug_message}", *exc.args[1:]) + else: + exc.args = (debug_message,) + raise + + return wrapper + + return decorator @tool(approval_mode="never_require") @@ -74,7 +80,7 @@ async def get_weather(location: str) -> str: def test_init_with_azure_endpoint(azure_openai_unit_test_env: dict[str, str]) -> None: - client = _create_azure_chat_completion_client() + client = OpenAIChatCompletionClient(azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT")) assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"] assert isinstance(client, SupportsChatGetResponse) @@ -103,19 +109,6 @@ def test_openai_api_key_wins_over_azure_env(monkeypatch, azure_openai_unit_test_ assert client.azure_endpoint is None -def test_api_version_alone_does_not_override_openai_api_key( - monkeypatch, azure_openai_unit_test_env: dict[str, str] -) -> None: - monkeypatch.setenv("OPENAI_API_KEY", "test-dummy-key") - monkeypatch.setenv("OPENAI_MODEL", "gpt-5") - - client = OpenAIChatCompletionClient(api_version="2024-10-21") - - assert client.model == "gpt-5" - assert 
not isinstance(client.client, AsyncAzureOpenAI) - assert client.azure_endpoint is None - - def test_explicit_credential_wins_over_openai_api_key(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: monkeypatch.setenv("OPENAI_API_KEY", "test-dummy-key") monkeypatch.setenv("OPENAI_MODEL", "gpt-5") @@ -164,6 +157,8 @@ def test_init_does_not_fall_back_to_openai_model_for_azure_env( def test_init_with_credential_wraps_async_token_credential( monkeypatch, azure_openai_unit_test_env: dict[str, str] ) -> None: + monkeypatch.delenv("AZURE_OPENAI_API_KEY", raising=False) + class TestAsyncTokenCredential(AsyncTokenCredential): async def get_token(self, *scopes: str, **kwargs: object): raise NotImplementedError @@ -180,14 +175,6 @@ async def get_token(self, *scopes: str, **kwargs: object): mock_provider.assert_called_once_with(credential, "https://cognitiveservices.azure.com/.default") -@pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_API_VERSION"]], indirect=True) -def test_init_uses_default_azure_api_version(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: - client = _create_azure_chat_completion_client() - - assert client.model == azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"] - assert client.api_version is not None - - def test_openai_base_url_wins_over_azure_aliases(monkeypatch, azure_openai_unit_test_env: dict[str, str]) -> None: monkeypatch.setenv("OPENAI_API_KEY", "test-dummy-key") monkeypatch.setenv("OPENAI_MODEL", "gpt-5") @@ -203,9 +190,10 @@ def test_openai_base_url_wins_over_azure_aliases(monkeypatch, azure_openai_unit_ @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_openai_integration_tests_disabled +@_with_azure_openai_debug() async def test_azure_openai_chat_completion_client_response() -> None: async with AzureCliCredential() as credential: - client = _create_azure_chat_completion_client(credential=credential) + client = OpenAIChatCompletionClient(credential=credential) assert isinstance(client, 
SupportsChatGetResponse) messages = [ @@ -233,9 +221,10 @@ async def test_azure_openai_chat_completion_client_response() -> None: @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_openai_integration_tests_disabled +@_with_azure_openai_debug() async def test_azure_openai_chat_completion_client_response_tools() -> None: async with AzureCliCredential() as credential: - client = _create_azure_chat_completion_client(credential=credential) + client = OpenAIChatCompletionClient(credential=credential) response = await client.get_response( messages=[Message(role="user", text="who are Emily and David?")], @@ -250,9 +239,10 @@ async def test_azure_openai_chat_completion_client_response_tools() -> None: @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_openai_integration_tests_disabled +@_with_azure_openai_debug() async def test_azure_openai_chat_completion_client_streaming() -> None: async with AzureCliCredential() as credential: - client = _create_azure_chat_completion_client(credential=credential) + client = OpenAIChatCompletionClient(credential=credential) response = client.get_response( messages=[ @@ -285,9 +275,10 @@ async def test_azure_openai_chat_completion_client_streaming() -> None: @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_openai_integration_tests_disabled +@_with_azure_openai_debug() async def test_azure_openai_chat_completion_client_streaming_tools() -> None: async with AzureCliCredential() as credential: - client = _create_azure_chat_completion_client(credential=credential) + client = OpenAIChatCompletionClient(credential=credential) response = client.get_response( messages=[Message(role="user", text="who are Emily and David?")], @@ -308,11 +299,12 @@ async def test_azure_openai_chat_completion_client_streaming_tools() -> None: @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_openai_integration_tests_disabled +@_with_azure_openai_debug() async def test_azure_openai_chat_completion_client_agent_basic_run() -> None: 
async with ( AzureCliCredential() as credential, Agent( - client=_create_azure_chat_completion_client(credential=credential), + client=OpenAIChatCompletionClient(credential=credential), ) as agent, ): response = await agent.run("Please respond with exactly: 'This is a response test.'") @@ -325,11 +317,12 @@ async def test_azure_openai_chat_completion_client_agent_basic_run() -> None: @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_openai_integration_tests_disabled +@_with_azure_openai_debug() async def test_azure_openai_chat_completion_client_agent_basic_run_streaming() -> None: async with ( AzureCliCredential() as credential, Agent( - client=_create_azure_chat_completion_client(credential=credential), + client=OpenAIChatCompletionClient(credential=credential), ) as agent, ): full_text = "" @@ -347,11 +340,12 @@ async def test_azure_openai_chat_completion_client_agent_basic_run_streaming() - @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_openai_integration_tests_disabled +@_with_azure_openai_debug() async def test_azure_openai_chat_completion_client_agent_session_persistence() -> None: async with ( AzureCliCredential() as credential, Agent( - client=_create_azure_chat_completion_client(credential=credential), + client=OpenAIChatCompletionClient(credential=credential), instructions="You are a helpful assistant with good memory.", ) as agent, ): @@ -368,12 +362,13 @@ async def test_azure_openai_chat_completion_client_agent_session_persistence() - @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_openai_integration_tests_disabled +@_with_azure_openai_debug() async def test_azure_openai_chat_completion_client_agent_existing_session() -> None: async with AzureCliCredential() as credential: preserved_session = None async with Agent( - client=_create_azure_chat_completion_client(credential=credential), + client=OpenAIChatCompletionClient(credential=credential), instructions="You are a helpful assistant with good memory.", ) as first_agent: 
session = first_agent.create_session() @@ -384,7 +379,7 @@ async def test_azure_openai_chat_completion_client_agent_existing_session() -> N if preserved_session: async with Agent( - client=_create_azure_chat_completion_client(credential=credential), + client=OpenAIChatCompletionClient(credential=credential), instructions="You are a helpful assistant with good memory.", ) as second_agent: second_response = await second_agent.run("What is my name?", session=preserved_session) @@ -397,11 +392,12 @@ async def test_azure_openai_chat_completion_client_agent_existing_session() -> N @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_openai_integration_tests_disabled +@_with_azure_openai_debug() async def test_azure_chat_completion_client_agent_level_tool_persistence() -> None: async with ( AzureCliCredential() as credential, Agent( - client=_create_azure_chat_completion_client(credential=credential), + client=OpenAIChatCompletionClient(credential=credential), instructions="You are a helpful assistant that uses available tools.", tools=[get_weather], ) as agent, diff --git a/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py b/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py index be832231df..3cf62a064d 100644 --- a/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_embedding_client_azure.py @@ -3,6 +3,8 @@ from __future__ import annotations import os +from functools import wraps +from typing import Any from unittest.mock import MagicMock, patch import pytest @@ -25,6 +27,32 @@ ) +def _with_azure_openai_debug() -> Any: + def decorator(func: Any) -> Any: + @wraps(func) + async def wrapper(*args: Any, **kwargs: Any) -> Any: + try: + return await func(*args, **kwargs) + except Exception as exc: + model = os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME") or os.getenv( + "AZURE_OPENAI_DEPLOYMENT_NAME", "" + ) + api_version = 
os.getenv("AZURE_OPENAI_API_VERSION", "") + endpoint = os.getenv("AZURE_OPENAI_ENDPOINT", "") + debug_message = f"Azure OpenAI debug: endpoint={endpoint}, model={model}, api_version={api_version}" + if hasattr(exc, "add_note"): + exc.add_note(debug_message) + elif exc.args: + exc.args = (f"{exc.args[0]}\n{debug_message}", *exc.args[1:]) + else: + exc.args = (debug_message,) + raise + + return wrapper + + return decorator + + def _get_azure_embedding_deployment_name() -> str: return os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME") or os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"] @@ -176,6 +204,7 @@ def test_openai_base_url_wins_over_azure_aliases(monkeypatch, azure_openai_unit_ @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_openai_integration_tests_disabled +@_with_azure_openai_debug() async def test_azure_openai_get_embeddings() -> None: async with AzureCliCredential() as credential: client = _create_azure_embedding_client(credential=credential) @@ -194,6 +223,7 @@ async def test_azure_openai_get_embeddings() -> None: @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_openai_integration_tests_disabled +@_with_azure_openai_debug() async def test_azure_openai_get_embeddings_multiple() -> None: async with AzureCliCredential() as credential: client = _create_azure_embedding_client(credential=credential) @@ -208,6 +238,7 @@ async def test_azure_openai_get_embeddings_multiple() -> None: @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_openai_integration_tests_disabled +@_with_azure_openai_debug() async def test_azure_openai_get_embeddings_with_dimensions() -> None: async with AzureCliCredential() as credential: client = _create_azure_embedding_client(credential=credential) diff --git a/python/samples/02-agents/chat_client/README.md b/python/samples/02-agents/chat_client/README.md index e03d532812..6650e510a9 100644 --- a/python/samples/02-agents/chat_client/README.md +++ b/python/samples/02-agents/chat_client/README.md @@ -57,8 +57,8 @@ 
Depending on the selected client, set the appropriate environment variables: **For OpenAI clients:** - `OPENAI_API_KEY`: Your OpenAI API key -- `OPENAI_CHAT_MODEL_ID`: The OpenAI model for `openai_chat` and `openai_assistants` -- `OPENAI_RESPONSES_MODEL_ID`: The OpenAI model for `openai_responses` +- `OPENAI_CHAT_MODEL`: The OpenAI model for `openai_chat` and `openai_assistants` +- `OPENAI_RESPONSES_MODEL`: The OpenAI model for `openai_responses` **For Anthropic client (`anthropic`):** - `ANTHROPIC_API_KEY`: Your Anthropic API key diff --git a/python/samples/02-agents/devui/README.md b/python/samples/02-agents/devui/README.md index 2bdd6d2233..c5ce2095b8 100644 --- a/python/samples/02-agents/devui/README.md +++ b/python/samples/02-agents/devui/README.md @@ -85,7 +85,7 @@ Alternatively, set environment variables globally: ```bash export OPENAI_API_KEY="your-key-here" -export OPENAI_CHAT_MODEL_ID="gpt-4o" +export OPENAI_CHAT_MODEL="gpt-4o" ``` ## Using DevUI with Your Own Agents diff --git a/python/samples/02-agents/mcp/README.md b/python/samples/02-agents/mcp/README.md index 1df1a449b6..e07d63ddbd 100644 --- a/python/samples/02-agents/mcp/README.md +++ b/python/samples/02-agents/mcp/README.md @@ -17,7 +17,7 @@ The Model Context Protocol (MCP) is an open standard for connecting AI agents to ## Prerequisites - `OPENAI_API_KEY` environment variable -- `OPENAI_RESPONSES_MODEL_ID` environment variable +- `OPENAI_RESPONSES_MODEL` environment variable For `mcp_github_pat.py`: - `GITHUB_PAT` - Your GitHub Personal Access Token (create at https://github.com/settings/tokens) diff --git a/python/samples/02-agents/middleware/README.md b/python/samples/02-agents/middleware/README.md index 754f96e815..5bd318575c 100644 --- a/python/samples/02-agents/middleware/README.md +++ b/python/samples/02-agents/middleware/README.md @@ -25,7 +25,7 @@ The new usage tracking sample uses `OpenAIResponsesClient`, so set the usual Ope ```bash export OPENAI_API_KEY="your-openai-api-key" -export 
OPENAI_RESPONSES_MODEL_ID="gpt-4.1-mini" +export OPENAI_RESPONSES_MODEL="gpt-4.1-mini" ``` Then run: diff --git a/python/samples/02-agents/observability/.env.example b/python/samples/02-agents/observability/.env.example index 11f0a07810..c1c24a5a72 100644 --- a/python/samples/02-agents/observability/.env.example +++ b/python/samples/02-agents/observability/.env.example @@ -40,8 +40,8 @@ ENABLE_SENSITIVE_DATA=true # OpenAI specific variables # ========================== OPENAI_API_KEY="..." -OPENAI_RESPONSES_MODEL_ID="gpt-4o-2024-08-06" -OPENAI_CHAT_MODEL_ID="gpt-4o-2024-08-06" +OPENAI_RESPONSES_MODEL="gpt-4o-2024-08-06" +OPENAI_CHAT_MODEL="gpt-4o-2024-08-06" # Azure AI Foundry specific variables # ==================================== diff --git a/python/samples/05-end-to-end/m365-agent/.env.example b/python/samples/05-end-to-end/m365-agent/.env.example index 3c21a9e91c..100c2bf69d 100644 --- a/python/samples/05-end-to-end/m365-agent/.env.example +++ b/python/samples/05-end-to-end/m365-agent/.env.example @@ -1,6 +1,6 @@ # OpenAI Configuration OPENAI_API_KEY= -OPENAI_CHAT_MODEL_ID= +OPENAI_CHAT_MODEL= # Agent 365 Agentic Authentication Configuration USE_ANONYMOUS_MODE= diff --git a/python/samples/05-end-to-end/m365-agent/README.md b/python/samples/05-end-to-end/m365-agent/README.md index ecd1e6f632..6962a53229 100644 --- a/python/samples/05-end-to-end/m365-agent/README.md +++ b/python/samples/05-end-to-end/m365-agent/README.md @@ -21,7 +21,7 @@ export USE_ANONYMOUS_MODE=True # set to false if using auth # OpenAI export OPENAI_API_KEY="..." -export OPENAI_CHAT_MODEL_ID="..." +export OPENAI_CHAT_MODEL="..." 
``` ## Installing Dependencies From 589a5be1918930f6e3fcc1d9560f8705ca22acbb Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Fri, 27 Mar 2026 13:24:21 +0100 Subject: [PATCH 25/30] remove openai assistants int tests --- .../tests/openai/test_assistant_provider.py | 62 ------------------- 1 file changed, 62 deletions(-) diff --git a/python/packages/openai/tests/openai/test_assistant_provider.py b/python/packages/openai/tests/openai/test_assistant_provider.py index c05ea950a6..df811f6c37 100644 --- a/python/packages/openai/tests/openai/test_assistant_provider.py +++ b/python/packages/openai/tests/openai/test_assistant_provider.py @@ -1,6 +1,5 @@ # Copyright (c) Microsoft. All rights reserved. -import os from typing import Annotated, Any from unittest.mock import AsyncMock, MagicMock @@ -750,64 +749,3 @@ def test_merge_single_user_tool(self, mock_async_openai: MagicMock) -> None: # endregion - -# region Integration Tests - -skip_if_openai_integration_tests_disabled = pytest.mark.skipif( - os.getenv("OPENAI_API_KEY", "") in ("", "test-dummy-key"), - reason="No real OPENAI_API_KEY provided; skipping integration tests.", -) - - -@pytest.mark.flaky -@pytest.mark.integration -@skip_if_openai_integration_tests_disabled -class TestOpenAIAssistantProviderIntegration: - """Integration tests requiring real OpenAI API.""" - - async def test_create_and_run_agent(self) -> None: - """End-to-end test of creating and running an agent.""" - provider = OpenAIAssistantProvider() - - agent = await provider.create_agent( - name="IntegrationTestAgent", - model=os.environ.get("OPENAI_MODEL", "gpt-4"), - instructions="You are a helpful assistant. 
Respond briefly.", - ) - - try: - result = await agent.run("Say 'hello' and nothing else.") - result_text = str(result) - assert "hello" in result_text.lower() - finally: - # Clean up the assistant - await provider._client.beta.assistants.delete(agent.id) # type: ignore[reportPrivateUsage, union-attr] - - async def test_create_agent_with_function_tools_integration(self) -> None: - """Integration test with function tools.""" - provider = OpenAIAssistantProvider() - - @tool(approval_mode="never_require") - def get_current_time() -> str: - """Get the current time.""" - from datetime import datetime - - return datetime.now().strftime("%H:%M") - - agent = await provider.create_agent( - name="TimeAgent", - model=os.environ.get("OPENAI_MODEL", "gpt-4"), - instructions="You are a helpful assistant.", - tools=[get_current_time], - ) - - try: - result = await agent.run("What time is it? Use the get_current_time function.") - result_text = str(result) - # The response should contain time information - assert ":" in result_text or "time" in result_text.lower() - finally: - await provider._client.beta.assistants.delete(agent.id) # type: ignore[reportPrivateUsage, union-attr] - - -# endregion From 8892dc56003f4c96a194dc2b18a4bdef6ec4be70 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Fri, 27 Mar 2026 13:59:14 +0100 Subject: [PATCH 26/30] improvements in int tests --- .github/workflows/python-merge-tests.yml | 2 +- .../test_azure_responses_client.py | 161 ++++++----------- .../tests/foundry/test_foundry_chat_client.py | 116 ++++++------ .../tests/openai/test_openai_chat_client.py | 165 +++++++----------- .../openai/test_openai_chat_client_azure.py | 33 ++-- .../test_openai_chat_completion_client.py | 116 ++++++------ ...est_openai_chat_completion_client_azure.py | 9 +- 7 files changed, 241 insertions(+), 361 deletions(-) diff --git a/.github/workflows/python-merge-tests.yml b/.github/workflows/python-merge-tests.yml index f90e193c26..06d2f794ea 100644 --- 
a/.github/workflows/python-merge-tests.yml +++ b/.github/workflows/python-merge-tests.yml @@ -402,7 +402,7 @@ jobs: env: AZURE_AI_PROJECT_ENDPOINT: ${{ secrets.AZUREAI__ENDPOINT }} AZURE_AI_MODEL_DEPLOYMENT_NAME: ${{ vars.AZUREAI__DEPLOYMENTNAME }} - FOUNDRY_PROJECT_ENDPOINT: ${{ secrets.FOUNDRY_PROJECT_ENDPOINT }} + FOUNDRY_PROJECT_ENDPOINT: ${{ vars.FOUNDRY_PROJECT_ENDPOINT }} FOUNDRY_MODEL: ${{ vars.FOUNDRY_MODEL }} FOUNDRY_AGENT_NAME: ${{ vars.FOUNDRY_AGENT_NAME }} FOUNDRY_AGENT_VERSION: ${{ vars.FOUNDRY_AGENT_VERSION }} diff --git a/python/packages/azure-ai/tests/azure_openai/test_azure_responses_client.py b/python/packages/azure-ai/tests/azure_openai/test_azure_responses_client.py index 92951f820e..99bd2061b7 100644 --- a/python/packages/azure-ai/tests/azure_openai/test_azure_responses_client.py +++ b/python/packages/azure-ai/tests/azure_openai/test_azure_responses_client.py @@ -272,68 +272,54 @@ async def test_integration_options( # Need at least 2 iterations for tool_choice tests: one to get function call, one to get final response client.function_invocation_configuration["max_iterations"] = 2 - for streaming in [False, True]: - # Prepare test message - if option_name == "tools" or option_name == "tool_choice": - # Use weather-related prompt for tool tests - messages = [Message(role="user", text="What is the weather in Seattle?")] - elif option_name == "response_format": - # Use prompt that works well with structured output - messages = [ - Message(role="user", text="The weather in Seattle is sunny"), - Message(role="user", text="What is the weather in Seattle?"), - ] - else: - # Generic prompt for simple options - messages = [Message(role="user", text="Say 'Hello World' briefly.")] + # Prepare test message + if option_name == "tools" or option_name == "tool_choice": + # Use weather-related prompt for tool tests + messages = [Message(role="user", text="What is the weather in Seattle?")] + elif option_name == "response_format": + # Use prompt that works well 
with structured output + messages = [ + Message(role="user", text="The weather in Seattle is sunny"), + Message(role="user", text="What is the weather in Seattle?"), + ] + else: + # Generic prompt for simple options + messages = [Message(role="user", text="Say 'Hello World' briefly.")] - # Build options dict - options: dict[str, Any] = {option_name: option_value} + # Build options dict + options: dict[str, Any] = {option_name: option_value} - # Add tools if testing tool_choice to avoid errors - if option_name == "tool_choice": - options["tools"] = [get_weather] + # Add tools if testing tool_choice to avoid errors + if option_name == "tool_choice": + options["tools"] = [get_weather] - if streaming: - # Test streaming mode - response_stream = client.get_response( - messages=messages, - stream=True, - options=options, - ) + # Test streaming mode + response = await client.get_response(messages=messages, stream=True, options=options).get_final_response() - response = await response_stream.get_final_response() - else: - # Test non-streaming mode - response = await client.get_response( - messages=messages, - options=options, - ) + assert response is not None + assert isinstance(response, ChatResponse) + assert response.text is not None, f"No text in response for option '{option_name}'" + assert len(response.text) > 0, f"Empty response for option '{option_name}'" - assert response is not None - assert isinstance(response, ChatResponse) - assert response.text is not None, f"No text in response for option '{option_name}'" - assert len(response.text) > 0, f"Empty response for option '{option_name}'" - - # Validate based on option type - if needs_validation: - if option_name == "tools" or option_name == "tool_choice": - # Should have called the weather function - text = response.text.lower() - assert "sunny" in text or "seattle" in text, f"Tool not invoked for {option_name}" - elif option_name == "response_format": - if option_value == OutputStruct: - # Should have structured 
output - assert response.value is not None, "No structured output" - assert isinstance(response.value, OutputStruct) - assert "seattle" in response.value.location.lower() - else: - # Runtime JSON schema - assert response.value is None, "No structured output, can't parse any json." - response_value = json.loads(response.text) - assert isinstance(response_value, dict) - assert "location" in response_value - assert "seattle" in response_value["location"].lower() + # Validate based on option type + if needs_validation: + if option_name == "tools" or option_name == "tool_choice": + # Should have called the weather function + text = response.text.lower() + assert "sunny" in text or "seattle" in text, f"Tool not invoked for {option_name}" + elif option_name == "response_format": + if option_value == OutputStruct: + # Should have structured output + assert response.value is not None, "No structured output" + assert isinstance(response.value, OutputStruct) + assert "seattle" in response.value.location.lower() + else: + # Runtime JSON schema + assert response.value is None, "No structured output, can't parse any json." + response_value = json.loads(response.text) + assert isinstance(response_value, dict) + assert "location" in response_value + assert "seattle" in response_value["location"].lower() @pytest.mark.flaky @@ -342,53 +328,22 @@ async def test_integration_options( @_with_azure_openai_debug() async def test_integration_web_search() -> None: client = AzureOpenAIResponsesClient(credential=AzureCliCredential()) + response = await client.get_response( + messages=[ + Message( + role="user", + text="What is the current weather? 
Do not ask for my current location.", + ) + ], + options={ + "tools": [ + AzureOpenAIResponsesClient.get_web_search_tool(user_location={"country": "US", "city": "Seattle"}) + ] + }, + stream=True, + ).get_final_response() - for streaming in [False, True]: - content = { - "messages": [ - Message( - role="user", - text="Who are the main characters of Kpop Demon Hunters? Do a web search to find the answer.", - ) - ], - "options": { - "tool_choice": "auto", - "tools": [AzureOpenAIResponsesClient.get_web_search_tool()], - }, - "stream": streaming, - } - if streaming: - response = await client.get_response(**content).get_final_response() - else: - response = await client.get_response(**content) - - assert response is not None - assert isinstance(response, ChatResponse) - assert "Rumi" in response.text - assert "Mira" in response.text - assert "Zoey" in response.text - - # Test that the client will use the web search tool with location - content = { - "messages": [ - Message( - role="user", - text="What is the current weather? 
Do not ask for my current location.", - ) - ], - "options": { - "tool_choice": "auto", - "tools": [ - AzureOpenAIResponsesClient.get_web_search_tool(user_location={"country": "US", "city": "Seattle"}) - ], - }, - "stream": streaming, - } - if streaming: - response = await client.get_response(**content).get_final_response() - else: - response = await client.get_response(**content) - assert response.text is not None + assert response.text is not None @pytest.mark.flaky diff --git a/python/packages/foundry/tests/foundry/test_foundry_chat_client.py b/python/packages/foundry/tests/foundry/test_foundry_chat_client.py index 039dd8f877..7489be1896 100644 --- a/python/packages/foundry/tests/foundry/test_foundry_chat_client.py +++ b/python/packages/foundry/tests/foundry/test_foundry_chat_client.py @@ -593,42 +593,38 @@ async def test_integration_options( client = FoundryChatClient(credential=AzureCliCredential()) client.function_invocation_configuration["max_iterations"] = 2 - for streaming in [False, True]: + if option_name.startswith("tools") or option_name.startswith("tool_choice"): + messages = [Message(role="user", text="What is the weather in Seattle?")] + elif option_name.startswith("response_format"): + messages = [Message(role="user", text="The weather in Seattle is sunny")] + messages.append(Message(role="user", text="What is the weather in Seattle?")) + else: + messages = [Message(role="user", text="Say 'Hello World' briefly.")] + + options: dict[str, Any] = {option_name: option_value} + if option_name.startswith("tool_choice"): + options["tools"] = [get_weather] + + response = await client.get_response(messages=messages, options=options, stream=True).get_final_response() + + assert isinstance(response, ChatResponse) + assert response.text is not None + assert len(response.text) > 0 + + if needs_validation: if option_name.startswith("tools") or option_name.startswith("tool_choice"): - messages = [Message(role="user", text="What is the weather in Seattle?")] + text 
= response.text.lower() + assert "sunny" in text or "seattle" in text elif option_name.startswith("response_format"): - messages = [Message(role="user", text="The weather in Seattle is sunny")] - messages.append(Message(role="user", text="What is the weather in Seattle?")) - else: - messages = [Message(role="user", text="Say 'Hello World' briefly.")] - - options: dict[str, Any] = {option_name: option_value} - if option_name.startswith("tool_choice"): - options["tools"] = [get_weather] - - if streaming: - response = await client.get_response(messages=messages, options=options, stream=True).get_final_response() - else: - response = await client.get_response(messages=messages, options=options) - - assert isinstance(response, ChatResponse) - assert response.text is not None - assert len(response.text) > 0 - - if needs_validation: - if option_name.startswith("tools") or option_name.startswith("tool_choice"): - text = response.text.lower() - assert "sunny" in text or "seattle" in text - elif option_name.startswith("response_format"): - if option_value == OutputStruct: - assert response.value is not None - assert isinstance(response.value, OutputStruct) - assert "seattle" in response.value.location.lower() - else: - assert response.value is None - response_value = json.loads(response.text) - assert isinstance(response_value, dict) - assert "location" in response_value + if option_value == OutputStruct: + assert response.value is not None + assert isinstance(response.value, OutputStruct) + assert "seattle" in response.value.location.lower() + else: + assert response.value is None + response_value = json.loads(response.text) + assert isinstance(response_value, dict) + assert "location" in response_value @pytest.mark.flaky @@ -638,26 +634,22 @@ async def test_integration_options( async def test_integration_web_search() -> None: client = FoundryChatClient(credential=AzureCliCredential()) - for streaming in [False, True]: - web_search_tool = 
FoundryChatClient.get_web_search_tool() - content = { - "messages": [ - Message( - role="user", - text="Who are the main characters of Kpop Demon Hunters? Do a web search to find the answer.", - ) - ], - "options": {"tool_choice": "auto", "tools": [web_search_tool]}, - } - if streaming: - response = await client.get_response(stream=True, **content).get_final_response() - else: - response = await client.get_response(**content) + web_search_tool = FoundryChatClient.get_web_search_tool() + content = { + "messages": [ + Message( + role="user", + text="Who are the main characters of Kpop Demon Hunters? Do a web search to find the answer.", + ) + ], + "options": {"tool_choice": "auto", "tools": [web_search_tool]}, + } + response = await client.get_response(stream=True, **content).get_final_response() - assert isinstance(response, ChatResponse) - assert "Rumi" in response.text - assert "Mira" in response.text - assert "Zoey" in response.text + assert isinstance(response, ChatResponse) + assert "Rumi" in response.text + assert "Mira" in response.text + assert "Zoey" in response.text @pytest.mark.flaky @@ -675,19 +667,15 @@ def get_test_image() -> Content: client = FoundryChatClient(credential=AzureCliCredential()) client.function_invocation_configuration["max_iterations"] = 2 - for streaming in [False, True]: - messages = [Message(role="user", text="Call the get_test_image tool and describe what you see.")] - options: dict[str, Any] = {"tools": [get_test_image], "tool_choice": "auto"} + messages = [Message(role="user", text="Call the get_test_image tool and describe what you see.")] + options: dict[str, Any] = {"tools": [get_test_image], "tool_choice": "auto"} - if streaming: - response = await client.get_response(messages=messages, options=options, stream=True).get_final_response() - else: - response = await client.get_response(messages=messages, options=options) + response = await client.get_response(messages=messages, options=options, stream=True).get_final_response() - 
assert isinstance(response, ChatResponse) - assert response.text is not None - assert len(response.text) > 0 - assert "house" in response.text.lower(), f"Model did not describe the house image. Response: {response.text}" + assert isinstance(response, ChatResponse) + assert response.text is not None + assert len(response.text) > 0 + assert "house" in response.text.lower(), f"Model did not describe the house image. Response: {response.text}" def test_get_code_interpreter_tool() -> None: diff --git a/python/packages/openai/tests/openai/test_openai_chat_client.py b/python/packages/openai/tests/openai/test_openai_chat_client.py index 1f7a5fffce..3c09839594 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_client.py +++ b/python/packages/openai/tests/openai/test_openai_chat_client.py @@ -3162,70 +3162,56 @@ async def test_integration_options( they don't cause failures. Options marked with needs_validation also check that the feature actually works correctly. """ - openai_responses_client = OpenAIChatClient() + client = OpenAIChatClient() # Need at least 2 iterations for tool_choice tests: one to get function call, one to get final response - openai_responses_client.function_invocation_configuration["max_iterations"] = 2 + client.function_invocation_configuration["max_iterations"] = 2 - for streaming in [False, True]: - # Prepare test message - if option_name.startswith("tools") or option_name.startswith("tool_choice"): - # Use weather-related prompt for tool tests - messages = [Message(role="user", text="What is the weather in Seattle?")] - elif option_name.startswith("response_format"): - # Use prompt that works well with structured output - messages = [Message(role="user", text="The weather in Seattle is sunny")] - messages.append(Message(role="user", text="What is the weather in Seattle?")) - else: - # Generic prompt for simple options - messages = [Message(role="user", text="Say 'Hello World' briefly.")] + # Prepare test message + if 
option_name.startswith("tools") or option_name.startswith("tool_choice"): + # Use weather-related prompt for tool tests + messages = [Message(role="user", text="What is the weather in Seattle?")] + elif option_name.startswith("response_format"): + # Use prompt that works well with structured output + messages = [Message(role="user", text="The weather in Seattle is sunny")] + messages.append(Message(role="user", text="What is the weather in Seattle?")) + else: + # Generic prompt for simple options + messages = [Message(role="user", text="Say 'Hello World' briefly.")] - # Build options dict - options: dict[str, Any] = {option_name: option_value} + # Build options dict + options: dict[str, Any] = {option_name: option_value} - # Add tools if testing tool_choice to avoid errors - if option_name.startswith("tool_choice"): - options["tools"] = [get_weather] + # Add tools if testing tool_choice to avoid errors + if option_name.startswith("tool_choice"): + options["tools"] = [get_weather] - if streaming: - # Test streaming mode - response_stream = openai_responses_client.get_response( - stream=True, - messages=messages, - options=options, - ) + # Test streaming mode + response = await client.get_response(stream=True, messages=messages, options=options).get_final_response() - response = await response_stream.get_final_response() - else: - # Test non-streaming mode - response = await openai_responses_client.get_response( - messages=messages, - options=options, - ) + assert response is not None + assert isinstance(response, ChatResponse) + assert response.text is not None, f"No text in response for option '{option_name}'" + assert len(response.text) > 0, f"Empty response for option '{option_name}'" - assert response is not None - assert isinstance(response, ChatResponse) - assert response.text is not None, f"No text in response for option '{option_name}'" - assert len(response.text) > 0, f"Empty response for option '{option_name}'" - - # Validate based on option type - if 
needs_validation: - if option_name.startswith("tools") or option_name.startswith("tool_choice"): - # Should have called the weather function - text = response.text.lower() - assert "sunny" in text or "seattle" in text, f"Tool not invoked for {option_name}" - elif option_name.startswith("response_format"): - if option_value == OutputStruct: - # Should have structured output - assert response.value is not None, "No structured output" - assert isinstance(response.value, OutputStruct) - assert "seattle" in response.value.location.lower() - else: - # Runtime JSON schema - assert response.value is None, "No structured output, can't parse any json." - response_value = json.loads(response.text) - assert isinstance(response_value, dict) - assert "location" in response_value - assert "seattle" in response_value["location"].lower() + # Validate based on option type + if needs_validation: + if option_name.startswith("tools") or option_name.startswith("tool_choice"): + # Should have called the weather function + text = response.text.lower() + assert "sunny" in text or "seattle" in text, f"Tool not invoked for {option_name}" + elif option_name.startswith("response_format"): + if option_value == OutputStruct: + # Should have structured output + assert response.value is not None, "No structured output" + assert isinstance(response.value, OutputStruct) + assert "seattle" in response.value.location.lower() + else: + # Runtime JSON schema + assert response.value is None, "No structured output, can't parse any json." 
+ response_value = json.loads(response.text) + assert isinstance(response_value, dict) + assert "location" in response_value + assert "seattle" in response_value["location"].lower() @pytest.mark.timeout(300) @@ -3235,53 +3221,24 @@ async def test_integration_options( async def test_integration_web_search() -> None: client = OpenAIChatClient(model="gpt-5") - for streaming in [False, True]: - # Use static method for web search tool - web_search_tool = OpenAIChatClient.get_web_search_tool() - content = { - "messages": [ - Message( - role="user", - text="Who are the main characters of Kpop Demon Hunters? Do a web search to find the answer.", - ) - ], - "options": { - "tool_choice": "auto", - "tools": [web_search_tool], - }, - } - if streaming: - response = await client.get_response(stream=True, **content).get_final_response() - else: - response = await client.get_response(**content) - - assert response is not None - assert isinstance(response, ChatResponse) - assert "Rumi" in response.text - assert "Mira" in response.text - assert "Zoey" in response.text - - # Test that the client will use the web search tool with location - web_search_tool_with_location = OpenAIChatClient.get_web_search_tool( - user_location={"country": "US", "city": "Seattle"}, - ) - content = { - "messages": [ - Message( - role="user", - text="What is the current weather? Do not ask for my current location.", - ) - ], - "options": { - "tool_choice": "auto", - "tools": [web_search_tool_with_location], - }, - } - if streaming: - response = await client.get_response(stream=True, **content).get_final_response() - else: - response = await client.get_response(**content) - assert response.text is not None + # Test that the client will use the web search tool with location + web_search_tool_with_location = OpenAIChatClient.get_web_search_tool( + user_location={"country": "US", "city": "Seattle"}, + ) + content = { + "messages": [ + Message( + role="user", + text="What is the current weather? 
Do not ask for my current location.", + ) + ], + "options": { + "tool_choice": "auto", + "tools": [web_search_tool_with_location], + }, + } + response = await client.get_response(stream=True, **content).get_final_response() + assert response.text is not None @pytest.mark.skip( diff --git a/python/packages/openai/tests/openai/test_openai_chat_client_azure.py b/python/packages/openai/tests/openai/test_openai_chat_client_azure.py index 5f3e7a740b..918fe98767 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_chat_client_azure.py @@ -342,25 +342,20 @@ async def test_integration_web_search() -> None: async with AzureCliCredential() as credential: client = OpenAIChatClient(credential=credential) - for streaming in [False, True]: - content = { - "messages": [ - Message( - role="user", - text="What is the current weather? Do not ask for my current location.", - ) - ], - "options": { - "tool_choice": "auto", - "tools": [OpenAIChatClient.get_web_search_tool(user_location={"country": "US", "city": "Seattle"})], - }, - "stream": streaming, - } - if streaming: - response = await client.get_response(**content).get_final_response() - else: - response = await client.get_response(**content) - assert response.text is not None + response = await client.get_response( + messages=[ + Message( + role="user", + text="What is the current weather? 
Do not ask for my current location.", + ) + ], + options={ + "tools": [OpenAIChatClient.get_web_search_tool(user_location={"country": "US", "city": "Seattle"})], + }, + stream=True, + ).get_final_response() + assert isinstance(response, ChatResponse) + assert response.text is not None @pytest.mark.flaky diff --git a/python/packages/openai/tests/openai/test_openai_chat_completion_client.py b/python/packages/openai/tests/openai/test_openai_chat_completion_client.py index 1a2d333c47..deee60ac7a 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_completion_client.py +++ b/python/packages/openai/tests/openai/test_openai_chat_completion_client.py @@ -1488,71 +1488,61 @@ async def test_integration_options( # Need at least 2 iterations for tool_choice tests: one to get function call, one to get final response client.function_invocation_configuration["max_iterations"] = 2 - for streaming in [False, True]: - # Prepare test message + # Prepare test message + if option_name.startswith("tools") or option_name.startswith("tool_choice"): + # Use weather-related prompt for tool tests + messages = [Message(role="user", text="What is the weather in Seattle?")] + elif option_name.startswith("response_format"): + # Use prompt that works well with structured output + messages = [Message(role="user", text="The weather in Seattle is sunny")] + messages.append(Message(role="user", text="What is the weather in Seattle?")) + else: + # Generic prompt for simple options + messages = [Message(role="user", text="Say 'Hello World' briefly.")] + + # Build options dict + options: dict[str, Any] = {option_name: option_value} + + # Add tools if testing tool_choice to avoid errors + if option_name.startswith("tool_choice"): + options["tools"] = [get_weather] + + # Test streaming mode + response = await client.get_response( + messages=messages, + stream=True, + options=options, + ).get_final_response() + + assert response is not None + assert isinstance(response, ChatResponse) + assert 
response.messages is not None + if not option_name.startswith("tool_choice") and ( + (isinstance(option_value, str) and option_value != "required") + or (isinstance(option_value, dict) and option_value.get("mode") != "required") + ): + assert response.text is not None, f"No text in response for option '{option_name}'" + assert len(response.text) > 0, f"Empty response for option '{option_name}'" + + # Validate based on option type + if needs_validation: if option_name.startswith("tools") or option_name.startswith("tool_choice"): - # Use weather-related prompt for tool tests - messages = [Message(role="user", text="What is the weather in Seattle?")] + # Should have called the weather function + text = response.text.lower() + assert "sunny" in text or "seattle" in text, f"Tool not invoked for {option_name}" elif option_name.startswith("response_format"): - # Use prompt that works well with structured output - messages = [Message(role="user", text="The weather in Seattle is sunny")] - messages.append(Message(role="user", text="What is the weather in Seattle?")) - else: - # Generic prompt for simple options - messages = [Message(role="user", text="Say 'Hello World' briefly.")] - - # Build options dict - options: dict[str, Any] = {option_name: option_value} - - # Add tools if testing tool_choice to avoid errors - if option_name.startswith("tool_choice"): - options["tools"] = [get_weather] - - if streaming: - # Test streaming mode - response_stream = client.get_response( - messages=messages, - stream=True, - options=options, - ) - - response = await response_stream.get_final_response() - else: - # Test non-streaming mode - response = await client.get_response( - messages=messages, - options=options, - ) - - assert response is not None - assert isinstance(response, ChatResponse) - assert response.messages is not None - if not option_name.startswith("tool_choice") and ( - (isinstance(option_value, str) and option_value != "required") - or (isinstance(option_value, dict) and 
option_value.get("mode") != "required") - ): - assert response.text is not None, f"No text in response for option '{option_name}'" - assert len(response.text) > 0, f"Empty response for option '{option_name}'" - - # Validate based on option type - if needs_validation: - if option_name.startswith("tools") or option_name.startswith("tool_choice"): - # Should have called the weather function - text = response.text.lower() - assert "sunny" in text or "seattle" in text, f"Tool not invoked for {option_name}" - elif option_name.startswith("response_format"): - if option_value == OutputStruct: - # Should have structured output - assert response.value is not None, "No structured output" - assert isinstance(response.value, OutputStruct) - assert "seattle" in response.value.location.lower() - else: - # Runtime JSON schema - assert response.value is None, "No structured output, can't parse any json." - response_value = json.loads(response.text) - assert isinstance(response_value, dict) - assert "location" in response_value - assert "seattle" in response_value["location"].lower() + if option_value == OutputStruct: + # Should have structured output + assert response.value is not None, "No structured output" + assert isinstance(response.value, OutputStruct) + assert "seattle" in response.value.location.lower() + else: + # Runtime JSON schema + assert response.value is None, "No structured output, can't parse any json." 
+ response_value = json.loads(response.text) + assert isinstance(response_value, dict) + assert "location" in response_value + assert "seattle" in response_value["location"].lower() @pytest.mark.flaky diff --git a/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py b/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py index 3e37606a65..f9edab227a 100644 --- a/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py +++ b/python/packages/openai/tests/openai/test_openai_chat_completion_client_azure.py @@ -321,15 +321,10 @@ async def test_azure_openai_chat_completion_client_agent_basic_run() -> None: async def test_azure_openai_chat_completion_client_agent_basic_run_streaming() -> None: async with ( AzureCliCredential() as credential, - Agent( - client=OpenAIChatCompletionClient(credential=credential), - ) as agent, + Agent(client=OpenAIChatCompletionClient(credential=credential)) as agent, ): full_text = "" - async for chunk in agent.run( - "Please respond with exactly: 'This is a streaming response test.'", - stream=True, - ): + async for chunk in agent.run("Please respond with exactly: 'This is a streaming response test.'", stream=True): assert isinstance(chunk, AgentResponseUpdate) if chunk.text: full_text += chunk.text From efb8da2b5f1ba6fdf0f32bcbdb86fce59378fc71 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Fri, 27 Mar 2026 13:59:43 +0100 Subject: [PATCH 27/30] fix env var --- .github/workflows/python-integration-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/python-integration-tests.yml b/.github/workflows/python-integration-tests.yml index 6066f4bab7..c26ce0ccbf 100644 --- a/.github/workflows/python-integration-tests.yml +++ b/.github/workflows/python-integration-tests.yml @@ -258,7 +258,7 @@ jobs: env: AZURE_AI_PROJECT_ENDPOINT: ${{ secrets.AZUREAI__ENDPOINT }} AZURE_AI_MODEL_DEPLOYMENT_NAME: ${{ vars.AZUREAI__DEPLOYMENTNAME }} 
- FOUNDRY_PROJECT_ENDPOINT: ${{ secrets.FOUNDRY_PROJECT_ENDPOINT }} + FOUNDRY_PROJECT_ENDPOINT: ${{ vars.FOUNDRY_PROJECT_ENDPOINT }} FOUNDRY_MODEL: ${{ vars.FOUNDRY_MODEL }} FOUNDRY_AGENT_NAME: ${{ vars.FOUNDRY_AGENT_NAME }} FOUNDRY_AGENT_VERSION: ${{ vars.FOUNDRY_AGENT_VERSION }} From 3dd8679ee62ae7a98c47ad01e5405930f386d1ca Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Fri, 27 Mar 2026 14:00:47 +0100 Subject: [PATCH 28/30] fix env vars --- .github/workflows/python-integration-tests.yml | 4 ++-- .github/workflows/python-merge-tests.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/python-integration-tests.yml b/.github/workflows/python-integration-tests.yml index c26ce0ccbf..2d12425312 100644 --- a/.github/workflows/python-integration-tests.yml +++ b/.github/workflows/python-integration-tests.yml @@ -210,8 +210,8 @@ jobs: OPENAI_API_KEY: ${{ secrets.OPENAI__APIKEY }} AZURE_OPENAI_ENDPOINT: ${{ vars.AZUREOPENAI__ENDPOINT }} AZURE_OPENAI_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }} - FOUNDRY_MODEL: ${{ vars.AZUREAI__DEPLOYMENTNAME }} - FOUNDRY_PROJECT_ENDPOINT: ${{ secrets.AZUREAI__ENDPOINT }} + FOUNDRY_PROJECT_ENDPOINT: ${{ vars.FOUNDRY_PROJECT_ENDPOINT }} + FOUNDRY_MODEL: ${{ vars.FOUNDRY_MODEL }} FUNCTIONS_WORKER_RUNTIME: "python" DURABLE_TASK_SCHEDULER_CONNECTION_STRING: "Endpoint=http://localhost:8080;TaskHub=default;Authentication=None" AzureWebJobsStorage: "UseDevelopmentStorage=true" diff --git a/.github/workflows/python-merge-tests.yml b/.github/workflows/python-merge-tests.yml index 06d2f794ea..f32fceccb5 100644 --- a/.github/workflows/python-merge-tests.yml +++ b/.github/workflows/python-merge-tests.yml @@ -341,8 +341,8 @@ jobs: OPENAI_API_KEY: ${{ secrets.OPENAI__APIKEY }} AZURE_OPENAI_ENDPOINT: ${{ vars.AZUREOPENAI__ENDPOINT }} AZURE_OPENAI_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }} - FOUNDRY_MODEL: ${{ vars.AZUREAI__DEPLOYMENTNAME }} - 
FOUNDRY_PROJECT_ENDPOINT: ${{ secrets.AZUREAI__ENDPOINT }} + FOUNDRY_PROJECT_ENDPOINT: ${{ vars.FOUNDRY_PROJECT_ENDPOINT }} + FOUNDRY_MODEL: ${{ vars.FOUNDRY_MODEL }} FUNCTIONS_WORKER_RUNTIME: "python" DURABLE_TASK_SCHEDULER_CONNECTION_STRING: "Endpoint=http://localhost:8080;TaskHub=default;Authentication=None" AzureWebJobsStorage: "UseDevelopmentStorage=true" From 391614a661f78f0c0163f74a3ef76c0d9617ec30 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Fri, 27 Mar 2026 14:23:03 +0100 Subject: [PATCH 29/30] fix azure responses test --- .../test_azure_responses_client_foundry.py | 53 +------------------ 1 file changed, 1 insertion(+), 52 deletions(-) diff --git a/python/packages/azure-ai/tests/azure_openai/test_azure_responses_client_foundry.py b/python/packages/azure-ai/tests/azure_openai/test_azure_responses_client_foundry.py index f44dae280b..bbf10e7b88 100644 --- a/python/packages/azure-ai/tests/azure_openai/test_azure_responses_client_foundry.py +++ b/python/packages/azure-ai/tests/azure_openai/test_azure_responses_client_foundry.py @@ -1,11 +1,10 @@ # Copyright (c) Microsoft. All rights reserved. 
-import os import warnings from unittest.mock import MagicMock import pytest -from agent_framework import Agent, SupportsChatGetResponse, tool +from agent_framework import SupportsChatGetResponse warnings.filterwarnings( "ignore", @@ -18,11 +17,6 @@ pytestmark = pytest.mark.filterwarnings("ignore:AzureOpenAIResponsesClient is deprecated\\..*:DeprecationWarning") -skip_if_foundry_integration_tests_disabled = pytest.mark.skipif( - os.getenv("FOUNDRY_PROJECT_ENDPOINT", "") == "" or os.getenv("FOUNDRY_MODEL", "") == "", - reason="No real FOUNDRY_PROJECT_ENDPOINT or FOUNDRY_MODEL provided; skipping integration tests.", -) - def test_init_with_project_client(azure_openai_unit_test_env: dict[str, str]) -> None: """Test initialization with an existing AIProjectClient.""" @@ -135,48 +129,3 @@ def test_create_client_from_project_missing_credential() -> None: project_endpoint="https://test-project.services.ai.azure.com", credential=None, ) - - -@pytest.mark.flaky -@pytest.mark.integration -@skip_if_foundry_integration_tests_disabled -async def test_integration_function_call_roundtrip_preserves_fidelity() -> None: - """Test that function calls roundtrip correctly with full fidelity preserved.""" - call_count = 0 - - @tool(name="get_weather", approval_mode="never_require") - async def get_weather_tool(location: str) -> str: - """Get weather for a location.""" - nonlocal call_count - call_count += 1 - return f"Weather in {location} is sunny, 72F" - - client = AzureOpenAIResponsesClient( - project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], - deployment_name=os.environ["FOUNDRY_MODEL"], - credential=AzureCliCredential(), - ) - - async with Agent( - client=client, - name="WeatherAgent", - instructions="You help check weather. 
Use get_weather when asked about weather.", - tools=[get_weather_tool], - default_options={"store": False}, - ) as agent: - session = agent.create_session() - - response1 = await agent.run("What is the weather in Seattle?", session=session) - - assert response1 is not None - assert response1.text is not None - assert call_count >= 1 - - response_text = response1.text.lower() - assert "seattle" in response_text or "sunny" in response_text or "72" in response_text - - response2 = await agent.run("And how about in Portland?", session=session) - - assert response2 is not None - assert response2.text is not None - assert call_count >= 2 From 4afa7adc2a133e257e02f7b4d51d35b44e3a348a Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Fri, 27 Mar 2026 14:24:57 +0100 Subject: [PATCH 30/30] trigger actions