From 592f904c4c8fd3fa56367089d4adb08bc84991da Mon Sep 17 00:00:00 2001 From: Nan Yu Date: Tue, 27 Jan 2026 22:47:07 +0000 Subject: [PATCH] Check the python format for adk samples --- .github/workflows/python_samples_build.yml | 4 + samples/agent/adk/contact_lookup/__main__.py | 132 ++-- .../agent/adk/contact_lookup/a2ui_schema.py | 4 +- samples/agent/adk/contact_lookup/agent.py | 465 +++++++------- .../adk/contact_lookup/agent_executor.py | 322 +++++----- .../adk/contact_lookup/prompt_builder.py | 48 +- samples/agent/adk/contact_lookup/tools.py | 70 +- .../adk/contact_multiple_surfaces/__main__.py | 132 ++-- .../a2ui_examples.py | 143 ++-- .../contact_multiple_surfaces/a2ui_schema.py | 4 +- .../adk/contact_multiple_surfaces/agent.py | 608 +++++++++--------- .../agent_executor.py | 376 +++++------ .../prompt_builder.py | 51 +- .../adk/contact_multiple_surfaces/tools.py | 80 ++- samples/agent/adk/mcp/__main__.py | 2 +- samples/agent/adk/mcp/server.py | 249 +++---- samples/agent/adk/orchestrator/__main__.py | 79 +-- samples/agent/adk/orchestrator/agent.py | 374 ++++++----- .../agent/adk/orchestrator/agent_executor.py | 207 +++--- .../agent/adk/orchestrator/part_converters.py | 52 +- .../orchestrator/subagent_route_manager.py | 14 +- samples/agent/adk/pyproject.toml | 19 + .../agent/adk/restaurant_finder/__main__.py | 129 ++-- samples/agent/adk/restaurant_finder/agent.py | 474 +++++++------- .../adk/restaurant_finder/agent_executor.py | 302 ++++----- .../adk/restaurant_finder/prompt_builder.py | 58 +- samples/agent/adk/restaurant_finder/tools.py | 68 +- samples/agent/adk/rizzcharts/__main__.py | 151 ++--- samples/agent/adk/rizzcharts/agent.py | 232 +++---- .../agent/adk/rizzcharts/agent_executor.py | 268 ++++---- .../rizzcharts/component_catalog_builder.py | 126 ++-- samples/agent/adk/rizzcharts/tools.py | 154 ++--- samples/agent/adk/uv.lock | 103 ++- 33 files changed, 2888 insertions(+), 2612 deletions(-) diff --git a/.github/workflows/python_samples_build.yml b/.github/workflows/python_samples_build.yml index 172adeecc..d18b5b0db 100644 --- a/.github/workflows/python_samples_build.yml +++ b/.github/workflows/python_samples_build.yml @@ -45,6 +45,10 @@ jobs: python -m pip install --upgrade pip pip install uv + - name: Check Formatting + working-directory: samples/agent/adk + run: uv run pyink --check . + - name: Build contact_lookup working-directory: samples/agent/adk/contact_lookup run: uv build . diff --git a/samples/agent/adk/contact_lookup/__main__.py b/samples/agent/adk/contact_lookup/__main__.py index 713d4c312..13b76a9a1 100644 --- a/samples/agent/adk/contact_lookup/__main__.py +++ b/samples/agent/adk/contact_lookup/__main__.py @@ -34,77 +34,83 @@ class MissingAPIKeyError(Exception): - """Exception for missing API key.""" + """Exception for missing API key.""" @click.command() @click.option("--host", default="localhost") @click.option("--port", default=10003) def main(host, port): - try: - # Check for API key only if Vertex AI is not configured - if not os.getenv("GOOGLE_GENAI_USE_VERTEXAI") == "TRUE": - if not os.getenv("GEMINI_API_KEY"): - raise MissingAPIKeyError( - "GEMINI_API_KEY environment variable not set and GOOGLE_GENAI_USE_VERTEXAI is not TRUE." 
- ) - - capabilities = AgentCapabilities( - streaming=True, - extensions=[get_a2ui_agent_extension()], + try: + # Check for API key only if Vertex AI is not configured + if not os.getenv("GOOGLE_GENAI_USE_VERTEXAI") == "TRUE": + if not os.getenv("GEMINI_API_KEY"): + raise MissingAPIKeyError( + "GEMINI_API_KEY environment variable not set and GOOGLE_GENAI_USE_VERTEXAI" + " is not TRUE." ) - skill = AgentSkill( - id="find_contact", - name="Find Contact Tool", - description="Helps find contact information for colleagues (e.g., email, location, team).", - tags=["contact", "directory", "people", "finder"], - examples=["Who is David Chen in marketing?", "Find Sarah Lee from engineering"], - ) - - base_url = f"http://{host}:{port}" - - agent_card = AgentCard( - name="Contact Lookup Agent", - description="This agent helps find contact info for people in your organization.", - url=base_url, # <-- Use base_url here - version="1.0.0", - default_input_modes=ContactAgent.SUPPORTED_CONTENT_TYPES, - default_output_modes=ContactAgent.SUPPORTED_CONTENT_TYPES, - capabilities=capabilities, - skills=[skill], - ) - - agent_executor = ContactAgentExecutor(base_url=base_url) - - request_handler = DefaultRequestHandler( - agent_executor=agent_executor, - task_store=InMemoryTaskStore(), - ) - server = A2AStarletteApplication( - agent_card=agent_card, http_handler=request_handler - ) - import uvicorn - - app = server.build() - - app.add_middleware( - CORSMiddleware, - allow_origin_regex=r"http://localhost:\d+", - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], - ) - - app.mount("/static", StaticFiles(directory="images"), name="static") - uvicorn.run(app, host=host, port=port) - except MissingAPIKeyError as e: - logger.error(f"Error: {e}") - exit(1) - except Exception as e: - logger.error(f"An error occurred during server startup: {e}") - exit(1) + capabilities = AgentCapabilities( + streaming=True, + extensions=[get_a2ui_agent_extension()], + ) + skill = AgentSkill( + id="find_contact", + name="Find Contact Tool", + description=( + "Helps find contact information for colleagues (e.g., email, location," + " team)." + ), + tags=["contact", "directory", "people", "finder"], + examples=["Who is David Chen in marketing?", "Find Sarah Lee from engineering"], + ) + + base_url = f"http://{host}:{port}" + + agent_card = AgentCard( + name="Contact Lookup Agent", + description=( + "This agent helps find contact info for people in your organization." 
+ ), + url=base_url, # <-- Use base_url here + version="1.0.0", + default_input_modes=ContactAgent.SUPPORTED_CONTENT_TYPES, + default_output_modes=ContactAgent.SUPPORTED_CONTENT_TYPES, + capabilities=capabilities, + skills=[skill], + ) + + agent_executor = ContactAgentExecutor(base_url=base_url) + + request_handler = DefaultRequestHandler( + agent_executor=agent_executor, + task_store=InMemoryTaskStore(), + ) + server = A2AStarletteApplication( + agent_card=agent_card, http_handler=request_handler + ) + import uvicorn + + app = server.build() + + app.add_middleware( + CORSMiddleware, + allow_origin_regex=r"http://localhost:\d+", + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], + ) + + app.mount("/static", StaticFiles(directory="images"), name="static") + + uvicorn.run(app, host=host, port=port) + except MissingAPIKeyError as e: + logger.error(f"Error: {e}") + exit(1) + except Exception as e: + logger.error(f"An error occurred during server startup: {e}") + exit(1) if __name__ == "__main__": - main() + main() diff --git a/samples/agent/adk/contact_lookup/a2ui_schema.py b/samples/agent/adk/contact_lookup/a2ui_schema.py index 4b6038fdc..52ee845e7 100644 --- a/samples/agent/adk/contact_lookup/a2ui_schema.py +++ b/samples/agent/adk/contact_lookup/a2ui_schema.py @@ -15,7 +15,7 @@ # a2ui_schema.py -A2UI_SCHEMA = r''' +A2UI_SCHEMA = r""" { "title": "A2UI Message Schema", "description": "Describes a JSON payload for an A2UI (Agent to UI) message, which is used to dynamically construct and update user interfaces. A message MUST contain exactly ONE of the action properties: 'beginRendering', 'surfaceUpdate', 'dataModelUpdate', or 'deleteSurface'.", @@ -785,4 +785,4 @@ } } } -''' +""" diff --git a/samples/agent/adk/contact_lookup/agent.py b/samples/agent/adk/contact_lookup/agent.py index d16b58a9e..9a09aaad0 100644 --- a/samples/agent/adk/contact_lookup/agent.py +++ b/samples/agent/adk/contact_lookup/agent.py @@ -31,7 +31,6 @@ from google.adk.sessions import InMemorySessionService from google.genai import types from prompt_builder import ( - get_text_prompt, get_ui_prompt, ) @@ -41,252 +40,250 @@ class ContactAgent: - """An agent that finds contact info for colleagues.""" - - SUPPORTED_CONTENT_TYPES = ["text", "text/plain"] - - def __init__(self, base_url: str, use_ui: bool = False): - self.base_url = base_url - self.use_ui = use_ui - self._agent = self._build_agent(use_ui) - self._user_id = "remote_agent" - self._runner = Runner( - app_name=self._agent.name, - agent=self._agent, - artifact_service=InMemoryArtifactService(), - session_service=InMemorySessionService(), - memory_service=InMemoryMemoryService(), + """An agent that finds contact info for colleagues.""" + + SUPPORTED_CONTENT_TYPES = ["text", "text/plain"] + + def __init__(self, base_url: str, use_ui: bool = False): + self.base_url = base_url + self.use_ui = use_ui + self._agent = self._build_agent(use_ui) + self._user_id = "remote_agent" + self._runner = Runner( + app_name=self._agent.name, + agent=self._agent, + artifact_service=InMemoryArtifactService(), + session_service=InMemorySessionService(), + memory_service=InMemoryMemoryService(), + ) + + # --- MODIFICATION: Wrap the schema --- + # Load the A2UI_SCHEMA string into a Python object for validation + try: + # First, load the schema for a *single message* + single_message_schema = json.loads(A2UI_SCHEMA) + + # The prompt instructs the LLM to return a *list* of messages. + # Therefore, our validation schema must be an *array* of the single message schema. 
+ self.a2ui_schema_object = {"type": "array", "items": single_message_schema} + logger.info("A2UI_SCHEMA successfully loaded and wrapped in an array validator.") + except json.JSONDecodeError as e: + logger.error(f"CRITICAL: Failed to parse A2UI_SCHEMA: {e}") + self.a2ui_schema_object = None + # --- END MODIFICATION --- + + def get_processing_message(self) -> str: + return "Looking up contact information..." + + def _build_agent(self, use_ui: bool) -> LlmAgent: + """Builds the LLM agent for the contact agent.""" + LITELLM_MODEL = os.getenv("LITELLM_MODEL", "gemini/gemini-2.5-flash") + + if use_ui: + instruction = get_ui_prompt(self.base_url, CONTACT_UI_EXAMPLES) + else: + # The text prompt function also returns a complete prompt. + instruction = get_text_prompt() + + return LlmAgent( + model=LiteLlm(model=LITELLM_MODEL), + name="contact_agent", + description="An agent that finds colleague contact info.", + instruction=instruction, + tools=[get_contact_info], + ) + + async def stream(self, query, session_id) -> AsyncIterable[dict[str, Any]]: + session_state = {"base_url": self.base_url} + + session = await self._runner.session_service.get_session( + app_name=self._agent.name, + user_id=self._user_id, + session_id=session_id, + ) + if session is None: + session = await self._runner.session_service.create_session( + app_name=self._agent.name, + user_id=self._user_id, + state=session_state, + session_id=session_id, + ) + elif "base_url" not in session.state: + session.state["base_url"] = self.base_url + + # --- Begin: UI Validation and Retry Logic --- + max_retries = 1 # Total 2 attempts + attempt = 0 + current_query_text = query + + # Ensure schema was loaded + if self.use_ui and self.a2ui_schema_object is None: + logger.error( + "--- ContactAgent.stream: A2UI_SCHEMA is not loaded. " + "Cannot perform UI validation. ---" + ) + yield { + "is_task_complete": True, + "content": ( + "I'm sorry, I'm facing an internal configuration error with my UI" + " components. Please contact support." + ), + } + return + + while attempt <= max_retries: + attempt += 1 + logger.info( + f"--- ContactAgent.stream: Attempt {attempt}/{max_retries + 1} " + f"for session {session_id} ---" + ) + + current_message = types.Content( + role="user", parts=[types.Part.from_text(text=current_query_text)] + ) + final_response_content = None + + async for event in self._runner.run_async( + user_id=self._user_id, + session_id=session.id, + new_message=current_message, + ): + logger.info(f"Event from runner: {event}") + if event.is_final_response(): + if event.content and event.content.parts and event.content.parts[0].text: + final_response_content = "\n".join( + [p.text for p in event.content.parts if p.text] + ) + break # Got the final response, stop consuming events + else: + logger.info(f"Intermediate event: {event}") + # Yield intermediate updates on every attempt + yield { + "is_task_complete": False, + "updates": self.get_processing_message(), + } + + if final_response_content is None: + logger.warning( + "--- ContactAgent.stream: Received no final response content from runner " + f"(Attempt {attempt}). ---" + ) + if attempt <= max_retries: + current_query_text = ( + "I received no response. Please try again." + f"Please retry the original request: '{query}'" + ) + continue # Go to next retry + else: + # Retries exhausted on no-response + final_response_content = ( + "I'm sorry, I encountered an error and couldn't process your request." 
+ ) + # Fall through to send this as a text-only error + + is_valid = False + error_message = "" + + if self.use_ui: + logger.info( + "--- ContactAgent.stream: Validating UI response (Attempt" + f" {attempt})... ---" ) - - # --- MODIFICATION: Wrap the schema --- - # Load the A2UI_SCHEMA string into a Python object for validation try: - # First, load the schema for a *single message* - single_message_schema = json.loads(A2UI_SCHEMA) + if "---a2ui_JSON---" not in final_response_content: + raise ValueError("Delimiter '---a2ui_JSON---' not found.") + + text_part, json_string = final_response_content.split("---a2ui_JSON---", 1) - # The prompt instructs the LLM to return a *list* of messages. - # Therefore, our validation schema must be an *array* of the single message schema. - self.a2ui_schema_object = {"type": "array", "items": single_message_schema} + # Handle the "no results found" case + json_string_cleaned = ( + json_string.strip().lstrip("```json").rstrip("```").strip() + ) + if not json_string.strip() or json_string_cleaned == "[]": logger.info( - "A2UI_SCHEMA successfully loaded and wrapped in an array validator." + "--- ContactAgent.stream: Empty JSON list found. Assuming valid (e.g.," + " 'no results'). ---" ) - except json.JSONDecodeError as e: - logger.error(f"CRITICAL: Failed to parse A2UI_SCHEMA: {e}") - self.a2ui_schema_object = None - # --- END MODIFICATION --- - - def get_processing_message(self) -> str: - return "Looking up contact information..." + is_valid = True - def _build_agent(self, use_ui: bool) -> LlmAgent: - """Builds the LLM agent for the contact agent.""" - LITELLM_MODEL = os.getenv("LITELLM_MODEL", "gemini/gemini-2.5-flash") - - if use_ui: - instruction = get_ui_prompt(self.base_url, CONTACT_UI_EXAMPLES) - else: - # The text prompt function also returns a complete prompt. - instruction = get_text_prompt() - - return LlmAgent( - model=LiteLlm(model=LITELLM_MODEL), - name="contact_agent", - description="An agent that finds colleague contact info.", - instruction=instruction, - tools=[get_contact_info], - ) + else: + if not json_string_cleaned: + raise ValueError("Cleaned JSON string is empty.") - async def stream(self, query, session_id) -> AsyncIterable[dict[str, Any]]: - session_state = {"base_url": self.base_url} + # --- New Validation Steps --- + # 1. Check if it's parsable JSON + parsed_json_data = json.loads(json_string_cleaned) - session = await self._runner.session_service.get_session( - app_name=self._agent.name, - user_id=self._user_id, - session_id=session_id, - ) - if session is None: - session = await self._runner.session_service.create_session( - app_name=self._agent.name, - user_id=self._user_id, - state=session_state, - session_id=session_id, - ) - elif "base_url" not in session.state: - session.state["base_url"] = self.base_url - - # --- Begin: UI Validation and Retry Logic --- - max_retries = 1 # Total 2 attempts - attempt = 0 - current_query_text = query - - # Ensure schema was loaded - if self.use_ui and self.a2ui_schema_object is None: - logger.error( - "--- ContactAgent.stream: A2UI_SCHEMA is not loaded. " - "Cannot perform UI validation. ---" - ) - yield { - "is_task_complete": True, - "content": ( - "I'm sorry, I'm facing an internal configuration error with my UI components. " - "Please contact support." - ), - } - return - - while attempt <= max_retries: - attempt += 1 + # 2. 
Check if it validates against the A2UI_SCHEMA + # This will raise jsonschema.exceptions.ValidationError if it fails logger.info( - f"--- ContactAgent.stream: Attempt {attempt}/{max_retries + 1} " - f"for session {session_id} ---" + "--- ContactAgent.stream: Validating against A2UI_SCHEMA... ---" + ) + jsonschema.validate( + instance=parsed_json_data, schema=self.a2ui_schema_object ) + # --- End New Validation Steps --- - current_message = types.Content( - role="user", parts=[types.Part.from_text(text=current_query_text)] + logger.info( + "--- ContactAgent.stream: UI JSON successfully parsed AND validated" + f" against schema. Validation OK (Attempt {attempt}). ---" ) - final_response_content = None - - async for event in self._runner.run_async( - user_id=self._user_id, - session_id=session.id, - new_message=current_message, - ): - logger.info(f"Event from runner: {event}") - if event.is_final_response(): - if ( - event.content - and event.content.parts - and event.content.parts[0].text - ): - final_response_content = "\n".join( - [p.text for p in event.content.parts if p.text] - ) - break # Got the final response, stop consuming events - else: - logger.info(f"Intermediate event: {event}") - # Yield intermediate updates on every attempt - yield { - "is_task_complete": False, - "updates": self.get_processing_message(), - } - - if final_response_content is None: - logger.warning( - f"--- ContactAgent.stream: Received no final response content from runner " - f"(Attempt {attempt}). ---" - ) - if attempt <= max_retries: - current_query_text = ( - "I received no response. Please try again." - f"Please retry the original request: '{query}'" - ) - continue # Go to next retry - else: - # Retries exhausted on no-response - final_response_content = "I'm sorry, I encountered an error and couldn't process your request." - # Fall through to send this as a text-only error - - is_valid = False - error_message = "" - - if self.use_ui: - logger.info( - f"--- ContactAgent.stream: Validating UI response (Attempt {attempt})... ---" - ) - try: - if "---a2ui_JSON---" not in final_response_content: - raise ValueError("Delimiter '---a2ui_JSON---' not found.") - - text_part, json_string = final_response_content.split( - "---a2ui_JSON---", 1 - ) - - # Handle the "no results found" case - json_string_cleaned = ( - json_string.strip().lstrip("```json").rstrip("```").strip() - ) - if not json_string.strip() or json_string_cleaned == "[]": - logger.info( - "--- ContactAgent.stream: Empty JSON list found. Assuming valid (e.g., 'no results'). ---" - ) - is_valid = True - - else: - if not json_string_cleaned: - raise ValueError("Cleaned JSON string is empty.") - - # --- New Validation Steps --- - # 1. Check if it's parsable JSON - parsed_json_data = json.loads(json_string_cleaned) - - # 2. Check if it validates against the A2UI_SCHEMA - # This will raise jsonschema.exceptions.ValidationError if it fails - logger.info( - "--- ContactAgent.stream: Validating against A2UI_SCHEMA... ---" - ) - jsonschema.validate( - instance=parsed_json_data, schema=self.a2ui_schema_object - ) - # --- End New Validation Steps --- - - logger.info( - f"--- ContactAgent.stream: UI JSON successfully parsed AND validated against schema. " - f"Validation OK (Attempt {attempt}). 
---" - ) - is_valid = True - - except ( - ValueError, - json.JSONDecodeError, - jsonschema.exceptions.ValidationError, - ) as e: - logger.warning( - f"--- ContactAgent.stream: A2UI validation failed: {e} (Attempt {attempt}) ---" - ) - logger.warning( - f"--- Failed response content: {final_response_content[:500]}... ---" - ) - error_message = f"Validation failed: {e}." - - else: # Not using UI, so text is always "valid" - is_valid = True - - if is_valid: - logger.info( - f"--- ContactAgent.stream: Response is valid. Sending final response (Attempt {attempt}). ---" - ) - logger.info(f"Final response: {final_response_content}") - yield { - "is_task_complete": True, - "content": final_response_content, - } - return # We're done, exit the generator - - # --- If we're here, it means validation failed --- - - if attempt <= max_retries: - logger.warning( - f"--- ContactAgent.stream: Retrying... ({attempt}/{max_retries + 1}) ---" - ) - # Prepare the query for the retry - current_query_text = ( - f"Your previous response was invalid. {error_message} " - "You MUST generate a valid response that strictly follows the A2UI JSON SCHEMA. " - "The response MUST be a JSON list of A2UI messages. " - "Ensure the response is split by '---a2ui_JSON---' and the JSON part is well-formed. " - f"Please retry the original request: '{query}'" - ) - # Loop continues... - - # --- If we're here, it means we've exhausted retries --- - logger.error( - "--- ContactAgent.stream: Max retries exhausted. Sending text-only error. ---" + is_valid = True + + except ( + ValueError, + json.JSONDecodeError, + jsonschema.exceptions.ValidationError, + ) as e: + logger.warning( + f"--- ContactAgent.stream: A2UI validation failed: {e} (Attempt" + f" {attempt}) ---" + ) + logger.warning( + f"--- Failed response content: {final_response_content[:500]}... ---" + ) + error_message = f"Validation failed: {e}." + + else: # Not using UI, so text is always "valid" + is_valid = True + + if is_valid: + logger.info( + "--- ContactAgent.stream: Response is valid. Sending final response" + f" (Attempt {attempt}). ---" ) + logger.info(f"Final response: {final_response_content}") yield { "is_task_complete": True, - "content": ( - "I'm sorry, I'm having trouble generating the interface for that request right now. " - "Please try again in a moment." - ), + "content": final_response_content, } - # --- End: UI Validation and Retry Logic --- + return # We're done, exit the generator + + # --- If we're here, it means validation failed --- + + if attempt <= max_retries: + logger.warning( + f"--- ContactAgent.stream: Retrying... ({attempt}/{max_retries + 1}) ---" + ) + # Prepare the query for the retry + current_query_text = ( + f"Your previous response was invalid. {error_message} You MUST generate a" + " valid response that strictly follows the A2UI JSON SCHEMA. The response" + " MUST be a JSON list of A2UI messages. Ensure the response is split by" + " '---a2ui_JSON---' and the JSON part is well-formed. Please retry the" + f" original request: '{query}'" + ) + # Loop continues... + + # --- If we're here, it means we've exhausted retries --- + logger.error( + "--- ContactAgent.stream: Max retries exhausted. Sending text-only error. ---" + ) + yield { + "is_task_complete": True, + "content": ( + "I'm sorry, I'm having trouble generating the interface for that request" + " right now. Please try again in a moment." 
+ ), + } + # --- End: UI Validation and Retry Logic --- diff --git a/samples/agent/adk/contact_lookup/agent_executor.py b/samples/agent/adk/contact_lookup/agent_executor.py index f32110e7e..c58bbce18 100644 --- a/samples/agent/adk/contact_lookup/agent_executor.py +++ b/samples/agent/adk/contact_lookup/agent_executor.py @@ -39,173 +39,169 @@ class ContactAgentExecutor(AgentExecutor): - """Contact AgentExecutor Example.""" - - def __init__(self, base_url: str): - # Instantiate two agents: one for UI and one for text-only. - # The appropriate one will be chosen at execution time. - self.ui_agent = ContactAgent(base_url=base_url, use_ui=True) - self.text_agent = ContactAgent(base_url=base_url, use_ui=False) - - async def execute( - self, - context: RequestContext, - event_queue: EventQueue, - ) -> None: - query = "" - ui_event_part = None - action = None - - logger.info( - f"--- Client requested extensions: {context.requested_extensions} ---" - ) - use_ui = try_activate_a2ui_extension(context) - - # Determine which agent to use based on whether the a2ui extension is active. - if use_ui: - agent = self.ui_agent - logger.info( - "--- AGENT_EXECUTOR: A2UI extension is active. Using UI agent. ---" - ) - else: - agent = self.text_agent - logger.info( - "--- AGENT_EXECUTOR: A2UI extension is not active. Using text agent. ---" - ) - - if context.message and context.message.parts: - logger.info( - f"--- AGENT_EXECUTOR: Processing {len(context.message.parts)} message parts ---" - ) - for i, part in enumerate(context.message.parts): - if isinstance(part.root, DataPart): - if "userAction" in part.root.data: - logger.info(f" Part {i}: Found a2ui UI ClientEvent payload.") - ui_event_part = part.root.data["userAction"] - else: - logger.info(f" Part {i}: DataPart (data: {part.root.data})") - elif isinstance(part.root, TextPart): - logger.info(f" Part {i}: TextPart (text: {part.root.text})") - else: - logger.info(f" Part {i}: Unknown part type ({type(part.root)})") - - if ui_event_part: - logger.info(f"Received a2ui ClientEvent: {ui_event_part}") - # Fix: Check both 'actionName' and 'name' - action = ui_event_part.get("name") - ctx = ui_event_part.get("context", {}) - - if action == "view_profile": - contact_name = ctx.get("contactName", "Unknown") - department = ctx.get("department", "") - query = f"WHO_IS: {contact_name} from {department}" - - elif action == "send_email": - contact_name = ctx.get("contactName", "Unknown") - email = ctx.get("email", "Unknown") - query = f"USER_WANTS_TO_EMAIL: {contact_name} at {email}" - - elif action == "send_message": - contact_name = ctx.get("contactName", "Unknown") - query = f"USER_WANTS_TO_MESSAGE: {contact_name}" - - elif action == "follow_contact": - query = "ACTION: follow_contact" - - elif action == "view_full_profile": - contact_name = ctx.get("contactName", "Unknown") - query = f"USER_WANTS_FULL_PROFILE: {contact_name}" - - else: - query = f"User submitted an event: {action} with data: {ctx}" + """Contact AgentExecutor Example.""" + + def __init__(self, base_url: str): + # Instantiate two agents: one for UI and one for text-only. + # The appropriate one will be chosen at execution time. 
+ self.ui_agent = ContactAgent(base_url=base_url, use_ui=True) + self.text_agent = ContactAgent(base_url=base_url, use_ui=False) + + async def execute( + self, + context: RequestContext, + event_queue: EventQueue, + ) -> None: + query = "" + ui_event_part = None + action = None + + logger.info(f"--- Client requested extensions: {context.requested_extensions} ---") + use_ui = try_activate_a2ui_extension(context) + + # Determine which agent to use based on whether the a2ui extension is active. + if use_ui: + agent = self.ui_agent + logger.info("--- AGENT_EXECUTOR: A2UI extension is active. Using UI agent. ---") + else: + agent = self.text_agent + logger.info( + "--- AGENT_EXECUTOR: A2UI extension is not active. Using text agent. ---" + ) + + if context.message and context.message.parts: + logger.info( + f"--- AGENT_EXECUTOR: Processing {len(context.message.parts)} message" + " parts ---" + ) + for i, part in enumerate(context.message.parts): + if isinstance(part.root, DataPart): + if "userAction" in part.root.data: + logger.info(f" Part {i}: Found a2ui UI ClientEvent payload.") + ui_event_part = part.root.data["userAction"] + else: + logger.info(f" Part {i}: DataPart (data: {part.root.data})") + elif isinstance(part.root, TextPart): + logger.info(f" Part {i}: TextPart (text: {part.root.text})") else: - logger.info("No a2ui UI event part found. Falling back to text input.") - query = context.get_user_input() + logger.info(f" Part {i}: Unknown part type ({type(part.root)})") + + if ui_event_part: + logger.info(f"Received a2ui ClientEvent: {ui_event_part}") + # Fix: Check both 'actionName' and 'name' + action = ui_event_part.get("name") + ctx = ui_event_part.get("context", {}) + + if action == "view_profile": + contact_name = ctx.get("contactName", "Unknown") + department = ctx.get("department", "") + query = f"WHO_IS: {contact_name} from {department}" + + elif action == "send_email": + contact_name = ctx.get("contactName", "Unknown") + email = ctx.get("email", "Unknown") + query = f"USER_WANTS_TO_EMAIL: {contact_name} at {email}" + + elif action == "send_message": + contact_name = ctx.get("contactName", "Unknown") + query = f"USER_WANTS_TO_MESSAGE: {contact_name}" + + elif action == "follow_contact": + query = "ACTION: follow_contact" + + elif action == "view_full_profile": + contact_name = ctx.get("contactName", "Unknown") + query = f"USER_WANTS_FULL_PROFILE: {contact_name}" + + else: + query = f"User submitted an event: {action} with data: {ctx}" + else: + logger.info("No a2ui UI event part found. 
Falling back to text input.") + query = context.get_user_input() + + logger.info(f"--- AGENT_EXECUTOR: Final query for LLM: '{query}' ---") + + task = context.current_task + + if not task: + task = new_task(context.message) + await event_queue.enqueue_event(task) + updater = TaskUpdater(event_queue, task.id, task.context_id) + + async for item in agent.stream(query, task.context_id): + is_task_complete = item["is_task_complete"] + if not is_task_complete: + await updater.update_status( + TaskState.working, + new_agent_text_message(item["updates"], task.context_id, task.id), + ) + continue - logger.info(f"--- AGENT_EXECUTOR: Final query for LLM: '{query}' ---") + final_state = TaskState.input_required # Default + if action in ["send_email", "send_message", "view_full_profile"]: + final_state = TaskState.completed - task = context.current_task + content = item["content"] + final_parts = [] + if "---a2ui_JSON---" in content: + logger.info("Splitting final response into text and UI parts.") + text_content, json_string = content.split("---a2ui_JSON---", 1) - if not task: - task = new_task(context.message) - await event_queue.enqueue_event(task) - updater = TaskUpdater(event_queue, task.id, task.context_id) + if text_content.strip(): + final_parts.append(Part(root=TextPart(text=text_content.strip()))) - async for item in agent.stream(query, task.context_id): - is_task_complete = item["is_task_complete"] - if not is_task_complete: - await updater.update_status( - TaskState.working, - new_agent_text_message(item["updates"], task.context_id, task.id), - ) - continue - - final_state = TaskState.input_required # Default - if action in ["send_email", "send_message", "view_full_profile"]: - final_state = TaskState.completed - - content = item["content"] - final_parts = [] - if "---a2ui_JSON---" in content: - logger.info("Splitting final response into text and UI parts.") - text_content, json_string = content.split("---a2ui_JSON---", 1) - - if text_content.strip(): - final_parts.append(Part(root=TextPart(text=text_content.strip()))) - - if json_string.strip(): - try: - json_string_cleaned = ( - json_string.strip().lstrip("```json").rstrip("```").strip() - ) - - # Handle empty JSON list (e.g., no results) - if not json_string_cleaned or json_string_cleaned == "[]": - logger.info("Received empty/no JSON part. Skipping DataPart.") - else: - json_data = json.loads(json_string_cleaned) - if isinstance(json_data, list): - logger.info( - f"Found {len(json_data)} messages. Creating individual DataParts." - ) - for message in json_data: - final_parts.append(create_a2ui_part(message)) - - else: - # Handle the case where a single JSON object is returned - logger.info( - "Received a single JSON object. Creating a DataPart." 
- ) - final_parts.append(create_a2ui_part(json_data)) - - except json.JSONDecodeError as e: - logger.error(f"Failed to parse UI JSON: {e}") - final_parts.append(Part(root=TextPart(text=json_string))) - else: - final_parts.append(Part(root=TextPart(text=content.strip()))) - - # If after all that, we only have empty parts, add a default text response - if not final_parts or all(isinstance(p.root, TextPart) and not p.root.text for p in final_parts): - final_parts = [Part(root=TextPart(text="OK."))] - - - logger.info("--- FINAL PARTS TO BE SENT ---") - for i, part in enumerate(final_parts): - logger.info(f" - Part {i}: Type = {type(part.root)}") - if isinstance(part.root, TextPart): - logger.info(f" - Text: {part.root.text[:200]}...") - elif isinstance(part.root, DataPart): - logger.info(f" - Data: {str(part.root.data)[:200]}...") - logger.info("-----------------------------") - - await updater.update_status( - final_state, - new_agent_parts_message(final_parts, task.context_id, task.id), - final=(final_state == TaskState.completed), + if json_string.strip(): + try: + json_string_cleaned = ( + json_string.strip().lstrip("```json").rstrip("```").strip() ) - break - async def cancel( - self, request: RequestContext, event_queue: EventQueue - ) -> Task | None: - raise ServerError(error=UnsupportedOperationError()) + # Handle empty JSON list (e.g., no results) + if not json_string_cleaned or json_string_cleaned == "[]": + logger.info("Received empty/no JSON part. Skipping DataPart.") + else: + json_data = json.loads(json_string_cleaned) + if isinstance(json_data, list): + logger.info( + f"Found {len(json_data)} messages. Creating individual DataParts." + ) + for message in json_data: + final_parts.append(create_a2ui_part(message)) + + else: + # Handle the case where a single JSON object is returned + logger.info("Received a single JSON object. Creating a DataPart.") + final_parts.append(create_a2ui_part(json_data)) + + except json.JSONDecodeError as e: + logger.error(f"Failed to parse UI JSON: {e}") + final_parts.append(Part(root=TextPart(text=json_string))) + else: + final_parts.append(Part(root=TextPart(text=content.strip()))) + + # If after all that, we only have empty parts, add a default text response + if not final_parts or all( + isinstance(p.root, TextPart) and not p.root.text for p in final_parts + ): + final_parts = [Part(root=TextPart(text="OK."))] + + logger.info("--- FINAL PARTS TO BE SENT ---") + for i, part in enumerate(final_parts): + logger.info(f" - Part {i}: Type = {type(part.root)}") + if isinstance(part.root, TextPart): + logger.info(f" - Text: {part.root.text[:200]}...") + elif isinstance(part.root, DataPart): + logger.info(f" - Data: {str(part.root.data)[:200]}...") + logger.info("-----------------------------") + + await updater.update_status( + final_state, + new_agent_parts_message(final_parts, task.context_id, task.id), + final=(final_state == TaskState.completed), + ) + break + + async def cancel( + self, request: RequestContext, event_queue: EventQueue + ) -> Task | None: + raise ServerError(error=UnsupportedOperationError()) diff --git a/samples/agent/adk/contact_lookup/prompt_builder.py b/samples/agent/adk/contact_lookup/prompt_builder.py index 3fe269e65..af472be82 100644 --- a/samples/agent/adk/contact_lookup/prompt_builder.py +++ b/samples/agent/adk/contact_lookup/prompt_builder.py @@ -39,23 +39,23 @@ def get_ui_prompt(base_url: str, examples: str) -> str: - """ - Constructs the full prompt with UI instructions, rules, examples, and schema. 
+ """ + Constructs the full prompt with UI instructions, rules, examples, and schema. - Args: - base_url: The base URL for resolving static assets like logos. - examples: A string containing the specific UI examples for the agent's task. + Args: + base_url: The base URL for resolving static assets like logos. + examples: A string containing the specific UI examples for the agent's task. - Returns: - A formatted string to be used as the system prompt for the LLM. - """ + Returns: + A formatted string to be used as the system prompt for the LLM. + """ - # --- THIS IS THE FIX --- - # We no longer call .format() on the examples, as it breaks the JSON. - formatted_examples = examples - # --- END FIX --- + # --- THIS IS THE FIX --- + # We no longer call .format() on the examples, as it breaks the JSON. + formatted_examples = examples + # --- END FIX --- - return f""" + return f""" You are a helpful contact lookup assistant. Your final output MUST be a a2ui UI JSON response. To generate the response, you MUST follow these rules: @@ -90,10 +90,10 @@ def get_ui_prompt(base_url: str, examples: str) -> str: def get_text_prompt() -> str: - """ - Constructs the prompt for a text-only agent. - """ - return """ + """ + Constructs the prompt for a text-only agent. + """ + return """ You are a helpful contact lookup assistant. Your final output MUST be a text response. To generate the response, you MUST follow these rules: @@ -109,10 +109,10 @@ def get_text_prompt() -> str: if __name__ == "__main__": - # Example of how to use the prompt builder - my_base_url = "http://localhost:8000" - contact_prompt = get_ui_prompt(my_base_url, CONTACT_UI_EXAMPLES) - print(contact_prompt) - with open("generated_prompt.txt", "w") as f: - f.write(contact_prompt) - print("\nGenerated prompt saved to generated_prompt.txt") + # Example of how to use the prompt builder + my_base_url = "http://localhost:8000" + contact_prompt = get_ui_prompt(my_base_url, CONTACT_UI_EXAMPLES) + print(contact_prompt) + with open("generated_prompt.txt", "w") as f: + f.write(contact_prompt) + print("\nGenerated prompt saved to generated_prompt.txt") diff --git a/samples/agent/adk/contact_lookup/tools.py b/samples/agent/adk/contact_lookup/tools.py index 56c9e15eb..3bf8989eb 100644 --- a/samples/agent/adk/contact_lookup/tools.py +++ b/samples/agent/adk/contact_lookup/tools.py @@ -22,47 +22,45 @@ def get_contact_info(name: str, tool_context: ToolContext, department: str = "") -> str: - """Call this tool to get a list of contacts based on a name and optional department. - 'name' is the person's name to search for. - 'department' is the optional department to filter by. - """ - logger.info("--- TOOL CALLED: get_contact_info ---") - logger.info(f" - Name: {name}") - logger.info(f" - Department: {department}") + """Call this tool to get a list of contacts based on a name and optional department. + 'name' is the person's name to search for. + 'department' is the optional department to filter by. 
+ """ + logger.info("--- TOOL CALLED: get_contact_info ---") + logger.info(f" - Name: {name}") + logger.info(f" - Department: {department}") - results = [] - try: - script_dir = os.path.dirname(__file__) - file_path = os.path.join(script_dir, "contact_data.json") - with open(file_path) as f: - contact_data_str = f.read() - if base_url := tool_context.state.get("base_url"): - contact_data_str = contact_data_str.replace("http://localhost:10002", base_url) - logger.info(f'Updated base URL from tool context: {base_url}') - all_contacts = json.loads(contact_data_str) + results = [] + try: + script_dir = os.path.dirname(__file__) + file_path = os.path.join(script_dir, "contact_data.json") + with open(file_path) as f: + contact_data_str = f.read() + if base_url := tool_context.state.get("base_url"): + contact_data_str = contact_data_str.replace("http://localhost:10002", base_url) + logger.info(f"Updated base URL from tool context: {base_url}") + all_contacts = json.loads(contact_data_str) - name_lower = name.lower() + name_lower = name.lower() - dept_lower = department.lower() if department else "" + dept_lower = department.lower() if department else "" - # Filter by name - results = [ - contact for contact in all_contacts if name_lower in contact["name"].lower() - ] + # Filter by name + results = [ + contact for contact in all_contacts if name_lower in contact["name"].lower() + ] - # If department is provided, filter results further - if dept_lower: - results = [ - contact - for contact in results - if dept_lower in contact["department"].lower() - ] + # If department is provided, filter results further + if dept_lower: + results = [ + contact for contact in results if dept_lower in contact["department"].lower() + ] - logger.info(f" - Success: Found {len(results)} matching contacts.") + logger.info(f" - Success: Found {len(results)} matching contacts.") - except FileNotFoundError: - logger.error(f" - Error: contact_data.json not found at {file_path}") - except json.JSONDecodeError: - logger.error(f" - Error: Failed to decode JSON from {file_path}") + except FileNotFoundError: + logger.error(f" - Error: contact_data.json not found at {file_path}") + except json.JSONDecodeError: + logger.error(f" - Error: Failed to decode JSON from {file_path}") - return json.dumps(results) + return json.dumps(results) diff --git a/samples/agent/adk/contact_multiple_surfaces/__main__.py b/samples/agent/adk/contact_multiple_surfaces/__main__.py index 76289ba96..b2e73c900 100644 --- a/samples/agent/adk/contact_multiple_surfaces/__main__.py +++ b/samples/agent/adk/contact_multiple_surfaces/__main__.py @@ -34,77 +34,83 @@ class MissingAPIKeyError(Exception): - """Exception for missing API key.""" + """Exception for missing API key.""" @click.command() @click.option("--host", default="localhost") @click.option("--port", default=10004) def main(host, port): - try: - # Check for API key only if Vertex AI is not configured - if not os.getenv("GOOGLE_GENAI_USE_VERTEXAI") == "TRUE": - if not os.getenv("GEMINI_API_KEY"): - raise MissingAPIKeyError( - "GEMINI_API_KEY environment variable not set and GOOGLE_GENAI_USE_VERTEXAI is not TRUE." - ) - - capabilities = AgentCapabilities( - streaming=True, - extensions=[get_a2ui_agent_extension()], + try: + # Check for API key only if Vertex AI is not configured + if not os.getenv("GOOGLE_GENAI_USE_VERTEXAI") == "TRUE": + if not os.getenv("GEMINI_API_KEY"): + raise MissingAPIKeyError( + "GEMINI_API_KEY environment variable not set and GOOGLE_GENAI_USE_VERTEXAI" + " is not TRUE." 
) - skill = AgentSkill( - id="find_contact", - name="Find Contact Tool", - description="Helps find contact information for colleagues (e.g., email, location, team).", - tags=["contact", "directory", "people", "finder"], - examples=["Who is David Chen in marketing?", "Find Sarah Lee from engineering"], - ) - - base_url = f"http://{host}:{port}" - - agent_card = AgentCard( - name="Contact Lookup Agent", - description="This agent helps find contact info for people in your organization.", - url=base_url, # <-- Use base_url here - version="1.0.0", - default_input_modes=ContactAgent.SUPPORTED_CONTENT_TYPES, - default_output_modes=ContactAgent.SUPPORTED_CONTENT_TYPES, - capabilities=capabilities, - skills=[skill], - ) - - agent_executor = ContactAgentExecutor(base_url=base_url) - - request_handler = DefaultRequestHandler( - agent_executor=agent_executor, - task_store=InMemoryTaskStore(), - ) - server = A2AStarletteApplication( - agent_card=agent_card, http_handler=request_handler - ) - import uvicorn - - app = server.build() - - app.add_middleware( - CORSMiddleware, - allow_origins=["http://localhost:5173"], - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], - ) - - app.mount("/static", StaticFiles(directory="images"), name="static") - uvicorn.run(app, host=host, port=port) - except MissingAPIKeyError as e: - logger.error(f"Error: {e}") - exit(1) - except Exception as e: - logger.error(f"An error occurred during server startup: {e}") - exit(1) + capabilities = AgentCapabilities( + streaming=True, + extensions=[get_a2ui_agent_extension()], + ) + skill = AgentSkill( + id="find_contact", + name="Find Contact Tool", + description=( + "Helps find contact information for colleagues (e.g., email, location," + " team)." + ), + tags=["contact", "directory", "people", "finder"], + examples=["Who is David Chen in marketing?", "Find Sarah Lee from engineering"], + ) + + base_url = f"http://{host}:{port}" + + agent_card = AgentCard( + name="Contact Lookup Agent", + description=( + "This agent helps find contact info for people in your organization." 
+ ), + url=base_url, # <-- Use base_url here + version="1.0.0", + default_input_modes=ContactAgent.SUPPORTED_CONTENT_TYPES, + default_output_modes=ContactAgent.SUPPORTED_CONTENT_TYPES, + capabilities=capabilities, + skills=[skill], + ) + + agent_executor = ContactAgentExecutor(base_url=base_url) + + request_handler = DefaultRequestHandler( + agent_executor=agent_executor, + task_store=InMemoryTaskStore(), + ) + server = A2AStarletteApplication( + agent_card=agent_card, http_handler=request_handler + ) + import uvicorn + + app = server.build() + + app.add_middleware( + CORSMiddleware, + allow_origins=["http://localhost:5173"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], + ) + + app.mount("/static", StaticFiles(directory="images"), name="static") + + uvicorn.run(app, host=host, port=port) + except MissingAPIKeyError as e: + logger.error(f"Error: {e}") + exit(1) + except Exception as e: + logger.error(f"An error occurred during server startup: {e}") + exit(1) if __name__ == "__main__": - main() + main() diff --git a/samples/agent/adk/contact_multiple_surfaces/a2ui_examples.py b/samples/agent/adk/contact_multiple_surfaces/a2ui_examples.py index da2884d9c..07a064527 100644 --- a/samples/agent/adk/contact_multiple_surfaces/a2ui_examples.py +++ b/samples/agent/adk/contact_multiple_surfaces/a2ui_examples.py @@ -34,79 +34,84 @@ FLOOR_PLAN_FILE = "floor_plan.json" + def load_examples(base_url: str = "http://localhost:10004") -> str: - """ - Loads, validates, and formats the UI examples from JSON files. - - Args: - base_url: The base URL to replace placeholder URLs with. - (Currently examples have http://localhost:10004 hardcoded, - but we can make this dynamic if needed). - - Returns: - A string containing all formatted examples for the prompt. - """ - - # Pre-parse validator + """ + Loads, validates, and formats the UI examples from JSON files. + + Args: + base_url: The base URL to replace placeholder URLs with. + (Currently examples have http://localhost:10004 hardcoded, + but we can make this dynamic if needed). + + Returns: + A string containing all formatted examples for the prompt. 
+ """ + + # Pre-parse validator + try: + single_msg_schema = json.loads(A2UI_SCHEMA) + # Examples are typically lists of messages + list_schema = {"type": "array", "items": single_msg_schema} + except json.JSONDecodeError: + logger.error("Failed to parse A2UI_SCHEMA for validation") + list_schema = None + + examples_dir = Path(os.path.dirname(__file__)) / "examples" + formatted_output = [] + + for curr_name, filename in EXAMPLE_FILES.items(): + file_path = examples_dir / filename try: - single_msg_schema = json.loads(A2UI_SCHEMA) - # Examples are typically lists of messages - list_schema = {"type": "array", "items": single_msg_schema} - except json.JSONDecodeError: - logger.error("Failed to parse A2UI_SCHEMA for validation") - list_schema = None - - examples_dir = Path(os.path.dirname(__file__)) / "examples" - formatted_output = [] - - for curr_name, filename in EXAMPLE_FILES.items(): - file_path = examples_dir / filename + content = file_path.read_text(encoding="utf-8") + + # basic replacement if we decide to template the URL in JSON files + # content = content.replace("{{BASE_URL}}", base_url) + + # Validation + if list_schema: try: - content = file_path.read_text(encoding="utf-8") - - # basic replacement if we decide to template the URL in JSON files - # content = content.replace("{{BASE_URL}}", base_url) - - # Validation - if list_schema: - try: - data = json.loads(content) - jsonschema.validate(instance=data, schema=list_schema) - except (json.JSONDecodeError, jsonschema.ValidationError) as e: - logger.warning(f"Example {filename} validation failed: {e}") - - formatted_output.append(f"---BEGIN {curr_name}---") - # Handle examples that include user/model text - if curr_name == "ORG_CHART_EXAMPLE": - formatted_output.append("User: Show me the org chart for Casey Smith") - formatted_output.append("Model: Here is the organizational chart.") - formatted_output.append("---a2ui_JSON---") - elif curr_name == "MULTI_SURFACE_EXAMPLE": - formatted_output.append("User: Full profile for Casey Smith") - formatted_output.append("Model: Here is the full profile including contact details and org chart.") - formatted_output.append("---a2ui_JSON---") - elif curr_name == "CHART_NODE_CLICK_EXAMPLE": - formatted_output.append('User: ACTION: chart_node_click (context: clickedNodeName="John Smith") (from modal)') - formatted_output.append("Model: Here is the profile for John Smith.") - formatted_output.append("---a2ui_JSON---") - - formatted_output.append(content.strip()) - formatted_output.append(f"---END {curr_name}---") - formatted_output.append("") # Newline - - except FileNotFoundError: - logger.error(f"Example file not found: {file_path}") - - return "\n".join(formatted_output) + data = json.loads(content) + jsonschema.validate(instance=data, schema=list_schema) + except (json.JSONDecodeError, jsonschema.ValidationError) as e: + logger.warning(f"Example {filename} validation failed: {e}") + + formatted_output.append(f"---BEGIN {curr_name}---") + # Handle examples that include user/model text + if curr_name == "ORG_CHART_EXAMPLE": + formatted_output.append("User: Show me the org chart for Casey Smith") + formatted_output.append("Model: Here is the organizational chart.") + formatted_output.append("---a2ui_JSON---") + elif curr_name == "MULTI_SURFACE_EXAMPLE": + formatted_output.append("User: Full profile for Casey Smith") + formatted_output.append( + "Model: Here is the full profile including contact details and org chart." 
+ ) + formatted_output.append("---a2ui_JSON---") + elif curr_name == "CHART_NODE_CLICK_EXAMPLE": + formatted_output.append( + 'User: ACTION: chart_node_click (context: clickedNodeName="John Smith")' + " (from modal)" + ) + formatted_output.append("Model: Here is the profile for John Smith.") + formatted_output.append("---a2ui_JSON---") + + formatted_output.append(content.strip()) + formatted_output.append(f"---END {curr_name}---") + formatted_output.append("") # Newline -def load_floor_plan_example() -> str: - """Loads the floor plan example specifically.""" - examples_dir = Path(os.path.dirname(__file__)) / "examples" - file_path = examples_dir / FLOOR_PLAN_FILE - try: - return file_path.read_text(encoding="utf-8") except FileNotFoundError: - logger.error(f"Floor plan example not found: {file_path}") - return "[]" + logger.error(f"Example file not found: {file_path}") + + return "\n".join(formatted_output) +def load_floor_plan_example() -> str: + """Loads the floor plan example specifically.""" + examples_dir = Path(os.path.dirname(__file__)) / "examples" + file_path = examples_dir / FLOOR_PLAN_FILE + try: + return file_path.read_text(encoding="utf-8") + except FileNotFoundError: + logger.error(f"Floor plan example not found: {file_path}") + return "[]" diff --git a/samples/agent/adk/contact_multiple_surfaces/a2ui_schema.py b/samples/agent/adk/contact_multiple_surfaces/a2ui_schema.py index f4c776d80..29c72d1d8 100644 --- a/samples/agent/adk/contact_multiple_surfaces/a2ui_schema.py +++ b/samples/agent/adk/contact_multiple_surfaces/a2ui_schema.py @@ -15,7 +15,7 @@ # a2ui_schema.py -A2UI_SCHEMA = r''' +A2UI_SCHEMA = r""" { "title": "A2UI Message Schema", "description": "Describes a JSON payload for an A2UI (Agent to UI) message, which is used to dynamically construct and update user interfaces. 
A message MUST contain exactly ONE of the action properties: 'beginRendering', 'surfaceUpdate', 'dataModelUpdate', or 'deleteSurface'.", @@ -789,4 +789,4 @@ } } } -''' +""" diff --git a/samples/agent/adk/contact_multiple_surfaces/agent.py b/samples/agent/adk/contact_multiple_surfaces/agent.py index 29001eef0..91245a85b 100644 --- a/samples/agent/adk/contact_multiple_surfaces/agent.py +++ b/samples/agent/adk/contact_multiple_surfaces/agent.py @@ -31,7 +31,6 @@ from google.adk.sessions import InMemorySessionService from google.genai import types from prompt_builder import ( - get_text_prompt, get_ui_prompt, ) @@ -41,320 +40,327 @@ class ContactAgent: - """An agent that finds contact info for colleagues.""" - - SUPPORTED_CONTENT_TYPES = ["text", "text/plain"] - - def __init__(self, base_url: str, use_ui: bool = False): - self.base_url = base_url - self.use_ui = use_ui - self._agent = self._build_agent(use_ui) - self._user_id = "remote_agent" - self._runner = Runner( - app_name=self._agent.name, - agent=self._agent, - artifact_service=InMemoryArtifactService(), - session_service=InMemorySessionService(), - memory_service=InMemoryMemoryService(), - ) - - # Load A2UI_SCHEMA and wrap it in an array validator for list responses + """An agent that finds contact info for colleagues.""" + + SUPPORTED_CONTENT_TYPES = ["text", "text/plain"] + + def __init__(self, base_url: str, use_ui: bool = False): + self.base_url = base_url + self.use_ui = use_ui + self._agent = self._build_agent(use_ui) + self._user_id = "remote_agent" + self._runner = Runner( + app_name=self._agent.name, + agent=self._agent, + artifact_service=InMemoryArtifactService(), + session_service=InMemorySessionService(), + memory_service=InMemoryMemoryService(), + ) + + # Load A2UI_SCHEMA and wrap it in an array validator for list responses + try: + single_message_schema = json.loads(A2UI_SCHEMA) + self.a2ui_schema_object = {"type": "array", "items": single_message_schema} + logger.info("A2UI_SCHEMA successfully loaded and wrapped in an array validator.") + except json.JSONDecodeError as e: + logger.error(f"CRITICAL: Failed to parse A2UI_SCHEMA: {e}") + self.a2ui_schema_object = None + # --- END MODIFICATION --- + + def get_processing_message(self) -> str: + return "Looking up contact information..." + + def _build_agent(self, use_ui: bool) -> LlmAgent: + """Builds the LLM agent for the contact agent.""" + LITELLM_MODEL = os.getenv("LITELLM_MODEL", "gemini/gemini-2.5-flash") + + if use_ui: + examples = load_examples(self.base_url) + instruction = get_ui_prompt(self.base_url, examples) + else: + # The text prompt function also returns a complete prompt. 
+ instruction = get_text_prompt() + + return LlmAgent( + model=LiteLlm(model=LITELLM_MODEL), + name="contact_agent", + description="An agent that finds colleague contact info.", + instruction=instruction, + tools=[get_contact_info], + ) + + async def stream(self, query, session_id) -> AsyncIterable[dict[str, Any]]: + session_state = {"base_url": self.base_url} + + session = await self._runner.session_service.get_session( + app_name=self._agent.name, + user_id=self._user_id, + session_id=session_id, + ) + if session is None: + session = await self._runner.session_service.create_session( + app_name=self._agent.name, + user_id=self._user_id, + state=session_state, + session_id=session_id, + ) + elif "base_url" not in session.state: + session.state["base_url"] = self.base_url + + # --- Begin: UI Validation and Retry Logic --- + max_retries = 1 # Total 2 attempts + attempt = 0 + current_query_text = query + + # Ensure schema was loaded + if self.use_ui and self.a2ui_schema_object is None: + logger.error( + "--- ContactAgent.stream: A2UI_SCHEMA is not loaded. " + "Cannot perform UI validation. ---" + ) + yield { + "is_task_complete": True, + "content": ( + "I'm sorry, I'm facing an internal configuration error with my UI" + " components. Please contact support." + ), + } + return + + while attempt <= max_retries: + attempt += 1 + logger.info( + f"--- ContactAgent.stream: Attempt {attempt}/{max_retries + 1} " + f"for session {session_id} ---" + ) + logger.info(f"--- ContactAgent.stream: Received query: '{query}' ---") + + # --- Check for User Action --- + # If the query looks like an action (starts with "ACTION:"), parsing it to see if it's send_message + + if query.startswith("ACTION:") and "send_message" in query: + logger.info("--- ContactAgent.stream: Detected send_message ACTION ---") + + # Load the action confirmation example dynamically try: - single_message_schema = json.loads(A2UI_SCHEMA) - self.a2ui_schema_object = {"type": "array", "items": single_message_schema} - logger.info( - "A2UI_SCHEMA successfully loaded and wrapped in an array validator." + from a2ui_examples import load_examples + # We might want to expose a specific loader for this, or just read the file here. + # Since we moved logic to a2ui_examples check if we can import the file constant or just read. + # Actually, a2ui_examples has EXAMPLE_FILES, let's just re-read using pathlib for simplicity or add a helper. + # But wait, load_examples returns the formatted string, including delimiters. + # Let's use the helper we added in a2ui_examples if possible, or just read the file. + # I didn't add a specific helper for action confirmation in a2ui_examples, but I can read the file. + pass + except ImportError: + pass + + # Re-implement logic to read from file + from pathlib import Path + + examples_dir = Path(__file__).parent / "examples" + action_file = examples_dir / "action_confirmation.json" + + if action_file.exists(): + json_content = action_file.read_text(encoding="utf-8").strip() + + # Extract contact name from query if present + contact_name = "Unknown" + if "(contact:" in query: + try: + contact_name = query.split("(contact:")[1].split(")")[0].strip() + except Exception: + pass + + # Inject contact name into the message + if contact_name != "Unknown": + json_content = json_content.replace( + "Your action has been processed.", f"Message sent to {contact_name}!" 
) - except json.JSONDecodeError as e: - logger.error(f"CRITICAL: Failed to parse A2UI_SCHEMA: {e}") - self.a2ui_schema_object = None - # --- END MODIFICATION --- - def get_processing_message(self) -> str: - return "Looking up contact information..." - - def _build_agent(self, use_ui: bool) -> LlmAgent: - """Builds the LLM agent for the contact agent.""" - LITELLM_MODEL = os.getenv("LITELLM_MODEL", "gemini/gemini-2.5-flash") - - if use_ui: - examples = load_examples(self.base_url) - instruction = get_ui_prompt(self.base_url, examples) else: - # The text prompt function also returns a complete prompt. - instruction = get_text_prompt() - - return LlmAgent( - model=LiteLlm(model=LITELLM_MODEL), - name="contact_agent", - description="An agent that finds colleague contact info.", - instruction=instruction, - tools=[get_contact_info], + logger.error( + "Could not find ACTION_CONFIRMATION_EXAMPLE in CONTACT_UI_EXAMPLES" + ) + # Fallback to a minimal valid response to avoid crash + json_content = ( + '[{ "beginRendering": { "surfaceId": "action-modal", "root":' + ' "modal-wrapper" } }, { "surfaceUpdate": { "surfaceId": "action-modal",' + ' "components": [ { "id": "modal-wrapper", "component": { "Modal": {' + ' "entryPointChild": "hidden", "contentChild": "msg", "open": true } } },' + ' { "id": "hidden", "component": { "Text": { "text": {"literalString": "' + ' "} } } }, { "id": "msg", "component": { "Text": { "text":' + ' {"literalString": "Message Sent (Fallback)"} } } } ] } }]' + ) + + final_response_content = ( + f"Message sent to {contact_name}\n---a2ui_JSON---\n{json_content}" ) - async def stream(self, query, session_id) -> AsyncIterable[dict[str, Any]]: - session_state = {"base_url": self.base_url} + yield { + "is_task_complete": True, + "content": final_response_content, + } + return + + if query.startswith("ACTION:") and "view_location" in query: + logger.info("--- ContactAgent.stream: Detected view_location ACTION ---") + + # Use the predefined example floor plan + json_content = load_floor_plan_example().strip() + start_idx = json_content.find("[") + end_idx = json_content.rfind("]") + if start_idx != -1 and end_idx != -1: + json_content = json_content[start_idx : end_idx + 1] - session = await self._runner.session_service.get_session( - app_name=self._agent.name, - user_id=self._user_id, - session_id=session_id, + logger.info(f"--- ContactAgent.stream: Sending Floor Plan ---") + final_response_content = ( + f"Here is the floor plan.\n---a2ui_JSON---\n{json_content}" ) - if session is None: - session = await self._runner.session_service.create_session( - app_name=self._agent.name, - user_id=self._user_id, - state=session_state, - session_id=session_id, + yield {"is_task_complete": True, "content": final_response_content} + return + + current_message = types.Content( + role="user", parts=[types.Part.from_text(text=current_query_text)] + ) + final_response_content = None + + async for event in self._runner.run_async( + user_id=self._user_id, + session_id=session.id, + new_message=current_message, + ): + logger.info(f"Event from runner: {event}") + if event.is_final_response(): + if event.content and event.content.parts and event.content.parts[0].text: + final_response_content = "\n".join( + [p.text for p in event.content.parts if p.text] ) - elif "base_url" not in session.state: - session.state["base_url"] = self.base_url - - # --- Begin: UI Validation and Retry Logic --- - max_retries = 1 # Total 2 attempts - attempt = 0 - current_query_text = query - - # Ensure schema was loaded - if 
self.use_ui and self.a2ui_schema_object is None: - logger.error( - "--- ContactAgent.stream: A2UI_SCHEMA is not loaded. " - "Cannot perform UI validation. ---" + break # Got the final response, stop consuming events + else: + logger.info(f"Intermediate event: {event}") + # Yield intermediate updates on every attempt + yield { + "is_task_complete": False, + "updates": self.get_processing_message(), + } + + if final_response_content is None: + logger.warning( + "--- ContactAgent.stream: Received no final response content from runner " + f"(Attempt {attempt}). ---" + ) + if attempt <= max_retries: + current_query_text = ( + "I received no response. Please try again." + f"Please retry the original request: '{query}'" + ) + continue # Go to next retry + else: + # Retries exhausted on no-response + final_response_content = ( + "I'm sorry, I encountered an error and couldn't process your request." + ) + # Fall through to send this as a text-only error + + is_valid = False + error_message = "" + + if self.use_ui: + logger.info( + "--- ContactAgent.stream: Validating UI response (Attempt" + f" {attempt})... ---" + ) + try: + if "---a2ui_JSON---" not in final_response_content: + raise ValueError("Delimiter '---a2ui_JSON---' not found.") + + text_part, json_string = final_response_content.split("---a2ui_JSON---", 1) + + # Handle the "no results found" case + json_string_cleaned = ( + json_string.strip().lstrip("```json").rstrip("```").strip() + ) + if not json_string.strip() or json_string_cleaned == "[]": + logger.info( + "--- ContactAgent.stream: Empty JSON list found. Assuming valid (e.g.," + " 'no results'). ---" ) - yield { - "is_task_complete": True, - "content": ( - "I'm sorry, I'm facing an internal configuration error with my UI components. " - "Please contact support." - ), - } - return - - while attempt <= max_retries: - attempt += 1 + is_valid = True + + else: + if not json_string_cleaned: + raise ValueError("Cleaned JSON string is empty.") + + # Validate parsed JSON against A2UI_SCHEMA + parsed_json_data = json.loads(json_string_cleaned) logger.info( - f"--- ContactAgent.stream: Attempt {attempt}/{max_retries + 1} " - f"for session {session_id} ---" + "--- ContactAgent.stream: Validating against A2UI_SCHEMA... ---" ) - logger.info(f"--- ContactAgent.stream: Received query: '{query}' ---") - - - # --- Check for User Action --- - # If the query looks like an action (starts with "ACTION:"), parsing it to see if it's send_message - - if query.startswith("ACTION:") and "send_message" in query: - logger.info("--- ContactAgent.stream: Detected send_message ACTION ---") - - # Load the action confirmation example dynamically - try: - from a2ui_examples import load_examples - # We might want to expose a specific loader for this, or just read the file here. - # Since we moved logic to a2ui_examples check if we can import the file constant or just read. - # Actually, a2ui_examples has EXAMPLE_FILES, let's just re-read using pathlib for simplicity or add a helper. - # But wait, load_examples returns the formatted string, including delimiters. - # Let's use the helper we added in a2ui_examples if possible, or just read the file. - # I didn't add a specific helper for action confirmation in a2ui_examples, but I can read the file. 
- pass - except ImportError: - pass - - # Re-implement logic to read from file - from pathlib import Path - examples_dir = Path(__file__).parent / "examples" - action_file = examples_dir / "action_confirmation.json" - - if action_file.exists(): - json_content = action_file.read_text(encoding="utf-8").strip() - - # Extract contact name from query if present - contact_name = "Unknown" - if "(contact:" in query: - try: - contact_name = query.split("(contact:")[1].split(")")[0].strip() - except Exception: - pass - - # Inject contact name into the message - if contact_name != "Unknown": - json_content = json_content.replace( - "Your action has been processed.", - f"Message sent to {contact_name}!" - ) - - else: - logger.error("Could not find ACTION_CONFIRMATION_EXAMPLE in CONTACT_UI_EXAMPLES") - # Fallback to a minimal valid response to avoid crash - json_content = '[{ "beginRendering": { "surfaceId": "action-modal", "root": "modal-wrapper" } }, { "surfaceUpdate": { "surfaceId": "action-modal", "components": [ { "id": "modal-wrapper", "component": { "Modal": { "entryPointChild": "hidden", "contentChild": "msg", "open": true } } }, { "id": "hidden", "component": { "Text": { "text": {"literalString": " "} } } }, { "id": "msg", "component": { "Text": { "text": {"literalString": "Message Sent (Fallback)"} } } } ] } }]' - - final_response_content = f"Message sent to {contact_name}\n---a2ui_JSON---\n{json_content}" - - yield { - "is_task_complete": True, - "content": final_response_content, - } - return - - if query.startswith("ACTION:") and "view_location" in query: - logger.info("--- ContactAgent.stream: Detected view_location ACTION ---") - - # Use the predefined example floor plan - json_content = load_floor_plan_example().strip() - start_idx = json_content.find("[") - end_idx = json_content.rfind("]") - if start_idx != -1 and end_idx != -1: - json_content = json_content[start_idx:end_idx+1] - - logger.info(f"--- ContactAgent.stream: Sending Floor Plan ---") - final_response_content = f"Here is the floor plan.\n---a2ui_JSON---\n{json_content}" - yield { "is_task_complete": True, "content": final_response_content } - return - - - - - - current_message = types.Content( - role="user", parts=[types.Part.from_text(text=current_query_text)] + jsonschema.validate( + instance=parsed_json_data, schema=self.a2ui_schema_object ) - final_response_content = None - - async for event in self._runner.run_async( - user_id=self._user_id, - session_id=session.id, - new_message=current_message, - ): - logger.info(f"Event from runner: {event}") - if event.is_final_response(): - if ( - event.content - and event.content.parts - and event.content.parts[0].text - ): - final_response_content = "\n".join( - [p.text for p in event.content.parts if p.text] - ) - break # Got the final response, stop consuming events - else: - logger.info(f"Intermediate event: {event}") - # Yield intermediate updates on every attempt - yield { - "is_task_complete": False, - "updates": self.get_processing_message(), - } - - if final_response_content is None: - logger.warning( - f"--- ContactAgent.stream: Received no final response content from runner " - f"(Attempt {attempt}). ---" - ) - if attempt <= max_retries: - current_query_text = ( - "I received no response. Please try again." - f"Please retry the original request: '{query}'" - ) - continue # Go to next retry - else: - # Retries exhausted on no-response - final_response_content = "I'm sorry, I encountered an error and couldn't process your request." 
- # Fall through to send this as a text-only error - - is_valid = False - error_message = "" - - if self.use_ui: - logger.info( - f"--- ContactAgent.stream: Validating UI response (Attempt {attempt})... ---" - ) - try: - if "---a2ui_JSON---" not in final_response_content: - raise ValueError("Delimiter '---a2ui_JSON---' not found.") - - text_part, json_string = final_response_content.split( - "---a2ui_JSON---", 1 - ) - - # Handle the "no results found" case - json_string_cleaned = ( - json_string.strip().lstrip("```json").rstrip("```").strip() - ) - if not json_string.strip() or json_string_cleaned == "[]": - logger.info( - "--- ContactAgent.stream: Empty JSON list found. Assuming valid (e.g., 'no results'). ---" - ) - is_valid = True - - else: - if not json_string_cleaned: - raise ValueError("Cleaned JSON string is empty.") - - # Validate parsed JSON against A2UI_SCHEMA - parsed_json_data = json.loads(json_string_cleaned) - logger.info( - "--- ContactAgent.stream: Validating against A2UI_SCHEMA... ---" - ) - jsonschema.validate( - instance=parsed_json_data, schema=self.a2ui_schema_object - ) - # --- End New Validation Steps --- - - logger.info( - f"--- ContactAgent.stream: UI JSON successfully parsed AND validated against schema. " - f"Validation OK (Attempt {attempt}). ---" - ) - is_valid = True - - except ( - ValueError, - json.JSONDecodeError, - jsonschema.exceptions.ValidationError, - ) as e: - logger.warning( - f"--- ContactAgent.stream: A2UI validation failed: {e} (Attempt {attempt}) ---" - ) - logger.warning( - f"--- Failed response content: {final_response_content[:500]}... ---" - ) - error_message = f"Validation failed: {e}." - - else: # Not using UI, so text is always "valid" - is_valid = True - - if is_valid: - logger.info( - f"--- ContactAgent.stream: Response is valid. Sending final response (Attempt {attempt}). ---" - ) - logger.info(f"Final response: {final_response_content}") - yield { - "is_task_complete": True, - "content": final_response_content, - } - return # We're done, exit the generator - - # --- If we're here, it means validation failed --- - - if attempt <= max_retries: - logger.warning( - f"--- ContactAgent.stream: Retrying... ({attempt}/{max_retries + 1}) ---" - ) - # Prepare the query for the retry - current_query_text = ( - f"Your previous response was invalid. {error_message} " - "You MUST generate a valid response that strictly follows the A2UI JSON SCHEMA. " - "The response MUST be a JSON list of A2UI messages. " - "Ensure the response is split by '---a2ui_JSON---' and the JSON part is well-formed. " - f"Please retry the original request: '{query}'" - ) - # Loop continues... - - # --- If we're here, it means we've exhausted retries --- - logger.error( - "--- ContactAgent.stream: Max retries exhausted. Sending text-only error. ---" + # --- End New Validation Steps --- + + logger.info( + "--- ContactAgent.stream: UI JSON successfully parsed AND validated" + f" against schema. Validation OK (Attempt {attempt}). ---" + ) + is_valid = True + + except ( + ValueError, + json.JSONDecodeError, + jsonschema.exceptions.ValidationError, + ) as e: + logger.warning( + f"--- ContactAgent.stream: A2UI validation failed: {e} (Attempt" + f" {attempt}) ---" + ) + logger.warning( + f"--- Failed response content: {final_response_content[:500]}... ---" + ) + error_message = f"Validation failed: {e}." + + else: # Not using UI, so text is always "valid" + is_valid = True + + if is_valid: + logger.info( + "--- ContactAgent.stream: Response is valid. 
Sending final response" + f" (Attempt {attempt}). ---" ) + logger.info(f"Final response: {final_response_content}") yield { "is_task_complete": True, - "content": ( - "I'm sorry, I'm having trouble generating the interface for that request right now. " - "Please try again in a moment." - ), + "content": final_response_content, } - # --- End: UI Validation and Retry Logic --- + return # We're done, exit the generator + + # --- If we're here, it means validation failed --- + + if attempt <= max_retries: + logger.warning( + f"--- ContactAgent.stream: Retrying... ({attempt}/{max_retries + 1}) ---" + ) + # Prepare the query for the retry + current_query_text = ( + f"Your previous response was invalid. {error_message} You MUST generate a" + " valid response that strictly follows the A2UI JSON SCHEMA. The response" + " MUST be a JSON list of A2UI messages. Ensure the response is split by" + " '---a2ui_JSON---' and the JSON part is well-formed. Please retry the" + f" original request: '{query}'" + ) + # Loop continues... + + # --- If we're here, it means we've exhausted retries --- + logger.error( + "--- ContactAgent.stream: Max retries exhausted. Sending text-only error. ---" + ) + yield { + "is_task_complete": True, + "content": ( + "I'm sorry, I'm having trouble generating the interface for that request" + " right now. Please try again in a moment." + ), + } + # --- End: UI Validation and Retry Logic --- diff --git a/samples/agent/adk/contact_multiple_surfaces/agent_executor.py b/samples/agent/adk/contact_multiple_surfaces/agent_executor.py index 47e39fd40..55fd2ad85 100644 --- a/samples/agent/adk/contact_multiple_surfaces/agent_executor.py +++ b/samples/agent/adk/contact_multiple_surfaces/agent_executor.py @@ -40,197 +40,199 @@ class ContactAgentExecutor(AgentExecutor): - """Contact AgentExecutor Example.""" - - def __init__(self, base_url: str): - # Instantiate the UI agent. - self.ui_agent = ContactAgent(base_url=base_url, use_ui=True) - - async def execute( - self, - context: RequestContext, - event_queue: EventQueue, - ) -> None: - query = "" - ui_event_part = None - action = None - - logger.info( - f"--- Client requested extensions: {context.requested_extensions} ---" - ) - use_ui = try_activate_a2ui_extension(context) - - # Determine which agent to use based on whether the a2ui extension is active. - if use_ui: - agent = self.ui_agent - logger.info( - "--- AGENT_EXECUTOR: A2UI extension is active. Using UI agent. ---" - ) - else: - # Enforce A2UI extension as per review comment - error_msg = "A2UI extension is NOT active. This agent requires A2UI to function." 
- logger.error(f"--- AGENT_EXECUTOR: {error_msg} ---") - raise ServerError(error=UnsupportedOperationError(error_msg)) - - if context.message and context.message.parts: - logger.info( - f"--- AGENT_EXECUTOR: Processing {len(context.message.parts)} message parts ---" - ) - for i, part in enumerate(context.message.parts): - if isinstance(part.root, DataPart): - if "userAction" in part.root.data: - logger.info(f" Part {i}: Found a2ui UI ClientEvent payload.") - ui_event_part = part.root.data["userAction"] - elif "request" in part.root.data: - logger.info(f" Part {i}: Found 'request' in DataPart.") - query = part.root.data["request"] - - # Check for inline catalog - if "metadata" in part.root.data and "inlineCatalogs" in part.root.data["metadata"]: - logger.info(f" Part {i}: Found 'inlineCatalogs' in DataPart.") - inline_catalog = part.root.data["metadata"]["inlineCatalogs"] - catalog_json = json.dumps(inline_catalog) - # Append to query so the agent sees it (simple injection) - query += f"\n\n[SYSTEM: The client supports the following custom components: {catalog_json}]" - else: - logger.info(f" Part {i}: DataPart (data: {part.root.data})") - elif isinstance(part.root, TextPart): - logger.info(f" Part {i}: TextPart (text: {part.root.text})") - else: - logger.info(f" Part {i}: Unknown part type ({type(part.root)})") - - if ui_event_part: - logger.info(f"Received a2ui ClientEvent: {ui_event_part}") - action = ui_event_part.get("name") - ctx = ui_event_part.get("context", {}) - - if action == "view_profile": - contact_name = ctx.get("contactName", "Unknown") - department = ctx.get("department", "") - query = f"WHO_IS: {contact_name} from {department}" - - elif action == "send_email": - contact_name = ctx.get("contactName", "Unknown") - email = ctx.get("email", "Unknown") - query = f"USER_WANTS_TO_EMAIL: {contact_name} at {email}" - - elif action == "send_message": - contact_name = ctx.get("contactName", "Unknown") - query = f"ACTION: send_message (contact: {contact_name})" - - elif action == "view_full_profile": - contact_name = ctx.get("contactName", "Unknown") - query = f"USER_WANTS_FULL_PROFILE: {contact_name}" - - elif action == "view_location": - contact_id = ctx.get("contactId", "Unknown") - query = f"ACTION: view_location (contactId: {contact_id})" - - elif action == "select_desk": - contact_id = ctx.get("contactId", "Unknown") - query = f"ACTION: select_desk contactId:{contact_id}" - - elif action == "chart_node_click": - node_name = ctx.get("clickedNodeName", "Unknown") - source = ctx.get("source", "") - query = f"ACTION: chart_node_click (context: clickedNodeName=\"{node_name}\")" - if source == "modal": - query += " (from modal)" - - elif action == "dismiss_modal" or action == "close_modal": - query = "ACTION: close_modal" - - else: - query = f"User submitted an event: {action} with data: {ctx}" + """Contact AgentExecutor Example.""" + + def __init__(self, base_url: str): + # Instantiate the UI agent. + self.ui_agent = ContactAgent(base_url=base_url, use_ui=True) + + async def execute( + self, + context: RequestContext, + event_queue: EventQueue, + ) -> None: + query = "" + ui_event_part = None + action = None + + logger.info(f"--- Client requested extensions: {context.requested_extensions} ---") + use_ui = try_activate_a2ui_extension(context) + + # Determine which agent to use based on whether the a2ui extension is active. + if use_ui: + agent = self.ui_agent + logger.info("--- AGENT_EXECUTOR: A2UI extension is active. Using UI agent. 
---") + else: + # Enforce A2UI extension as per review comment + error_msg = "A2UI extension is NOT active. This agent requires A2UI to function." + logger.error(f"--- AGENT_EXECUTOR: {error_msg} ---") + raise ServerError(error=UnsupportedOperationError(error_msg)) + + if context.message and context.message.parts: + logger.info( + f"--- AGENT_EXECUTOR: Processing {len(context.message.parts)} message" + " parts ---" + ) + for i, part in enumerate(context.message.parts): + if isinstance(part.root, DataPart): + if "userAction" in part.root.data: + logger.info(f" Part {i}: Found a2ui UI ClientEvent payload.") + ui_event_part = part.root.data["userAction"] + elif "request" in part.root.data: + logger.info(f" Part {i}: Found 'request' in DataPart.") + query = part.root.data["request"] + + # Check for inline catalog + if ( + "metadata" in part.root.data + and "inlineCatalogs" in part.root.data["metadata"] + ): + logger.info(f" Part {i}: Found 'inlineCatalogs' in DataPart.") + inline_catalog = part.root.data["metadata"]["inlineCatalogs"] + catalog_json = json.dumps(inline_catalog) + # Append to query so the agent sees it (simple injection) + query += ( + "\n\n[SYSTEM: The client supports the following custom components:" + f" {catalog_json}]" + ) + else: + logger.info(f" Part {i}: DataPart (data: {part.root.data})") + elif isinstance(part.root, TextPart): + logger.info(f" Part {i}: TextPart (text: {part.root.text})") else: - if not query: - logger.info("No a2ui UI event part found. Falling back to text input.") - query = context.get_user_input() + logger.info(f" Part {i}: Unknown part type ({type(part.root)})") + + if ui_event_part: + logger.info(f"Received a2ui ClientEvent: {ui_event_part}") + action = ui_event_part.get("name") + ctx = ui_event_part.get("context", {}) + + if action == "view_profile": + contact_name = ctx.get("contactName", "Unknown") + department = ctx.get("department", "") + query = f"WHO_IS: {contact_name} from {department}" + + elif action == "send_email": + contact_name = ctx.get("contactName", "Unknown") + email = ctx.get("email", "Unknown") + query = f"USER_WANTS_TO_EMAIL: {contact_name} at {email}" + + elif action == "send_message": + contact_name = ctx.get("contactName", "Unknown") + query = f"ACTION: send_message (contact: {contact_name})" + + elif action == "view_full_profile": + contact_name = ctx.get("contactName", "Unknown") + query = f"USER_WANTS_FULL_PROFILE: {contact_name}" + + elif action == "view_location": + contact_id = ctx.get("contactId", "Unknown") + query = f"ACTION: view_location (contactId: {contact_id})" + + elif action == "select_desk": + contact_id = ctx.get("contactId", "Unknown") + query = f"ACTION: select_desk contactId:{contact_id}" + + elif action == "chart_node_click": + node_name = ctx.get("clickedNodeName", "Unknown") + source = ctx.get("source", "") + query = f'ACTION: chart_node_click (context: clickedNodeName="{node_name}")' + if source == "modal": + query += " (from modal)" + + elif action == "dismiss_modal" or action == "close_modal": + query = "ACTION: close_modal" + + else: + query = f"User submitted an event: {action} with data: {ctx}" + else: + if not query: + logger.info("No a2ui UI event part found. 
Falling back to text input.") + query = context.get_user_input() + + logger.info(f"--- AGENT_EXECUTOR: Final query for LLM: '{query}' ---") + + task = context.current_task + + if not task: + task = new_task(context.message) + await event_queue.enqueue_event(task) + updater = TaskUpdater(event_queue, task.id, task.context_id) + + async for item in agent.stream(query, task.context_id): + is_task_complete = item["is_task_complete"] + if not is_task_complete: + await updater.update_status( + TaskState.working, + new_agent_text_message(item["updates"], task.context_id, task.id), + ) + continue - logger.info(f"--- AGENT_EXECUTOR: Final query for LLM: '{query}' ---") + final_state = TaskState.input_required # Default + if action in ["send_email", "send_message", "view_full_profile"]: + final_state = TaskState.completed - task = context.current_task + content = item["content"] + final_parts = [] + if "---a2ui_JSON---" in content: + logger.info("Splitting final response into text and UI parts.") + text_content, json_string = content.split("---a2ui_JSON---", 1) - if not task: - task = new_task(context.message) - await event_queue.enqueue_event(task) - updater = TaskUpdater(event_queue, task.id, task.context_id) + if text_content.strip(): + final_parts.append(Part(root=TextPart(text=text_content.strip()))) - async for item in agent.stream(query, task.context_id): - is_task_complete = item["is_task_complete"] - if not is_task_complete: - await updater.update_status( - TaskState.working, - new_agent_text_message(item["updates"], task.context_id, task.id), - ) - continue - - final_state = TaskState.input_required # Default - if action in ["send_email", "send_message", "view_full_profile"]: - final_state = TaskState.completed - - content = item["content"] - final_parts = [] - if "---a2ui_JSON---" in content: - logger.info("Splitting final response into text and UI parts.") - text_content, json_string = content.split("---a2ui_JSON---", 1) - - if text_content.strip(): - final_parts.append(Part(root=TextPart(text=text_content.strip()))) - - if json_string.strip(): - try: - json_string_cleaned = ( - json_string.strip().lstrip("```json").rstrip("```").strip() - ) - - # Handle empty JSON list (e.g., no results) - if not json_string_cleaned or json_string_cleaned == "[]": - logger.info("Received empty/no JSON part. Skipping DataPart.") - else: - json_data = json.loads(json_string_cleaned) - if isinstance(json_data, list): - logger.info( - f"Found {len(json_data)} messages. Creating individual DataParts." - ) - for message in json_data: - final_parts.append(create_a2ui_part(message)) - - else: - # Handle the case where a single JSON object is returned - logger.info( - "Received a single JSON object. Creating a DataPart." 
- ) - final_parts.append(create_a2ui_part(json_data)) - - except json.JSONDecodeError as e: - logger.error(f"Failed to parse UI JSON: {e}") - final_parts.append(Part(root=TextPart(text=json_string))) - else: - final_parts.append(Part(root=TextPart(text=content.strip()))) - - # If after all that, we only have empty parts, add a default text response - if not final_parts or all(isinstance(p.root, TextPart) and not p.root.text for p in final_parts): - final_parts = [Part(root=TextPart(text="OK."))] - - - logger.info("--- FINAL PARTS TO BE SENT ---") - for i, part in enumerate(final_parts): - logger.info(f" - Part {i}: Type = {type(part.root)}") - if isinstance(part.root, TextPart): - logger.info(f" - Text: {part.root.text[:200]}...") - elif isinstance(part.root, DataPart): - logger.info(f" - Data: {str(part.root.data)[:200]}...") - logger.info("-----------------------------") - - await updater.update_status( - final_state, - new_agent_parts_message(final_parts, task.context_id, task.id), - final=(final_state == TaskState.completed), + if json_string.strip(): + try: + json_string_cleaned = ( + json_string.strip().lstrip("```json").rstrip("```").strip() ) - break - async def cancel( - self, request: RequestContext, event_queue: EventQueue - ) -> Task | None: - raise ServerError(error=UnsupportedOperationError()) + # Handle empty JSON list (e.g., no results) + if not json_string_cleaned or json_string_cleaned == "[]": + logger.info("Received empty/no JSON part. Skipping DataPart.") + else: + json_data = json.loads(json_string_cleaned) + if isinstance(json_data, list): + logger.info( + f"Found {len(json_data)} messages. Creating individual DataParts." + ) + for message in json_data: + final_parts.append(create_a2ui_part(message)) + + else: + # Handle the case where a single JSON object is returned + logger.info("Received a single JSON object. 
Creating a DataPart.") + final_parts.append(create_a2ui_part(json_data)) + + except json.JSONDecodeError as e: + logger.error(f"Failed to parse UI JSON: {e}") + final_parts.append(Part(root=TextPart(text=json_string))) + else: + final_parts.append(Part(root=TextPart(text=content.strip()))) + + # If after all that, we only have empty parts, add a default text response + if not final_parts or all( + isinstance(p.root, TextPart) and not p.root.text for p in final_parts + ): + final_parts = [Part(root=TextPart(text="OK."))] + + logger.info("--- FINAL PARTS TO BE SENT ---") + for i, part in enumerate(final_parts): + logger.info(f" - Part {i}: Type = {type(part.root)}") + if isinstance(part.root, TextPart): + logger.info(f" - Text: {part.root.text[:200]}...") + elif isinstance(part.root, DataPart): + logger.info(f" - Data: {str(part.root.data)[:200]}...") + logger.info("-----------------------------") + + await updater.update_status( + final_state, + new_agent_parts_message(final_parts, task.context_id, task.id), + final=(final_state == TaskState.completed), + ) + break + + async def cancel( + self, request: RequestContext, event_queue: EventQueue + ) -> Task | None: + raise ServerError(error=UnsupportedOperationError()) diff --git a/samples/agent/adk/contact_multiple_surfaces/prompt_builder.py b/samples/agent/adk/contact_multiple_surfaces/prompt_builder.py index c1b8be5f6..1914e13ed 100644 --- a/samples/agent/adk/contact_multiple_surfaces/prompt_builder.py +++ b/samples/agent/adk/contact_multiple_surfaces/prompt_builder.py @@ -38,23 +38,23 @@ def get_ui_prompt(base_url: str, examples: str) -> str: - """ - Constructs the full prompt with UI instructions, rules, examples, and schema. + """ + Constructs the full prompt with UI instructions, rules, examples, and schema. - Args: - base_url: The base URL for resolving static assets like logos. - examples: A string containing the specific UI examples for the agent's task. + Args: + base_url: The base URL for resolving static assets like logos. + examples: A string containing the specific UI examples for the agent's task. - Returns: - A formatted string to be used as the system prompt for the LLM. - """ + Returns: + A formatted string to be used as the system prompt for the LLM. + """ - # --- THIS IS THE FIX --- - # We no longer call .format() on the examples, as it breaks the JSON. - formatted_examples = examples - # --- END FIX --- + # --- THIS IS THE FIX --- + # We no longer call .format() on the examples, as it breaks the JSON. + formatted_examples = examples + # --- END FIX --- - return f""" + return f""" {AGENT_INSTRUCTION} You are a helpful contact lookup assistant. Your final output MUST be a a2ui UI JSON response. @@ -91,10 +91,10 @@ def get_ui_prompt(base_url: str, examples: str) -> str: def get_text_prompt() -> str: - """ - Constructs the prompt for a text-only agent. - """ - return """ + """ + Constructs the prompt for a text-only agent. + """ + return """ You are a helpful contact lookup assistant. Your final output MUST be a text response. 
To generate the response, you MUST follow these rules: @@ -110,11 +110,12 @@ def get_text_prompt() -> str: if __name__ == "__main__": - # Example of how to use the prompt builder - my_base_url = "http://localhost:8000" - from a2ui_examples import load_examples - contact_prompt = get_ui_prompt(my_base_url, load_examples(my_base_url)) - print(contact_prompt) - with open("generated_prompt.txt", "w") as f: - f.write(contact_prompt) - print("\nGenerated prompt saved to generated_prompt.txt") + # Example of how to use the prompt builder + my_base_url = "http://localhost:8000" + from a2ui_examples import load_examples + + contact_prompt = get_ui_prompt(my_base_url, load_examples(my_base_url)) + print(contact_prompt) + with open("generated_prompt.txt", "w") as f: + f.write(contact_prompt) + print("\nGenerated prompt saved to generated_prompt.txt") diff --git a/samples/agent/adk/contact_multiple_surfaces/tools.py b/samples/agent/adk/contact_multiple_surfaces/tools.py index 0b679c486..2a0c845f4 100644 --- a/samples/agent/adk/contact_multiple_surfaces/tools.py +++ b/samples/agent/adk/contact_multiple_surfaces/tools.py @@ -22,44 +22,42 @@ def get_contact_info(name: str, tool_context: ToolContext, department: str = "") -> str: - """Call this tool to get a list of contacts based on a name and optional department. - 'name' is the person's name to search for. - 'department' is the optional department to filter by. - """ - logger.info("--- TOOL CALLED: get_contact_info ---") - logger.info(f" - Name: {name}") - logger.info(f" - Department: {department}") - - results = [] - try: - script_dir = os.path.dirname(__file__) - file_path = os.path.join(script_dir, "contact_data.json") - with open(file_path) as f: - contact_data_str = f.read() - all_contacts = json.loads(contact_data_str) - - name_lower = name.lower() - - dept_lower = department.lower() if department else "" - - # Filter by name - results = [ - contact for contact in all_contacts if name_lower in contact["name"].lower() - ] - - # If department is provided, filter results further - if dept_lower: - results = [ - contact - for contact in results - if dept_lower in contact["department"].lower() - ] - - logger.info(f" - Success: Found {len(results)} matching contacts.") - - except FileNotFoundError: - logger.error(f" - Error: contact_data.json not found at {file_path}") - except json.JSONDecodeError: - logger.error(f" - Error: Failed to decode JSON from {file_path}") - - return json.dumps(results) + """Call this tool to get a list of contacts based on a name and optional department. + 'name' is the person's name to search for. + 'department' is the optional department to filter by. 
+ """ + logger.info("--- TOOL CALLED: get_contact_info ---") + logger.info(f" - Name: {name}") + logger.info(f" - Department: {department}") + + results = [] + try: + script_dir = os.path.dirname(__file__) + file_path = os.path.join(script_dir, "contact_data.json") + with open(file_path) as f: + contact_data_str = f.read() + all_contacts = json.loads(contact_data_str) + + name_lower = name.lower() + + dept_lower = department.lower() if department else "" + + # Filter by name + results = [ + contact for contact in all_contacts if name_lower in contact["name"].lower() + ] + + # If department is provided, filter results further + if dept_lower: + results = [ + contact for contact in results if dept_lower in contact["department"].lower() + ] + + logger.info(f" - Success: Found {len(results)} matching contacts.") + + except FileNotFoundError: + logger.error(f" - Error: contact_data.json not found at {file_path}") + except json.JSONDecodeError: + logger.error(f" - Error: Failed to decode JSON from {file_path}") + + return json.dumps(results) diff --git a/samples/agent/adk/mcp/__main__.py b/samples/agent/adk/mcp/__main__.py index d6931ceaf..4efdd53e1 100644 --- a/samples/agent/adk/mcp/__main__.py +++ b/samples/agent/adk/mcp/__main__.py @@ -16,4 +16,4 @@ from server import main -sys.exit(main()) # type: ignore[call-arg] \ No newline at end of file +sys.exit(main()) # type: ignore[call-arg] diff --git a/samples/agent/adk/mcp/server.py b/samples/agent/adk/mcp/server.py index 464df4dea..19a9e6af3 100644 --- a/samples/agent/adk/mcp/server.py +++ b/samples/agent/adk/mcp/server.py @@ -26,29 +26,33 @@ def load_a2ui_schema() -> dict[str, Any]: - current_dir = pathlib.Path(__file__).resolve().parent - spec_root = current_dir / "../../../../specification/v0_8/json" + current_dir = pathlib.Path(__file__).resolve().parent + spec_root = current_dir / "../../../../specification/v0_8/json" - server_to_client_content = (spec_root / "server_to_client.json").read_text() - server_to_client_json = json.loads(server_to_client_content) - - standard_catalog_content = ( - spec_root / "standard_catalog_definition.json" - ).read_text() - standard_catalog_json = json.loads(standard_catalog_content) - - server_to_client_json["properties"]["surfaceUpdate"]["properties"]["components"]["items"]["properties"]["component"]["properties"] = standard_catalog_json + server_to_client_content = (spec_root / "server_to_client.json").read_text() + server_to_client_json = json.loads(server_to_client_content) + + standard_catalog_content = ( + spec_root / "standard_catalog_definition.json" + ).read_text() + standard_catalog_json = json.loads(standard_catalog_content) + + server_to_client_json["properties"]["surfaceUpdate"]["properties"]["components"][ + "items" + ]["properties"]["component"]["properties"] = standard_catalog_json + + return wrap_as_json_array(server_to_client_json) - return wrap_as_json_array(server_to_client_json) def load_a2ui_client_to_server_schema() -> dict[str, Any]: - current_dir = pathlib.Path(__file__).resolve().parent - spec_root = current_dir / "../../../../specification/v0_8/json" + current_dir = pathlib.Path(__file__).resolve().parent + spec_root = current_dir / "../../../../specification/v0_8/json" + + client_to_server_content = (spec_root / "client_to_server.json").read_text() + client_to_server_json = json.loads(client_to_server_content) + + return client_to_server_json - client_to_server_content = (spec_root / "client_to_server.json").read_text() - client_to_server_json = 
json.loads(client_to_server_content) - - return client_to_server_json @click.command() @click.option("--port", default=8000, help="Port to listen on for SSE") @@ -59,110 +63,107 @@ def load_a2ui_client_to_server_schema() -> dict[str, Any]: help="Transport type", ) def main(port: int, transport: str) -> int: - a2ui_schema = load_a2ui_schema() - print(f"Loaded A2UI schema: {a2ui_schema}") - - recipe_a2ui_json = json.loads((pathlib.Path(__file__).resolve().parent / "recipe_a2ui.json").read_text()) - jsonschema.validate( - instance=recipe_a2ui_json, schema=a2ui_schema - ) - print(f"Loaded Recipe A2UI JSON: {recipe_a2ui_json}") - - a2ui_client_to_server_schema = load_a2ui_client_to_server_schema() - print(f"Loaded A2UI client to server schema: {a2ui_client_to_server_schema}") - - app = Server("a2ui-over-mcp-demo") - - @app.call_tool() - async def handle_call_tool(name: str, arguments: dict[str, Any]) -> dict[str, Any]: - if name=="get_recipe_a2ui": - return {"events": recipe_a2ui_json} - - if name=="send_a2ui_user_action": - return {"response": f"Received A2UI user action", "args": arguments} - - if name=="send_a2ui_error": - return {"response": f"Received A2UI error", "args": arguments} - - raise ValueError(f"Unknown tool: {name}") - - @app.list_tools() - async def list_tools() -> list[types.Tool]: - return [ - types.Tool( - name="get_recipe_a2ui", - title="Get Recipe A2UI", - description="Returns the A2UI JSON to show a recipe", - inputSchema={ - "type": "object", - "additionalProperties": False - }, - # MCP throws an error for "type":"array" so wrapping in an object - # TODO fix this in MCP SDK - outputSchema={ - "type": "object", - "properties": {"events": a2ui_schema}, - "required": ["events"], - "additionalProperties": False - } - ), - types.Tool( - name="send_a2ui_user_action", - title="Send A2UI User Action", - description="Sends an A2UI user action", - inputSchema=a2ui_client_to_server_schema["properties"]["userAction"], - ), - types.Tool( - name="send_a2ui_error", - title="Send A2UI Error", - description="Sends an A2UI error", - inputSchema=a2ui_client_to_server_schema["properties"]["error"], + a2ui_schema = load_a2ui_schema() + print(f"Loaded A2UI schema: {a2ui_schema}") + + recipe_a2ui_json = json.loads( + (pathlib.Path(__file__).resolve().parent / "recipe_a2ui.json").read_text() + ) + jsonschema.validate(instance=recipe_a2ui_json, schema=a2ui_schema) + print(f"Loaded Recipe A2UI JSON: {recipe_a2ui_json}") + + a2ui_client_to_server_schema = load_a2ui_client_to_server_schema() + print(f"Loaded A2UI client to server schema: {a2ui_client_to_server_schema}") + + app = Server("a2ui-over-mcp-demo") + + @app.call_tool() + async def handle_call_tool(name: str, arguments: dict[str, Any]) -> dict[str, Any]: + if name == "get_recipe_a2ui": + return {"events": recipe_a2ui_json} + + if name == "send_a2ui_user_action": + return {"response": f"Received A2UI user action", "args": arguments} + + if name == "send_a2ui_error": + return {"response": f"Received A2UI error", "args": arguments} + + raise ValueError(f"Unknown tool: {name}") + + @app.list_tools() + async def list_tools() -> list[types.Tool]: + return [ + types.Tool( + name="get_recipe_a2ui", + title="Get Recipe A2UI", + description="Returns the A2UI JSON to show a recipe", + inputSchema={"type": "object", "additionalProperties": False}, + # MCP throws an error for "type":"array" so wrapping in an object + # TODO fix this in MCP SDK + outputSchema={ + "type": "object", + "properties": {"events": a2ui_schema}, + "required": ["events"], + 
"additionalProperties": False, + }, + ), + types.Tool( + name="send_a2ui_user_action", + title="Send A2UI User Action", + description="Sends an A2UI user action", + inputSchema=a2ui_client_to_server_schema["properties"]["userAction"], + ), + types.Tool( + name="send_a2ui_error", + title="Send A2UI Error", + description="Sends an A2UI error", + inputSchema=a2ui_client_to_server_schema["properties"]["error"], + ), + ] + + if transport == "sse": + from mcp.server.sse import SseServerTransport + from starlette.applications import Starlette + from starlette.responses import Response + from starlette.routing import Mount, Route + from starlette.middleware import Middleware + from starlette.middleware.cors import CORSMiddleware + + sse = SseServerTransport("/messages/") + + async def handle_sse(request: Request): + async with sse.connect_sse(request.scope, request.receive, request._send) as streams: # type: ignore[reportPrivateUsage] + await app.run(streams[0], streams[1], app.create_initialization_options()) + return Response() + + starlette_app = Starlette( + debug=True, + routes=[ + Route("/sse", endpoint=handle_sse, methods=["GET"]), + Mount("/messages/", app=sse.handle_post_message), + ], + middleware=[ + Middleware( + CORSMiddleware, + allow_origins=["*"], + allow_methods=["*"], + allow_headers=["*"], ) - ] - - if transport == "sse": - from mcp.server.sse import SseServerTransport - from starlette.applications import Starlette - from starlette.responses import Response - from starlette.routing import Mount, Route - from starlette.middleware import Middleware - from starlette.middleware.cors import CORSMiddleware - - sse = SseServerTransport("/messages/") - - async def handle_sse(request: Request): - async with sse.connect_sse(request.scope, request.receive, request._send) as streams: # type: ignore[reportPrivateUsage] - await app.run(streams[0], streams[1], app.create_initialization_options()) - return Response() - - starlette_app = Starlette( - debug=True, - routes=[ - Route("/sse", endpoint=handle_sse, methods=["GET"]), - Mount("/messages/", app=sse.handle_post_message), - ], - middleware=[ - Middleware( - CORSMiddleware, - allow_origins=["*"], - allow_methods=["*"], - allow_headers=["*"], - ) - ], - ) - - import uvicorn - - print(f"Server running at 127.0.0.1:{port} using sse") - uvicorn.run(starlette_app, host="127.0.0.1", port=port) - else: - from mcp.server.stdio import stdio_server - - async def arun(): - async with stdio_server() as streams: - await app.run(streams[0], streams[1], app.create_initialization_options()) - - click.echo("Server running using stdio", err=True) - anyio.run(arun) - - return 0 \ No newline at end of file + ], + ) + + import uvicorn + + print(f"Server running at 127.0.0.1:{port} using sse") + uvicorn.run(starlette_app, host="127.0.0.1", port=port) + else: + from mcp.server.stdio import stdio_server + + async def arun(): + async with stdio_server() as streams: + await app.run(streams[0], streams[1], app.create_initialization_options()) + + click.echo("Server running using stdio", err=True) + anyio.run(arun) + + return 0 diff --git a/samples/agent/adk/orchestrator/__main__.py b/samples/agent/adk/orchestrator/__main__.py index 92514d4fb..f7fa23cb9 100644 --- a/samples/agent/adk/orchestrator/__main__.py +++ b/samples/agent/adk/orchestrator/__main__.py @@ -32,7 +32,7 @@ class MissingAPIKeyError(Exception): - """Exception for missing API key.""" + """Exception for missing API key.""" @click.command() @@ -40,48 +40,51 @@ class MissingAPIKeyError(Exception): 
@click.option("--port", default=10002, type=int) @click.option("--subagent_urls", multiple=True, type=str, required=True) def main(host, port, subagent_urls): - try: - # Check for API key only if Vertex AI is not configured - if not os.getenv("GOOGLE_GENAI_USE_VERTEXAI") == "TRUE": - if not os.getenv("GEMINI_API_KEY"): - raise MissingAPIKeyError( - "GEMINI_API_KEY environment variable not set and GOOGLE_GENAI_USE_VERTEXAI is not TRUE." - ) + try: + # Check for API key only if Vertex AI is not configured + if not os.getenv("GOOGLE_GENAI_USE_VERTEXAI") == "TRUE": + if not os.getenv("GEMINI_API_KEY"): + raise MissingAPIKeyError( + "GEMINI_API_KEY environment variable not set and GOOGLE_GENAI_USE_VERTEXAI" + " is not TRUE." + ) - base_url = f"http://{host}:{port}" - - orchestrator_agent, agent_card = asyncio.run(OrchestratorAgent.build_agent(base_url=base_url, subagent_urls=subagent_urls)) - agent_executor = OrchestratorAgentExecutor(agent=orchestrator_agent) + base_url = f"http://{host}:{port}" - request_handler = DefaultRequestHandler( - agent_executor=agent_executor, - task_store=InMemoryTaskStore(), - ) - server = A2AStarletteApplication( - agent_card=agent_card, http_handler=request_handler - ) - import uvicorn + orchestrator_agent, agent_card = asyncio.run( + OrchestratorAgent.build_agent(base_url=base_url, subagent_urls=subagent_urls) + ) + agent_executor = OrchestratorAgentExecutor(agent=orchestrator_agent) - app = server.build() + request_handler = DefaultRequestHandler( + agent_executor=agent_executor, + task_store=InMemoryTaskStore(), + ) + server = A2AStarletteApplication( + agent_card=agent_card, http_handler=request_handler + ) + import uvicorn - app.add_middleware( - CORSMiddleware, - allow_origins=["http://localhost:5173"], - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], - ) + app = server.build() - uvicorn.run(app, host=host, port=port) - except MissingAPIKeyError as e: - logger.error(f"Error: {e} {traceback.format_exc()}") - exit(1) - except Exception as e: - logger.error( - f"An error occurred during server startup: {e} {traceback.format_exc()}" - ) - exit(1) + app.add_middleware( + CORSMiddleware, + allow_origins=["http://localhost:5173"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], + ) + + uvicorn.run(app, host=host, port=port) + except MissingAPIKeyError as e: + logger.error(f"Error: {e} {traceback.format_exc()}") + exit(1) + except Exception as e: + logger.error( + f"An error occurred during server startup: {e} {traceback.format_exc()}" + ) + exit(1) if __name__ == "__main__": - main() + main() diff --git a/samples/agent/adk/orchestrator/agent.py b/samples/agent/adk/orchestrator/agent.py index 7298dd627..23fe938ab 100644 --- a/samples/agent/adk/orchestrator/agent.py +++ b/samples/agent/adk/orchestrator/agent.py @@ -25,7 +25,7 @@ import httpx import re import part_converters -from google.adk.agents.callback_context import CallbackContext +from google.adk.agents.callback_context import CallbackContext from google.adk.models.llm_request import LlmRequest from google.adk.models.llm_response import LlmResponse from subagent_route_manager import SubagentRouteManager @@ -41,177 +41,225 @@ logger = logging.getLogger(__name__) + class A2UIMetadataInterceptor(ClientCallInterceptor): - @override - async def intercept( - self, - method_name: str, - request_payload: dict[str, Any], - http_kwargs: dict[str, Any], - agent_card: AgentCard | None, - context: ClientCallContext | None, - ) -> tuple[dict[str, Any], dict[str, Any]]: - 
"""Enables the A2UI extension header and adds A2UI client capabilities to remote agent message metadata.""" - logger.info("Intercepting client call to method: " + method_name + " and payload " + json.dumps(request_payload)) - - if context and context.state and context.state.get("use_ui"): - # Add A2UI extension header - http_kwargs["headers"] = {HTTP_EXTENSION_HEADER: A2UI_EXTENSION_URI} - - # Add A2UI client capabilities (supported catalogs, etc) to message metadata - if (params := request_payload.get("params")) and (message := params.get("message")): - client_capabilities = context.state.get("client_capabilities") - if "metadata" not in message: - message["metadata"] = {} - message["metadata"][A2UI_CLIENT_CAPABILITIES_KEY] = client_capabilities - logger.info(f"Added client capabilities to remote agent message metadata: {client_capabilities}") - - return request_payload, http_kwargs + + @override + async def intercept( + self, + method_name: str, + request_payload: dict[str, Any], + http_kwargs: dict[str, Any], + agent_card: AgentCard | None, + context: ClientCallContext | None, + ) -> tuple[dict[str, Any], dict[str, Any]]: + """Enables the A2UI extension header and adds A2UI client capabilities to remote agent message metadata.""" + logger.info( + "Intercepting client call to method: " + + method_name + + " and payload " + + json.dumps(request_payload) + ) + + if context and context.state and context.state.get("use_ui"): + # Add A2UI extension header + http_kwargs["headers"] = {HTTP_EXTENSION_HEADER: A2UI_EXTENSION_URI} + + # Add A2UI client capabilities (supported catalogs, etc) to message metadata + if (params := request_payload.get("params")) and ( + message := params.get("message") + ): + client_capabilities = context.state.get("client_capabilities") + if "metadata" not in message: + message["metadata"] = {} + message["metadata"][A2UI_CLIENT_CAPABILITIES_KEY] = client_capabilities + logger.info( + "Added client capabilities to remote agent message metadata:" + f" {client_capabilities}" + ) + + return request_payload, http_kwargs + class A2AClientFactoryWithA2UIMetadata(A2AClientFactory): - @override - def create( - self, - card: AgentCard, - consumers: list[Consumer] | None = None, - interceptors: list[ClientCallInterceptor] | None = None, - ) -> Client: - # Add A2UI metadata interceptor - return super().create(card, consumers, (interceptors or []) + [A2UIMetadataInterceptor()]) + + @override + def create( + self, + card: AgentCard, + consumers: list[Consumer] | None = None, + interceptors: list[ClientCallInterceptor] | None = None, + ) -> Client: + # Add A2UI metadata interceptor + return super().create( + card, consumers, (interceptors or []) + [A2UIMetadataInterceptor()] + ) + class OrchestratorAgent: - """An agent that runs an ecommerce dashboard""" - - SUPPORTED_CONTENT_TYPES = ["text", "text/plain"] - - @classmethod - async def programmtically_route_user_action_to_subagent( - cls, - callback_context: CallbackContext, - llm_request: LlmRequest, - ) -> LlmResponse: - if ( - llm_request.contents - and (last_content := llm_request.contents[-1]).parts - and (a2a_part := part_converters.convert_genai_part_to_a2a_part(last_content.parts[-1])) - and is_a2ui_part(a2a_part) - and (user_action := a2a_part.root.data.get("userAction")) - and (surface_id := user_action.get("surfaceId")) - and (target_agent := await SubagentRouteManager.get_route_to_subagent_name(surface_id, callback_context.state)) - ): - logger.info(f"Programmatically routing userAction for surfaceId '{surface_id}' to 
subagent '{target_agent}'") - return LlmResponse( - content=genai_types.Content( - parts=[ - genai_types.Part( - function_call=genai_types.FunctionCall( - name="transfer_to_agent", - args={"agent_name": target_agent}, - ) - ) - ] - ) + """An agent that runs an ecommerce dashboard""" + + SUPPORTED_CONTENT_TYPES = ["text", "text/plain"] + + @classmethod + async def programmtically_route_user_action_to_subagent( + cls, + callback_context: CallbackContext, + llm_request: LlmRequest, + ) -> LlmResponse: + if ( + llm_request.contents + and (last_content := llm_request.contents[-1]).parts + and ( + a2a_part := part_converters.convert_genai_part_to_a2a_part( + last_content.parts[-1] ) - - return None - - @classmethod - async def build_agent(cls, base_url: str, subagent_urls: List[str]) -> (LlmAgent, AgentCard): - """Builds the LLM agent for the orchestrator_agent agent.""" - - subagents = [] - supported_catalog_ids = set() - skills = [] - accepts_inline_catalogs = False - for subagent_url in subagent_urls: - async with httpx.AsyncClient() as httpx_client: - resolver = A2ACardResolver( - httpx_client=httpx_client, - base_url=subagent_url, - ) - - subagent_card = await resolver.get_agent_card() - for extension in subagent_card.capabilities.extensions or []: - if extension.uri == A2UI_EXTENSION_URI and extension.params: - supported_catalog_ids.update(extension.params.get(AGENT_EXTENSION_SUPPORTED_CATALOG_IDS_KEY) or []) - accepts_inline_catalogs |= bool(extension.params.get(AGENT_EXTENSION_ACCEPTS_INLINE_CATALOGS_KEY)) - - skills.extend(subagent_card.skills) - - logger.info('Successfully fetched public agent card:' + subagent_card.model_dump_json(indent=2, exclude_none=True)) - - # clean name for adk - clean_name = re.sub(r'[^0-9a-zA-Z_]+', '_', subagent_card.name) - if clean_name == "": - clean_name = "_" - if clean_name[0].isdigit(): - clean_name = f"_{clean_name}" - - # make remote agent - description = json.dumps({ - "id": clean_name, - "name": subagent_card.name, - "description": subagent_card.description, - "skills": [ - { - "name": skill.name, - "description": skill.description, - "examples": skill.examples, - "tags": skill.tags - } for skill in subagent_card.skills - ] - }, indent=2) - remote_a2a_agent = RemoteA2aAgent( - clean_name, - subagent_card, - description=description, # This will be appended to system instructions - a2a_part_converter=part_converters.convert_a2a_part_to_genai_part, - genai_part_converter=part_converters.convert_genai_part_to_a2a_part, - a2a_client_factory=A2AClientFactoryWithA2UIMetadata( - config=A2AClientConfig( - httpx_client=httpx.AsyncClient( - timeout=httpx.Timeout(timeout=DEFAULT_TIMEOUT), - ), - streaming=False, - polling=False, - supported_transports=[A2ATransport.jsonrpc], - ) - ) - ) - subagents.append(remote_a2a_agent) - - logger.info(f'Created remote agent with description: {description}') - - LITELLM_MODEL = os.getenv("LITELLM_MODEL", "gemini/gemini-2.5-flash") - agent = LlmAgent( - model=LiteLlm(model=LITELLM_MODEL), - name="orchestrator_agent", - description="An agent that orchestrates requests to multiple other agents", - instruction="You are an orchestrator agent. 
Your sole responsibility is to analyze the incoming user request, determine the user's intent, and route the task to exactly one of your expert subagents", - tools=[], - planner=BuiltInPlanner( - thinking_config=genai_types.ThinkingConfig( - include_thoughts=True, + ) + and is_a2ui_part(a2a_part) + and (user_action := a2a_part.root.data.get("userAction")) + and (surface_id := user_action.get("surfaceId")) + and ( + target_agent := await SubagentRouteManager.get_route_to_subagent_name( + surface_id, callback_context.state + ) + ) + ): + logger.info( + f"Programmatically routing userAction for surfaceId '{surface_id}' to" + f" subagent '{target_agent}'" + ) + return LlmResponse( + content=genai_types.Content( + parts=[ + genai_types.Part( + function_call=genai_types.FunctionCall( + name="transfer_to_agent", + args={"agent_name": target_agent}, + ) + ) + ] + ) + ) + + return None + + @classmethod + async def build_agent( + cls, base_url: str, subagent_urls: List[str] + ) -> (LlmAgent, AgentCard): + """Builds the LLM agent for the orchestrator_agent agent.""" + + subagents = [] + supported_catalog_ids = set() + skills = [] + accepts_inline_catalogs = False + for subagent_url in subagent_urls: + async with httpx.AsyncClient() as httpx_client: + resolver = A2ACardResolver( + httpx_client=httpx_client, + base_url=subagent_url, + ) + + subagent_card = await resolver.get_agent_card() + for extension in subagent_card.capabilities.extensions or []: + if extension.uri == A2UI_EXTENSION_URI and extension.params: + supported_catalog_ids.update( + extension.params.get(AGENT_EXTENSION_SUPPORTED_CATALOG_IDS_KEY) or [] + ) + accepts_inline_catalogs |= bool( + extension.params.get(AGENT_EXTENSION_ACCEPTS_INLINE_CATALOGS_KEY) + ) + + skills.extend(subagent_card.skills) + + logger.info( + "Successfully fetched public agent card:" + + subagent_card.model_dump_json(indent=2, exclude_none=True) + ) + + # clean name for adk + clean_name = re.sub(r"[^0-9a-zA-Z_]+", "_", subagent_card.name) + if clean_name == "": + clean_name = "_" + if clean_name[0].isdigit(): + clean_name = f"_{clean_name}" + + # make remote agent + description = json.dumps( + { + "id": clean_name, + "name": subagent_card.name, + "description": subagent_card.description, + "skills": [ + { + "name": skill.name, + "description": skill.description, + "examples": skill.examples, + "tags": skill.tags, + } + for skill in subagent_card.skills + ], + }, + indent=2, + ) + remote_a2a_agent = RemoteA2aAgent( + clean_name, + subagent_card, + description=description, # This will be appended to system instructions + a2a_part_converter=part_converters.convert_a2a_part_to_genai_part, + genai_part_converter=part_converters.convert_genai_part_to_a2a_part, + a2a_client_factory=A2AClientFactoryWithA2UIMetadata( + config=A2AClientConfig( + httpx_client=httpx.AsyncClient( + timeout=httpx.Timeout(timeout=DEFAULT_TIMEOUT), + ), + streaming=False, + polling=False, + supported_transports=[A2ATransport.jsonrpc], ) ), - sub_agents=subagents, - before_model_callback=cls.programmtically_route_user_action_to_subagent, ) + subagents.append(remote_a2a_agent) - agent_card = AgentCard( - name="Orchestrator Agent", - description="This agent orchestrates requests to multiple subagents.", - url=base_url, - version="1.0.0", - default_input_modes=OrchestratorAgent.SUPPORTED_CONTENT_TYPES, - default_output_modes=OrchestratorAgent.SUPPORTED_CONTENT_TYPES, - capabilities=AgentCapabilities( - streaming=True, - extensions=[get_a2ui_agent_extension( + logger.info(f"Created remote agent with 
description: {description}") + + LITELLM_MODEL = os.getenv("LITELLM_MODEL", "gemini/gemini-2.5-flash") + agent = LlmAgent( + model=LiteLlm(model=LITELLM_MODEL), + name="orchestrator_agent", + description="An agent that orchestrates requests to multiple other agents", + instruction=( + "You are an orchestrator agent. Your sole responsibility is to analyze the" + " incoming user request, determine the user's intent, and route the task to" + " exactly one of your expert subagents" + ), + tools=[], + planner=BuiltInPlanner( + thinking_config=genai_types.ThinkingConfig( + include_thoughts=True, + ) + ), + sub_agents=subagents, + before_model_callback=cls.programmtically_route_user_action_to_subagent, + ) + + agent_card = AgentCard( + name="Orchestrator Agent", + description="This agent orchestrates requests to multiple subagents.", + url=base_url, + version="1.0.0", + default_input_modes=OrchestratorAgent.SUPPORTED_CONTENT_TYPES, + default_output_modes=OrchestratorAgent.SUPPORTED_CONTENT_TYPES, + capabilities=AgentCapabilities( + streaming=True, + extensions=[ + get_a2ui_agent_extension( accepts_inline_catalogs=accepts_inline_catalogs, - supported_catalog_ids=list(supported_catalog_ids))], - ), - skills=skills, - ) + supported_catalog_ids=list(supported_catalog_ids), + ) + ], + ), + skills=skills, + ) - return agent, agent_card \ No newline at end of file + return agent, agent_card diff --git a/samples/agent/adk/orchestrator/agent_executor.py b/samples/agent/adk/orchestrator/agent_executor.py index 0d1dab4cd..64d8dc509 100644 --- a/samples/agent/adk/orchestrator/agent_executor.py +++ b/samples/agent/adk/orchestrator/agent_executor.py @@ -46,100 +46,113 @@ class OrchestratorAgentExecutor(A2aAgentExecutor): - """Contact AgentExecutor Example.""" - - def __init__(self, agent: LlmAgent): - config = A2aAgentExecutorConfig( - gen_ai_part_converter=part_converters.convert_genai_part_to_a2a_part, - a2a_part_converter=part_converters.convert_a2a_part_to_genai_part, - event_converter=self.convert_event_to_a2a_events_and_save_surface_id_to_subagent_name, - ) - - runner = Runner( - app_name=agent.name, - agent=agent, - artifact_service=InMemoryArtifactService(), - session_service=InMemorySessionService(), - memory_service=InMemoryMemoryService(), - ) - - super().__init__(runner=runner, config=config) - - @classmethod - def convert_event_to_a2a_events_and_save_surface_id_to_subagent_name( - cls, - event: Event, - invocation_context: InvocationContext, - task_id: Optional[str] = None, - context_id: Optional[str] = None, - part_converter: part_converter.GenAIPartToA2APartConverter = part_converter.convert_genai_part_to_a2a_part, - ) -> List[A2AEvent]: - a2a_events = event_converter.convert_event_to_a2a_events( - event, - invocation_context, - task_id, - context_id, - part_converter, - ) - - for a2a_event in a2a_events: - # Try to populate subagent agent card if available. 
-            subagent_card = None
-            if (active_subagent_name := event.author):
-                # We need to find the subagent by name
-                if (subagent := next((sub for sub in invocation_context.agent.sub_agents if sub.name == active_subagent_name), None)):
-                    try:
-                        subagent_card = json.loads(subagent.description)
-                    except Exception:
-                        logger.warning(f"Failed to parse agent description for {active_subagent_name}")
-            if subagent_card:
-                if a2a_event.metadata is None:
-                    a2a_event.metadata = {}
-                a2a_event.metadata["a2a_subagent"] = subagent_card
-
-            for a2a_part in a2a_event.status.message.parts:
-                if (
-                    is_a2ui_part(a2a_part)
-                    and (begin_rendering := a2a_part.root.data.get("beginRendering"))
-                    and (surface_id := begin_rendering.get("surfaceId"))
-                ):
-                    asyncio.run_coroutine_threadsafe(
-                        SubagentRouteManager.set_route_to_subagent_name(
-                            surface_id,
-                            event.author,
-                            invocation_context.session_service,
-                            invocation_context.session,
-                        ),
-                        asyncio.get_event_loop(),
-                    )
-
-        return a2a_events
-
-    @override
-    async def _prepare_session(
-        self,
-        context: RequestContext,
-        run_request: AgentRunRequest,
-        runner: Runner,
-    ):
-        session = await super()._prepare_session(context, run_request, runner)
-
-        if try_activate_a2ui_extension(context):
-            client_capabilities = context.message.metadata.get(A2UI_CLIENT_CAPABILITIES_KEY) if context.message and context.message.metadata else None
-
-            await runner.session_service.append_event(
-                session,
-                Event(
-                    invocation_id=new_invocation_context_id(),
-                    author="system",
-                    actions=EventActions(
-                        state_delta={
-                            # These values are used to configure A2UI messages to remote agent calls
-                            "use_ui": True,
-                            "client_capabilities": client_capabilities
-                        }
-                    ),
-                ),
-            )
-
-        return session
\ No newline at end of file
+  """Orchestrator AgentExecutor Example."""
+
+  def __init__(self, agent: LlmAgent):
+    config = A2aAgentExecutorConfig(
+        gen_ai_part_converter=part_converters.convert_genai_part_to_a2a_part,
+        a2a_part_converter=part_converters.convert_a2a_part_to_genai_part,
+        event_converter=self.convert_event_to_a2a_events_and_save_surface_id_to_subagent_name,
+    )
+
+    runner = Runner(
+        app_name=agent.name,
+        agent=agent,
+        artifact_service=InMemoryArtifactService(),
+        session_service=InMemorySessionService(),
+        memory_service=InMemoryMemoryService(),
+    )
+
+    super().__init__(runner=runner, config=config)
+
+  @classmethod
+  def convert_event_to_a2a_events_and_save_surface_id_to_subagent_name(
+      cls,
+      event: Event,
+      invocation_context: InvocationContext,
+      task_id: Optional[str] = None,
+      context_id: Optional[str] = None,
+      part_converter: part_converter.GenAIPartToA2APartConverter = part_converter.convert_genai_part_to_a2a_part,
+  ) -> List[A2AEvent]:
+    a2a_events = event_converter.convert_event_to_a2a_events(
+        event,
+        invocation_context,
+        task_id,
+        context_id,
+        part_converter,
+    )
+
+    for a2a_event in a2a_events:
+      # Try to populate subagent agent card if available. 
+ subagent_card = None + if active_subagent_name := event.author: + # We need to find the subagent by name + if subagent := next( + ( + sub + for sub in invocation_context.agent.sub_agents + if sub.name == active_subagent_name + ), + None, + ): + try: + subagent_card = json.loads(subagent.description) + except Exception: + logger.warning( + f"Failed to parse agent description for {active_subagent_name}" + ) + if subagent_card: + if a2a_event.metadata is None: + a2a_event.metadata = {} + a2a_event.metadata["a2a_subagent"] = subagent_card + + for a2a_part in a2a_event.status.message.parts: + if ( + is_a2ui_part(a2a_part) + and (begin_rendering := a2a_part.root.data.get("beginRendering")) + and (surface_id := begin_rendering.get("surfaceId")) + ): + asyncio.run_coroutine_threadsafe( + SubagentRouteManager.set_route_to_subagent_name( + surface_id, + event.author, + invocation_context.session_service, + invocation_context.session, + ), + asyncio.get_event_loop(), + ) + + return a2a_events + + @override + async def _prepare_session( + self, + context: RequestContext, + run_request: AgentRunRequest, + runner: Runner, + ): + session = await super()._prepare_session(context, run_request, runner) + + if try_activate_a2ui_extension(context): + client_capabilities = ( + context.message.metadata.get(A2UI_CLIENT_CAPABILITIES_KEY) + if context.message and context.message.metadata + else None + ) + + await runner.session_service.append_event( + session, + Event( + invocation_id=new_invocation_context_id(), + author="system", + actions=EventActions( + state_delta={ + # These values are used to configure A2UI messages to remote agent calls + "use_ui": True, + "client_capabilities": client_capabilities, + } + ), + ), + ) + + return session diff --git a/samples/agent/adk/orchestrator/part_converters.py b/samples/agent/adk/orchestrator/part_converters.py index d9c3b656d..ad15c3648 100644 --- a/samples/agent/adk/orchestrator/part_converters.py +++ b/samples/agent/adk/orchestrator/part_converters.py @@ -25,27 +25,39 @@ logger = logging.getLogger(__name__) + def convert_a2a_part_to_genai_part( a2a_part: a2a_types.Part, -) -> Optional[genai_types.Part]: - if is_a2ui_part(a2a_part): - genai_part = genai_types.Part(text=a2a_part.model_dump_json()) - logger.info(f'Converted A2UI part from A2A: {a2a_part.model_dump_json(exclude_none=True)} to GenAI: {genai_part.model_dump_json(exclude_none=True)}'[:200] + "...") - return genai_part - - return part_converter.convert_a2a_part_to_genai_part(a2a_part) - -def convert_genai_part_to_a2a_part( +) -> Optional[genai_types.Part]: + if is_a2ui_part(a2a_part): + genai_part = genai_types.Part(text=a2a_part.model_dump_json()) + logger.info( + f"Converted A2UI part from A2A: {a2a_part.model_dump_json(exclude_none=True)} to GenAI: {genai_part.model_dump_json(exclude_none=True)}"[ + :200 + ] + + "..." 
+ ) + return genai_part + + return part_converter.convert_a2a_part_to_genai_part(a2a_part) + + +def convert_genai_part_to_a2a_part( part: genai_types.Part, ) -> Optional[a2a_types.Part]: - if part.text: - try: - a2a_part = a2a_types.Part.model_validate_json(part.text) - if is_a2ui_part(a2a_part): - logger.info(f'Converted A2UI part from GenAI: {part.model_dump_json(exclude_none=True)} to A2A: {a2a_part.model_dump_json(exclude_none=True)}'[:200] + "...") - return a2a_part - except pydantic.ValidationError: - # Expected for normal text input - pass - - return part_converter.convert_genai_part_to_a2a_part(part) \ No newline at end of file + if part.text: + try: + a2a_part = a2a_types.Part.model_validate_json(part.text) + if is_a2ui_part(a2a_part): + logger.info( + f"Converted A2UI part from GenAI: {part.model_dump_json(exclude_none=True)} to A2A: {a2a_part.model_dump_json(exclude_none=True)}"[ + :200 + ] + + "..." + ) + return a2a_part + except pydantic.ValidationError: + # Expected for normal text input + pass + + return part_converter.convert_genai_part_to_a2a_part(part) diff --git a/samples/agent/adk/orchestrator/subagent_route_manager.py b/samples/agent/adk/orchestrator/subagent_route_manager.py index 2bd025cdd..bd6dae9c7 100644 --- a/samples/agent/adk/orchestrator/subagent_route_manager.py +++ b/samples/agent/adk/orchestrator/subagent_route_manager.py @@ -37,7 +37,11 @@ async def get_route_to_subagent_name( ) -> Optional[str]: """Gets the subagent route for the given tool call id.""" subagent_name = state.get(cls._get_routing_key(surface_id), None) - logging.info("Got subagent route for surface_id %s to subagent_name %s", surface_id, subagent_name) + logging.info( + "Got subagent route for surface_id %s to subagent_name %s", + surface_id, + subagent_name, + ) return subagent_name @classmethod @@ -49,7 +53,7 @@ async def set_route_to_subagent_name( session: Session, ): """Sets the subagent route for the given tool call id.""" - key = cls._get_routing_key(surface_id) + key = cls._get_routing_key(surface_id) if session.state.get(key) != subagent_name: await session_service.append_event( @@ -61,4 +65,8 @@ async def set_route_to_subagent_name( ), ) - logging.info("Set subagent route for surface_id %s to subagent_name %s", surface_id, subagent_name) \ No newline at end of file + logging.info( + "Set subagent route for surface_id %s to subagent_name %s", + surface_id, + subagent_name, + ) diff --git a/samples/agent/adk/pyproject.toml b/samples/agent/adk/pyproject.toml index 0f0914e10..8df3e4626 100644 --- a/samples/agent/adk/pyproject.toml +++ b/samples/agent/adk/pyproject.toml @@ -8,3 +8,22 @@ members = ["contact_lookup", "contact_multiple_surfaces", "orchestrator", "resta [tool.uv.sources] a2ui-agent = { path = "../../../a2a_agents/python/a2ui_agent", editable = true } +[tool.pyink] +unstable = true +target-version = [] +pyink-indentation = 2 +pyink-use-majority-quotes = true +pyink-annotation-pragmas = [ + "noqa", + "pylint:", + "type: ignore", + "pytype:", + "mypy:", + "pyright:", + "pyre-", +] + +[dependency-groups] +dev = [ + "pyink>=24.10.0", +] diff --git a/samples/agent/adk/restaurant_finder/__main__.py b/samples/agent/adk/restaurant_finder/__main__.py index b7114fd78..ee4e9115b 100644 --- a/samples/agent/adk/restaurant_finder/__main__.py +++ b/samples/agent/adk/restaurant_finder/__main__.py @@ -34,77 +34,80 @@ class MissingAPIKeyError(Exception): - """Exception for missing API key.""" + """Exception for missing API key.""" @click.command() @click.option("--host", default="localhost") 
@click.option("--port", default=10002) def main(host, port): - try: - # Check for API key only if Vertex AI is not configured - if not os.getenv("GOOGLE_GENAI_USE_VERTEXAI") == "TRUE": - if not os.getenv("GEMINI_API_KEY"): - raise MissingAPIKeyError( - "GEMINI_API_KEY environment variable not set and GOOGLE_GENAI_USE_VERTEXAI is not TRUE." - ) - - capabilities = AgentCapabilities( - streaming=True, - extensions=[get_a2ui_agent_extension()], + try: + # Check for API key only if Vertex AI is not configured + if not os.getenv("GOOGLE_GENAI_USE_VERTEXAI") == "TRUE": + if not os.getenv("GEMINI_API_KEY"): + raise MissingAPIKeyError( + "GEMINI_API_KEY environment variable not set and GOOGLE_GENAI_USE_VERTEXAI" + " is not TRUE." ) - skill = AgentSkill( - id="find_restaurants", - name="Find Restaurants Tool", - description="Helps find restaurants based on user criteria (e.g., cuisine, location).", - tags=["restaurant", "finder"], - examples=["Find me the top 10 chinese restaurants in the US"], - ) - - base_url = f"http://{host}:{port}" - - agent_card = AgentCard( - name="Restaurant Agent", - description="This agent helps find restaurants based on user criteria.", - url=base_url, # <-- Use base_url here - version="1.0.0", - default_input_modes=RestaurantAgent.SUPPORTED_CONTENT_TYPES, - default_output_modes=RestaurantAgent.SUPPORTED_CONTENT_TYPES, - capabilities=capabilities, - skills=[skill], - ) - - agent_executor = RestaurantAgentExecutor(base_url=base_url) - - request_handler = DefaultRequestHandler( - agent_executor=agent_executor, - task_store=InMemoryTaskStore(), - ) - server = A2AStarletteApplication( - agent_card=agent_card, http_handler=request_handler - ) - import uvicorn - - app = server.build() - - app.add_middleware( - CORSMiddleware, - allow_origin_regex=r"http://localhost:\d+", - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], - ) - - app.mount("/static", StaticFiles(directory="images"), name="static") - uvicorn.run(app, host=host, port=port) - except MissingAPIKeyError as e: - logger.error(f"Error: {e}") - exit(1) - except Exception as e: - logger.error(f"An error occurred during server startup: {e}") - exit(1) + capabilities = AgentCapabilities( + streaming=True, + extensions=[get_a2ui_agent_extension()], + ) + skill = AgentSkill( + id="find_restaurants", + name="Find Restaurants Tool", + description=( + "Helps find restaurants based on user criteria (e.g., cuisine, location)." 
+ ), + tags=["restaurant", "finder"], + examples=["Find me the top 10 chinese restaurants in the US"], + ) + + base_url = f"http://{host}:{port}" + + agent_card = AgentCard( + name="Restaurant Agent", + description="This agent helps find restaurants based on user criteria.", + url=base_url, # <-- Use base_url here + version="1.0.0", + default_input_modes=RestaurantAgent.SUPPORTED_CONTENT_TYPES, + default_output_modes=RestaurantAgent.SUPPORTED_CONTENT_TYPES, + capabilities=capabilities, + skills=[skill], + ) + + agent_executor = RestaurantAgentExecutor(base_url=base_url) + + request_handler = DefaultRequestHandler( + agent_executor=agent_executor, + task_store=InMemoryTaskStore(), + ) + server = A2AStarletteApplication( + agent_card=agent_card, http_handler=request_handler + ) + import uvicorn + + app = server.build() + + app.add_middleware( + CORSMiddleware, + allow_origin_regex=r"http://localhost:\d+", + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], + ) + + app.mount("/static", StaticFiles(directory="images"), name="static") + + uvicorn.run(app, host=host, port=port) + except MissingAPIKeyError as e: + logger.error(f"Error: {e}") + exit(1) + except Exception as e: + logger.error(f"An error occurred during server startup: {e}") + exit(1) if __name__ == "__main__": - main() + main() diff --git a/samples/agent/adk/restaurant_finder/agent.py b/samples/agent/adk/restaurant_finder/agent.py index 5283cd882..e4f9e3fef 100644 --- a/samples/agent/adk/restaurant_finder/agent.py +++ b/samples/agent/adk/restaurant_finder/agent.py @@ -54,250 +54,246 @@ class RestaurantAgent: - """An agent that finds restaurants based on user criteria.""" - - SUPPORTED_CONTENT_TYPES = ["text", "text/plain"] - - def __init__(self, base_url: str, use_ui: bool = False): - self.base_url = base_url - self.use_ui = use_ui - self._agent = self._build_agent(use_ui) - self._user_id = "remote_agent" - self._runner = Runner( - app_name=self._agent.name, - agent=self._agent, - artifact_service=InMemoryArtifactService(), - session_service=InMemorySessionService(), - memory_service=InMemoryMemoryService(), - ) - - # --- MODIFICATION: Wrap the schema --- - # Load the A2UI_SCHEMA string into a Python object for validation - try: - # First, load the schema for a *single message* - single_message_schema = json.loads(A2UI_SCHEMA) - - # The prompt instructs the LLM to return a *list* of messages. - # Therefore, our validation schema must be an *array* of the single message schema. - self.a2ui_schema_object = {"type": "array", "items": single_message_schema} - logger.info( - "A2UI_SCHEMA successfully loaded and wrapped in an array validator." - ) - except json.JSONDecodeError as e: - logger.error(f"CRITICAL: Failed to parse A2UI_SCHEMA: {e}") - self.a2ui_schema_object = None - # --- END MODIFICATION --- - - def get_processing_message(self) -> str: - return "Finding restaurants that match your criteria..." 
- - def _build_agent(self, use_ui: bool) -> LlmAgent: - """Builds the LLM agent for the restaurant agent.""" - LITELLM_MODEL = os.getenv("LITELLM_MODEL", "gemini/gemini-2.5-flash") - - if use_ui: - # Construct the full prompt with UI instructions, examples, and schema - instruction = AGENT_INSTRUCTION + get_ui_prompt( - self.base_url, RESTAURANT_UI_EXAMPLES + """An agent that finds restaurants based on user criteria.""" + + SUPPORTED_CONTENT_TYPES = ["text", "text/plain"] + + def __init__(self, base_url: str, use_ui: bool = False): + self.base_url = base_url + self.use_ui = use_ui + self._agent = self._build_agent(use_ui) + self._user_id = "remote_agent" + self._runner = Runner( + app_name=self._agent.name, + agent=self._agent, + artifact_service=InMemoryArtifactService(), + session_service=InMemorySessionService(), + memory_service=InMemoryMemoryService(), + ) + + # --- MODIFICATION: Wrap the schema --- + # Load the A2UI_SCHEMA string into a Python object for validation + try: + # First, load the schema for a *single message* + single_message_schema = json.loads(A2UI_SCHEMA) + + # The prompt instructs the LLM to return a *list* of messages. + # Therefore, our validation schema must be an *array* of the single message schema. + self.a2ui_schema_object = {"type": "array", "items": single_message_schema} + logger.info("A2UI_SCHEMA successfully loaded and wrapped in an array validator.") + except json.JSONDecodeError as e: + logger.error(f"CRITICAL: Failed to parse A2UI_SCHEMA: {e}") + self.a2ui_schema_object = None + # --- END MODIFICATION --- + + def get_processing_message(self) -> str: + return "Finding restaurants that match your criteria..." + + def _build_agent(self, use_ui: bool) -> LlmAgent: + """Builds the LLM agent for the restaurant agent.""" + LITELLM_MODEL = os.getenv("LITELLM_MODEL", "gemini/gemini-2.5-flash") + + if use_ui: + # Construct the full prompt with UI instructions, examples, and schema + instruction = AGENT_INSTRUCTION + get_ui_prompt( + self.base_url, RESTAURANT_UI_EXAMPLES + ) + else: + instruction = get_text_prompt() + + return LlmAgent( + model=LiteLlm(model=LITELLM_MODEL), + name="restaurant_agent", + description="An agent that finds restaurants and helps book tables.", + instruction=instruction, + tools=[get_restaurants], + ) + + async def stream(self, query, session_id) -> AsyncIterable[dict[str, Any]]: + session_state = {"base_url": self.base_url} + + session = await self._runner.session_service.get_session( + app_name=self._agent.name, + user_id=self._user_id, + session_id=session_id, + ) + if session is None: + session = await self._runner.session_service.create_session( + app_name=self._agent.name, + user_id=self._user_id, + state=session_state, + session_id=session_id, + ) + elif "base_url" not in session.state: + session.state["base_url"] = self.base_url + + # --- Begin: UI Validation and Retry Logic --- + max_retries = 1 # Total 2 attempts + attempt = 0 + current_query_text = query + + # Ensure schema was loaded + if self.use_ui and self.a2ui_schema_object is None: + logger.error( + "--- RestaurantAgent.stream: A2UI_SCHEMA is not loaded. " + "Cannot perform UI validation. ---" + ) + yield { + "is_task_complete": True, + "content": ( + "I'm sorry, I'm facing an internal configuration error with my UI" + " components. Please contact support." 
+ ), + } + return + + while attempt <= max_retries: + attempt += 1 + logger.info( + f"--- RestaurantAgent.stream: Attempt {attempt}/{max_retries + 1} " + f"for session {session_id} ---" + ) + + current_message = types.Content( + role="user", parts=[types.Part.from_text(text=current_query_text)] + ) + final_response_content = None + + async for event in self._runner.run_async( + user_id=self._user_id, + session_id=session.id, + new_message=current_message, + ): + logger.info(f"Event from runner: {event}") + if event.is_final_response(): + if event.content and event.content.parts and event.content.parts[0].text: + final_response_content = "\n".join( + [p.text for p in event.content.parts if p.text] ) + break # Got the final response, stop consuming events else: - instruction = get_text_prompt() - - return LlmAgent( - model=LiteLlm(model=LITELLM_MODEL), - name="restaurant_agent", - description="An agent that finds restaurants and helps book tables.", - instruction=instruction, - tools=[get_restaurants], + logger.info(f"Intermediate event: {event}") + # Yield intermediate updates on every attempt + yield { + "is_task_complete": False, + "updates": self.get_processing_message(), + } + + if final_response_content is None: + logger.warning( + "--- RestaurantAgent.stream: Received no final response content from" + f" runner (Attempt {attempt}). ---" ) - - async def stream(self, query, session_id) -> AsyncIterable[dict[str, Any]]: - session_state = {"base_url": self.base_url} - - session = await self._runner.session_service.get_session( - app_name=self._agent.name, - user_id=self._user_id, - session_id=session_id, + if attempt <= max_retries: + current_query_text = ( + "I received no response. Please try again." + f"Please retry the original request: '{query}'" + ) + continue # Go to next retry + else: + # Retries exhausted on no-response + final_response_content = ( + "I'm sorry, I encountered an error and couldn't process your request." + ) + # Fall through to send this as a text-only error + + is_valid = False + error_message = "" + + if self.use_ui: + logger.info( + "--- RestaurantAgent.stream: Validating UI response (Attempt" + f" {attempt})... ---" ) - if session is None: - session = await self._runner.session_service.create_session( - app_name=self._agent.name, - user_id=self._user_id, - state=session_state, - session_id=session_id, - ) - elif "base_url" not in session.state: - session.state["base_url"] = self.base_url - - # --- Begin: UI Validation and Retry Logic --- - max_retries = 1 # Total 2 attempts - attempt = 0 - current_query_text = query - - # Ensure schema was loaded - if self.use_ui and self.a2ui_schema_object is None: - logger.error( - "--- RestaurantAgent.stream: A2UI_SCHEMA is not loaded. " - "Cannot perform UI validation. ---" - ) - yield { - "is_task_complete": True, - "content": ( - "I'm sorry, I'm facing an internal configuration error with my UI components. " - "Please contact support." 
- ), - } - return - - while attempt <= max_retries: - attempt += 1 - logger.info( - f"--- RestaurantAgent.stream: Attempt {attempt}/{max_retries + 1} " - f"for session {session_id} ---" - ) - - current_message = types.Content( - role="user", parts=[types.Part.from_text(text=current_query_text)] - ) - final_response_content = None - - async for event in self._runner.run_async( - user_id=self._user_id, - session_id=session.id, - new_message=current_message, - ): - logger.info(f"Event from runner: {event}") - if event.is_final_response(): - if ( - event.content - and event.content.parts - and event.content.parts[0].text - ): - final_response_content = "\n".join( - [p.text for p in event.content.parts if p.text] - ) - break # Got the final response, stop consuming events - else: - logger.info(f"Intermediate event: {event}") - # Yield intermediate updates on every attempt - yield { - "is_task_complete": False, - "updates": self.get_processing_message(), - } - - if final_response_content is None: - logger.warning( - f"--- RestaurantAgent.stream: Received no final response content from runner " - f"(Attempt {attempt}). ---" - ) - if attempt <= max_retries: - current_query_text = ( - "I received no response. Please try again." - f"Please retry the original request: '{query}'" - ) - continue # Go to next retry - else: - # Retries exhausted on no-response - final_response_content = "I'm sorry, I encountered an error and couldn't process your request." - # Fall through to send this as a text-only error - - is_valid = False - error_message = "" - - if self.use_ui: - logger.info( - f"--- RestaurantAgent.stream: Validating UI response (Attempt {attempt})... ---" - ) - try: - if "---a2ui_JSON---" not in final_response_content: - raise ValueError("Delimiter '---a2ui_JSON---' not found.") - - text_part, json_string = final_response_content.split( - "---a2ui_JSON---", 1 - ) - - if not json_string.strip(): - raise ValueError("JSON part is empty.") - - json_string_cleaned = ( - json_string.strip().lstrip("```json").rstrip("```").strip() - ) - - if not json_string_cleaned: - raise ValueError("Cleaned JSON string is empty.") - - # --- New Validation Steps --- - # 1. Check if it's parsable JSON - parsed_json_data = json.loads(json_string_cleaned) - - # 2. Check if it validates against the A2UI_SCHEMA - # This will raise jsonschema.exceptions.ValidationError if it fails - logger.info( - "--- RestaurantAgent.stream: Validating against A2UI_SCHEMA... ---" - ) - jsonschema.validate( - instance=parsed_json_data, schema=self.a2ui_schema_object - ) - # --- End New Validation Steps --- - - logger.info( - f"--- RestaurantAgent.stream: UI JSON successfully parsed AND validated against schema. " - f"Validation OK (Attempt {attempt}). ---" - ) - is_valid = True - - except ( - ValueError, - json.JSONDecodeError, - jsonschema.exceptions.ValidationError, - ) as e: - logger.warning( - f"--- RestaurantAgent.stream: A2UI validation failed: {e} (Attempt {attempt}) ---" - ) - logger.warning( - f"--- Failed response content: {final_response_content[:500]}... ---" - ) - error_message = f"Validation failed: {e}." - - else: # Not using UI, so text is always "valid" - is_valid = True - - if is_valid: - logger.info( - f"--- RestaurantAgent.stream: Response is valid. Sending final response (Attempt {attempt}). 
---" - ) - logger.info(f"Final response: {final_response_content}") - yield { - "is_task_complete": True, - "content": final_response_content, - } - return # We're done, exit the generator - - # --- If we're here, it means validation failed --- - - if attempt <= max_retries: - logger.warning( - f"--- RestaurantAgent.stream: Retrying... ({attempt}/{max_retries + 1}) ---" - ) - # Prepare the query for the retry - current_query_text = ( - f"Your previous response was invalid. {error_message} " - "You MUST generate a valid response that strictly follows the A2UI JSON SCHEMA. " - "The response MUST be a JSON list of A2UI messages. " - "Ensure the response is split by '---a2ui_JSON---' and the JSON part is well-formed. " - f"Please retry the original request: '{query}'" - ) - # Loop continues... - - # --- If we're here, it means we've exhausted retries --- - logger.error( - "--- RestaurantAgent.stream: Max retries exhausted. Sending text-only error. ---" + try: + if "---a2ui_JSON---" not in final_response_content: + raise ValueError("Delimiter '---a2ui_JSON---' not found.") + + text_part, json_string = final_response_content.split("---a2ui_JSON---", 1) + + if not json_string.strip(): + raise ValueError("JSON part is empty.") + + json_string_cleaned = ( + json_string.strip().lstrip("```json").rstrip("```").strip() + ) + + if not json_string_cleaned: + raise ValueError("Cleaned JSON string is empty.") + + # --- New Validation Steps --- + # 1. Check if it's parsable JSON + parsed_json_data = json.loads(json_string_cleaned) + + # 2. Check if it validates against the A2UI_SCHEMA + # This will raise jsonschema.exceptions.ValidationError if it fails + logger.info( + "--- RestaurantAgent.stream: Validating against A2UI_SCHEMA... ---" + ) + jsonschema.validate(instance=parsed_json_data, schema=self.a2ui_schema_object) + # --- End New Validation Steps --- + + logger.info( + "--- RestaurantAgent.stream: UI JSON successfully parsed AND validated" + f" against schema. Validation OK (Attempt {attempt}). ---" + ) + is_valid = True + + except ( + ValueError, + json.JSONDecodeError, + jsonschema.exceptions.ValidationError, + ) as e: + logger.warning( + f"--- RestaurantAgent.stream: A2UI validation failed: {e} (Attempt" + f" {attempt}) ---" + ) + logger.warning( + f"--- Failed response content: {final_response_content[:500]}... ---" + ) + error_message = f"Validation failed: {e}." + + else: # Not using UI, so text is always "valid" + is_valid = True + + if is_valid: + logger.info( + "--- RestaurantAgent.stream: Response is valid. Sending final response" + f" (Attempt {attempt}). ---" ) + logger.info(f"Final response: {final_response_content}") yield { "is_task_complete": True, - "content": ( - "I'm sorry, I'm having trouble generating the interface for that request right now. " - "Please try again in a moment." - ), + "content": final_response_content, } - # --- End: UI Validation and Retry Logic --- + return # We're done, exit the generator + + # --- If we're here, it means validation failed --- + + if attempt <= max_retries: + logger.warning( + f"--- RestaurantAgent.stream: Retrying... ({attempt}/{max_retries + 1}) ---" + ) + # Prepare the query for the retry + current_query_text = ( + f"Your previous response was invalid. {error_message} You MUST generate a" + " valid response that strictly follows the A2UI JSON SCHEMA. The response" + " MUST be a JSON list of A2UI messages. Ensure the response is split by" + " '---a2ui_JSON---' and the JSON part is well-formed. 
Please retry the" + f" original request: '{query}'" + ) + # Loop continues... + + # --- If we're here, it means we've exhausted retries --- + logger.error( + "--- RestaurantAgent.stream: Max retries exhausted. Sending text-only" + " error. ---" + ) + yield { + "is_task_complete": True, + "content": ( + "I'm sorry, I'm having trouble generating the interface for that request" + " right now. Please try again in a moment." + ), + } + # --- End: UI Validation and Retry Logic --- diff --git a/samples/agent/adk/restaurant_finder/agent_executor.py b/samples/agent/adk/restaurant_finder/agent_executor.py index a693e45de..9664d6071 100644 --- a/samples/agent/adk/restaurant_finder/agent_executor.py +++ b/samples/agent/adk/restaurant_finder/agent_executor.py @@ -39,159 +39,161 @@ class RestaurantAgentExecutor(AgentExecutor): - """Restaurant AgentExecutor Example.""" - - def __init__(self, base_url: str): - # Instantiate two agents: one for UI and one for text-only. - # The appropriate one will be chosen at execution time. - self.ui_agent = RestaurantAgent(base_url=base_url, use_ui=True) - self.text_agent = RestaurantAgent(base_url=base_url, use_ui=False) - - async def execute( - self, - context: RequestContext, - event_queue: EventQueue, - ) -> None: - query = "" - ui_event_part = None - action = None - - logger.info( - f"--- Client requested extensions: {context.requested_extensions} ---" + """Restaurant AgentExecutor Example.""" + + def __init__(self, base_url: str): + # Instantiate two agents: one for UI and one for text-only. + # The appropriate one will be chosen at execution time. + self.ui_agent = RestaurantAgent(base_url=base_url, use_ui=True) + self.text_agent = RestaurantAgent(base_url=base_url, use_ui=False) + + async def execute( + self, + context: RequestContext, + event_queue: EventQueue, + ) -> None: + query = "" + ui_event_part = None + action = None + + logger.info(f"--- Client requested extensions: {context.requested_extensions} ---") + use_ui = try_activate_a2ui_extension(context) + + # Determine which agent to use based on whether the a2ui extension is active. + if use_ui: + agent = self.ui_agent + logger.info("--- AGENT_EXECUTOR: A2UI extension is active. Using UI agent. ---") + else: + agent = self.text_agent + logger.info( + "--- AGENT_EXECUTOR: A2UI extension is not active. Using text agent. 
---" + ) + + if context.message and context.message.parts: + logger.info( + f"--- AGENT_EXECUTOR: Processing {len(context.message.parts)} message" + " parts ---" + ) + for i, part in enumerate(context.message.parts): + if isinstance(part.root, DataPart): + if "userAction" in part.root.data: + logger.info(f" Part {i}: Found a2ui UI ClientEvent payload.") + ui_event_part = part.root.data["userAction"] + else: + logger.info(f" Part {i}: DataPart (data: {part.root.data})") + elif isinstance(part.root, TextPart): + logger.info(f" Part {i}: TextPart (text: {part.root.text})") + else: + logger.info(f" Part {i}: Unknown part type ({type(part.root)})") + + if ui_event_part: + logger.info(f"Received a2ui ClientEvent: {ui_event_part}") + action = ui_event_part.get("actionName") + ctx = ui_event_part.get("context", {}) + + if action == "book_restaurant": + restaurant_name = ctx.get("restaurantName", "Unknown Restaurant") + address = ctx.get("address", "Address not provided") + image_url = ctx.get("imageUrl", "") + query = ( + f"USER_WANTS_TO_BOOK: {restaurant_name}, Address: {address}, ImageURL:" + f" {image_url}" ) - use_ui = try_activate_a2ui_extension(context) - # Determine which agent to use based on whether the a2ui extension is active. - if use_ui: - agent = self.ui_agent - logger.info( - "--- AGENT_EXECUTOR: A2UI extension is active. Using UI agent. ---" - ) - else: - agent = self.text_agent - logger.info( - "--- AGENT_EXECUTOR: A2UI extension is not active. Using text agent. ---" - ) + elif action == "submit_booking": + restaurant_name = ctx.get("restaurantName", "Unknown Restaurant") + party_size = ctx.get("partySize", "Unknown Size") + reservation_time = ctx.get("reservationTime", "Unknown Time") + dietary_reqs = ctx.get("dietary", "None") + image_url = ctx.get("imageUrl", "") + query = ( + f"User submitted a booking for {restaurant_name} for {party_size} people at" + f" {reservation_time} with dietary requirements: {dietary_reqs}. The image" + f" URL is {image_url}" + ) - if context.message and context.message.parts: - logger.info( - f"--- AGENT_EXECUTOR: Processing {len(context.message.parts)} message parts ---" - ) - for i, part in enumerate(context.message.parts): - if isinstance(part.root, DataPart): - if "userAction" in part.root.data: - logger.info(f" Part {i}: Found a2ui UI ClientEvent payload.") - ui_event_part = part.root.data["userAction"] - else: - logger.info(f" Part {i}: DataPart (data: {part.root.data})") - elif isinstance(part.root, TextPart): - logger.info(f" Part {i}: TextPart (text: {part.root.text})") - else: - logger.info(f" Part {i}: Unknown part type ({type(part.root)})") - - if ui_event_part: - logger.info(f"Received a2ui ClientEvent: {ui_event_part}") - action = ui_event_part.get("actionName") - ctx = ui_event_part.get("context", {}) - - if action == "book_restaurant": - restaurant_name = ctx.get("restaurantName", "Unknown Restaurant") - address = ctx.get("address", "Address not provided") - image_url = ctx.get("imageUrl", "") - query = f"USER_WANTS_TO_BOOK: {restaurant_name}, Address: {address}, ImageURL: {image_url}" - - elif action == "submit_booking": - restaurant_name = ctx.get("restaurantName", "Unknown Restaurant") - party_size = ctx.get("partySize", "Unknown Size") - reservation_time = ctx.get("reservationTime", "Unknown Time") - dietary_reqs = ctx.get("dietary", "None") - image_url = ctx.get("imageUrl", "") - query = f"User submitted a booking for {restaurant_name} for {party_size} people at {reservation_time} with dietary requirements: {dietary_reqs}. 
The image URL is {image_url}" + else: + query = f"User submitted an event: {action} with data: {ctx}" + else: + logger.info("No a2ui UI event part found. Falling back to text input.") + query = context.get_user_input() - else: - query = f"User submitted an event: {action} with data: {ctx}" - else: - logger.info("No a2ui UI event part found. Falling back to text input.") - query = context.get_user_input() - - logger.info(f"--- AGENT_EXECUTOR: Final query for LLM: '{query}' ---") - - task = context.current_task - - if not task: - task = new_task(context.message) - await event_queue.enqueue_event(task) - updater = TaskUpdater(event_queue, task.id, task.context_id) - - async for item in agent.stream(query, task.context_id): - is_task_complete = item["is_task_complete"] - if not is_task_complete: - await updater.update_status( - TaskState.working, - new_agent_text_message(item["updates"], task.context_id, task.id), - ) - continue - - final_state = ( - TaskState.completed - if action == "submit_booking" - else TaskState.input_required - ) + logger.info(f"--- AGENT_EXECUTOR: Final query for LLM: '{query}' ---") - content = item["content"] - final_parts = [] - if "---a2ui_JSON---" in content: - logger.info("Splitting final response into text and UI parts.") - text_content, json_string = content.split("---a2ui_JSON---", 1) - - if text_content.strip(): - final_parts.append(Part(root=TextPart(text=text_content.strip()))) - - if json_string.strip(): - try: - json_string_cleaned = ( - json_string.strip().lstrip("```json").rstrip("```").strip() - ) - # The new protocol sends a stream of JSON objects. - # For this example, we'll assume they are sent as a list in the final response. - json_data = json.loads(json_string_cleaned) - - if isinstance(json_data, list): - logger.info( - f"Found {len(json_data)} messages. Creating individual DataParts." - ) - for message in json_data: - final_parts.append(create_a2ui_part(message)) - else: - # Handle the case where a single JSON object is returned - logger.info( - "Received a single JSON object. Creating a DataPart." 
- ) - final_parts.append(create_a2ui_part(json_data)) - - except json.JSONDecodeError as e: - logger.error(f"Failed to parse UI JSON: {e}") - final_parts.append(Part(root=TextPart(text=json_string))) - else: - final_parts.append(Part(root=TextPart(text=content.strip()))) - - logger.info("--- FINAL PARTS TO BE SENT ---") - for i, part in enumerate(final_parts): - logger.info(f" - Part {i}: Type = {type(part.root)}") - if isinstance(part.root, TextPart): - logger.info(f" - Text: {part.root.text[:200]}...") - elif isinstance(part.root, DataPart): - logger.info(f" - Data: {str(part.root.data)[:200]}...") - logger.info("-----------------------------") - - await updater.update_status( - final_state, - new_agent_parts_message(final_parts, task.context_id, task.id), - final=(final_state == TaskState.completed), - ) - break + task = context.current_task + + if not task: + task = new_task(context.message) + await event_queue.enqueue_event(task) + updater = TaskUpdater(event_queue, task.id, task.context_id) - async def cancel( - self, request: RequestContext, event_queue: EventQueue - ) -> Task | None: - raise ServerError(error=UnsupportedOperationError()) + async for item in agent.stream(query, task.context_id): + is_task_complete = item["is_task_complete"] + if not is_task_complete: + await updater.update_status( + TaskState.working, + new_agent_text_message(item["updates"], task.context_id, task.id), + ) + continue + + final_state = ( + TaskState.completed + if action == "submit_booking" + else TaskState.input_required + ) + + content = item["content"] + final_parts = [] + if "---a2ui_JSON---" in content: + logger.info("Splitting final response into text and UI parts.") + text_content, json_string = content.split("---a2ui_JSON---", 1) + + if text_content.strip(): + final_parts.append(Part(root=TextPart(text=text_content.strip()))) + + if json_string.strip(): + try: + json_string_cleaned = ( + json_string.strip().lstrip("```json").rstrip("```").strip() + ) + # The new protocol sends a stream of JSON objects. + # For this example, we'll assume they are sent as a list in the final response. + json_data = json.loads(json_string_cleaned) + + if isinstance(json_data, list): + logger.info( + f"Found {len(json_data)} messages. Creating individual DataParts." + ) + for message in json_data: + final_parts.append(create_a2ui_part(message)) + else: + # Handle the case where a single JSON object is returned + logger.info("Received a single JSON object. 
Creating a DataPart.") + final_parts.append(create_a2ui_part(json_data)) + + except json.JSONDecodeError as e: + logger.error(f"Failed to parse UI JSON: {e}") + final_parts.append(Part(root=TextPart(text=json_string))) + else: + final_parts.append(Part(root=TextPart(text=content.strip()))) + + logger.info("--- FINAL PARTS TO BE SENT ---") + for i, part in enumerate(final_parts): + logger.info(f" - Part {i}: Type = {type(part.root)}") + if isinstance(part.root, TextPart): + logger.info(f" - Text: {part.root.text[:200]}...") + elif isinstance(part.root, DataPart): + logger.info(f" - Data: {str(part.root.data)[:200]}...") + logger.info("-----------------------------") + + await updater.update_status( + final_state, + new_agent_parts_message(final_parts, task.context_id, task.id), + final=(final_state == TaskState.completed), + ) + break + + async def cancel( + self, request: RequestContext, event_queue: EventQueue + ) -> Task | None: + raise ServerError(error=UnsupportedOperationError()) diff --git a/samples/agent/adk/restaurant_finder/prompt_builder.py b/samples/agent/adk/restaurant_finder/prompt_builder.py index 1cc8445e9..0985df8a3 100644 --- a/samples/agent/adk/restaurant_finder/prompt_builder.py +++ b/samples/agent/adk/restaurant_finder/prompt_builder.py @@ -13,7 +13,7 @@ # limitations under the License. # The A2UI schema remains constant for all A2UI responses. -A2UI_SCHEMA = r''' +A2UI_SCHEMA = r""" { "title": "A2UI Message Schema", "description": "Describes a JSON payload for an A2UI (Agent to UI) message, which is used to dynamically construct and update user interfaces. A message MUST contain exactly ONE of the action properties: 'beginRendering', 'surfaceUpdate', 'dataModelUpdate', or 'deleteSurface'.", @@ -783,26 +783,26 @@ } } } -''' +""" from a2ui_examples import RESTAURANT_UI_EXAMPLES def get_ui_prompt(base_url: str, examples: str) -> str: - """ - Constructs the full prompt with UI instructions, rules, examples, and schema. + """ + Constructs the full prompt with UI instructions, rules, examples, and schema. - Args: - base_url: The base URL for resolving static assets like logos. - examples: A string containing the specific UI examples for the agent's task. + Args: + base_url: The base URL for resolving static assets like logos. + examples: A string containing the specific UI examples for the agent's task. - Returns: - A formatted string to be used as the system prompt for the LLM. - """ - # The f-string substitution for base_url happens here, at runtime. - formatted_examples = examples.format(base_url=base_url) + Returns: + A formatted string to be used as the system prompt for the LLM. + """ + # The f-string substitution for base_url happens here, at runtime. + formatted_examples = examples.format(base_url=base_url) - return f""" + return f""" You are a helpful restaurant finding assistant. Your final output MUST be a a2ui UI JSON response. To generate the response, you MUST follow these rules: @@ -827,10 +827,10 @@ def get_ui_prompt(base_url: str, examples: str) -> str: def get_text_prompt() -> str: - """ - Constructs the prompt for a text-only agent. - """ - return """ + """ + Constructs the prompt for a text-only agent. + """ + return """ You are a helpful restaurant finding assistant. Your final output MUST be a text response. 
To generate the response, you MUST follow these rules: @@ -847,18 +847,18 @@ def get_text_prompt() -> str: if __name__ == "__main__": - # Example of how to use the prompt builder - # In your actual application, you would call this from your main agent logic. - my_base_url = "http://localhost:8000" + # Example of how to use the prompt builder + # In your actual application, you would call this from your main agent logic. + my_base_url = "http://localhost:8000" - # You can now easily construct a prompt with the relevant examples. - # For a different agent (e.g., a flight booker), you would pass in - # different examples but use the same `get_ui_prompt` function. - restaurant_prompt = get_ui_prompt(my_base_url, RESTAURANT_UI_EXAMPLES) + # You can now easily construct a prompt with the relevant examples. + # For a different agent (e.g., a flight booker), you would pass in + # different examples but use the same `get_ui_prompt` function. + restaurant_prompt = get_ui_prompt(my_base_url, RESTAURANT_UI_EXAMPLES) - print(restaurant_prompt) + print(restaurant_prompt) - # This demonstrates how you could save the prompt to a file for inspection - with open("generated_prompt.txt", "w") as f: - f.write(restaurant_prompt) - print("\nGenerated prompt saved to generated_prompt.txt") + # This demonstrates how you could save the prompt to a file for inspection + with open("generated_prompt.txt", "w") as f: + f.write(restaurant_prompt) + print("\nGenerated prompt saved to generated_prompt.txt") diff --git a/samples/agent/adk/restaurant_finder/tools.py b/samples/agent/adk/restaurant_finder/tools.py index 6a6dd453a..da0e48e52 100644 --- a/samples/agent/adk/restaurant_finder/tools.py +++ b/samples/agent/adk/restaurant_finder/tools.py @@ -21,35 +21,39 @@ logger = logging.getLogger(__name__) -def get_restaurants(cuisine: str, location: str, tool_context: ToolContext, count: int = 5) -> str: - """Call this tool to get a list of restaurants based on a cuisine and location. - 'count' is the number of restaurants to return. - """ - logger.info(f"--- TOOL CALLED: get_restaurants (count: {count}) ---") - logger.info(f" - Cuisine: {cuisine}") - logger.info(f" - Location: {location}") - - items = [] - if "new york" in location.lower() or "ny" in location.lower(): - try: - script_dir = os.path.dirname(__file__) - file_path = os.path.join(script_dir, "restaurant_data.json") - with open(file_path) as f: - restaurant_data_str = f.read() - if base_url := tool_context.state.get("base_url"): - restaurant_data_str = restaurant_data_str.replace("http://localhost:10002", base_url) - logger.info(f'Updated base URL from tool context: {base_url}') - all_items = json.loads(restaurant_data_str) - - # Slice the list to return only the requested number of items - items = all_items[:count] - logger.info( - f" - Success: Found {len(all_items)} restaurants, returning {len(items)}." - ) - - except FileNotFoundError: - logger.error(f" - Error: restaurant_data.json not found at {file_path}") - except json.JSONDecodeError: - logger.error(f" - Error: Failed to decode JSON from {file_path}") - - return json.dumps(items) +def get_restaurants( + cuisine: str, location: str, tool_context: ToolContext, count: int = 5 +) -> str: + """Call this tool to get a list of restaurants based on a cuisine and location. + 'count' is the number of restaurants to return. 
+ """ + logger.info(f"--- TOOL CALLED: get_restaurants (count: {count}) ---") + logger.info(f" - Cuisine: {cuisine}") + logger.info(f" - Location: {location}") + + items = [] + if "new york" in location.lower() or "ny" in location.lower(): + try: + script_dir = os.path.dirname(__file__) + file_path = os.path.join(script_dir, "restaurant_data.json") + with open(file_path) as f: + restaurant_data_str = f.read() + if base_url := tool_context.state.get("base_url"): + restaurant_data_str = restaurant_data_str.replace( + "http://localhost:10002", base_url + ) + logger.info(f"Updated base URL from tool context: {base_url}") + all_items = json.loads(restaurant_data_str) + + # Slice the list to return only the requested number of items + items = all_items[:count] + logger.info( + f" - Success: Found {len(all_items)} restaurants, returning {len(items)}." + ) + + except FileNotFoundError: + logger.error(f" - Error: restaurant_data.json not found at {file_path}") + except json.JSONDecodeError: + logger.error(f" - Error: Failed to decode JSON from {file_path}") + + return json.dumps(items) diff --git a/samples/agent/adk/rizzcharts/__main__.py b/samples/agent/adk/rizzcharts/__main__.py index ab459ee07..7b9481214 100644 --- a/samples/agent/adk/rizzcharts/__main__.py +++ b/samples/agent/adk/rizzcharts/__main__.py @@ -38,90 +38,91 @@ class MissingAPIKeyError(Exception): - """Exception for missing API key.""" + """Exception for missing API key.""" @click.command() @click.option("--host", default="localhost") @click.option("--port", default=10002) def main(host, port): - try: - # Check for API key only if Vertex AI is not configured - if not os.getenv("GOOGLE_GENAI_USE_VERTEXAI") == "TRUE": - if not os.getenv("GEMINI_API_KEY"): - raise MissingAPIKeyError( - "GEMINI_API_KEY environment variable not set and GOOGLE_GENAI_USE_VERTEXAI is not TRUE." - ) - - lite_llm_model = os.getenv("LITELLM_MODEL", "gemini/gemini-2.5-flash") - agent = RizzchartsAgent( - model=LiteLlm(model=lite_llm_model), - a2ui_enabled_provider=get_a2ui_enabled, - a2ui_schema_provider=get_a2ui_schema, - ) - runner = Runner( - app_name=agent.name, - agent=agent, - artifact_service=InMemoryArtifactService(), - session_service=InMemorySessionService(), - memory_service=InMemoryMemoryService(), - ) - - current_dir = pathlib.Path(__file__).resolve().parent - spec_root = current_dir / "../../../../specification/v0_8/json" - - try: - a2ui_schema_content = (spec_root / "server_to_client.json").read_text() - standard_catalog_content = ( - spec_root / "standard_catalog_definition.json" - ).read_text() - rizzcharts_catalog_content = ( - current_dir / "rizzcharts_catalog_definition.json" - ).read_text() - except FileNotFoundError as e: - logger.error(f"Failed to load required JSON files: {e}") - exit(1) - - logger.info(f"Loaded schema from {spec_root}") - - base_url = f"http://{host}:{port}" - agent_executor = RizzchartsAgentExecutor( - base_url=base_url, - runner=runner, - a2ui_schema_content=a2ui_schema_content, - standard_catalog_content=standard_catalog_content, - rizzcharts_catalog_content=rizzcharts_catalog_content, + try: + # Check for API key only if Vertex AI is not configured + if not os.getenv("GOOGLE_GENAI_USE_VERTEXAI") == "TRUE": + if not os.getenv("GEMINI_API_KEY"): + raise MissingAPIKeyError( + "GEMINI_API_KEY environment variable not set and GOOGLE_GENAI_USE_VERTEXAI" + " is not TRUE." 
) - request_handler = DefaultRequestHandler( - agent_executor=agent_executor, - task_store=InMemoryTaskStore(), - ) - server = A2AStarletteApplication( - agent_card=agent_executor.get_agent_card(), http_handler=request_handler - ) - import uvicorn + lite_llm_model = os.getenv("LITELLM_MODEL", "gemini/gemini-2.5-flash") + agent = RizzchartsAgent( + model=LiteLlm(model=lite_llm_model), + a2ui_enabled_provider=get_a2ui_enabled, + a2ui_schema_provider=get_a2ui_schema, + ) + runner = Runner( + app_name=agent.name, + agent=agent, + artifact_service=InMemoryArtifactService(), + session_service=InMemorySessionService(), + memory_service=InMemoryMemoryService(), + ) + + current_dir = pathlib.Path(__file__).resolve().parent + spec_root = current_dir / "../../../../specification/v0_8/json" - app = server.build() - - app.add_middleware( - CORSMiddleware, - allow_origins=["http://localhost:5173"], - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], - ) - - uvicorn.run(app, host=host, port=port) - except MissingAPIKeyError as e: - logger.error(f"Error: {e} {traceback.format_exc()}") - exit(1) - except Exception as e: - logger.error( - f"An error occurred during server startup: {e} {traceback.format_exc()}" - ) - exit(1) + try: + a2ui_schema_content = (spec_root / "server_to_client.json").read_text() + standard_catalog_content = ( + spec_root / "standard_catalog_definition.json" + ).read_text() + rizzcharts_catalog_content = ( + current_dir / "rizzcharts_catalog_definition.json" + ).read_text() + except FileNotFoundError as e: + logger.error(f"Failed to load required JSON files: {e}") + exit(1) + + logger.info(f"Loaded schema from {spec_root}") + + base_url = f"http://{host}:{port}" + agent_executor = RizzchartsAgentExecutor( + base_url=base_url, + runner=runner, + a2ui_schema_content=a2ui_schema_content, + standard_catalog_content=standard_catalog_content, + rizzcharts_catalog_content=rizzcharts_catalog_content, + ) + + request_handler = DefaultRequestHandler( + agent_executor=agent_executor, + task_store=InMemoryTaskStore(), + ) + server = A2AStarletteApplication( + agent_card=agent_executor.get_agent_card(), http_handler=request_handler + ) + import uvicorn + + app = server.build() + + app.add_middleware( + CORSMiddleware, + allow_origins=["http://localhost:5173"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], + ) + + uvicorn.run(app, host=host, port=port) + except MissingAPIKeyError as e: + logger.error(f"Error: {e} {traceback.format_exc()}") + exit(1) + except Exception as e: + logger.error( + f"An error occurred during server startup: {e} {traceback.format_exc()}" + ) + exit(1) if __name__ == "__main__": - main() + main() diff --git a/samples/agent/adk/rizzcharts/agent.py b/samples/agent/adk/rizzcharts/agent.py index e85e7a194..a1958d4fb 100644 --- a/samples/agent/adk/rizzcharts/agent.py +++ b/samples/agent/adk/rizzcharts/agent.py @@ -29,123 +29,134 @@ from pydantic import PrivateAttr try: - from .tools import get_sales_data, get_store_sales + from .tools import get_sales_data, get_store_sales except ImportError: - from tools import get_sales_data, get_store_sales + from tools import get_sales_data, get_store_sales logger = logging.getLogger(__name__) RIZZCHARTS_CATALOG_URI = "https://github.com/google/A2UI/blob/main/samples/agent/adk/rizzcharts/rizzcharts_catalog_definition.json" A2UI_CATALOG_URI_STATE_KEY = "user:a2ui_catalog_uri" + class RizzchartsAgent(LlmAgent): - """An agent that runs an ecommerce dashboard""" - - SUPPORTED_CONTENT_TYPES: 
ClassVar[list[str]] = ["text", "text/plain"] - _a2ui_enabled_provider: A2uiEnabledProvider = PrivateAttr() - _a2ui_schema_provider: A2uiSchemaProvider = PrivateAttr() - - def __init__( - self, - model: Any, - a2ui_enabled_provider: A2uiEnabledProvider, - a2ui_schema_provider: A2uiSchemaProvider - ): - """Initializes the RizzchartsAgent. - - Args: - model: The LLM model to use. - a2ui_enabled_provider: A provider to check if A2UI is enabled. - a2ui_schema_provider: A provider to retrieve the A2UI schema. - """ - super().__init__( - model=model, - name="rizzcharts_agent", - description="An agent that lets sales managers request sales data.", - instruction=self.get_instructions, - tools=[get_store_sales, get_sales_data, SendA2uiToClientToolset( + """An agent that runs an ecommerce dashboard""" + + SUPPORTED_CONTENT_TYPES: ClassVar[list[str]] = ["text", "text/plain"] + _a2ui_enabled_provider: A2uiEnabledProvider = PrivateAttr() + _a2ui_schema_provider: A2uiSchemaProvider = PrivateAttr() + + def __init__( + self, + model: Any, + a2ui_enabled_provider: A2uiEnabledProvider, + a2ui_schema_provider: A2uiSchemaProvider, + ): + """Initializes the RizzchartsAgent. + + Args: + model: The LLM model to use. + a2ui_enabled_provider: A provider to check if A2UI is enabled. + a2ui_schema_provider: A provider to retrieve the A2UI schema. + """ + super().__init__( + model=model, + name="rizzcharts_agent", + description="An agent that lets sales managers request sales data.", + instruction=self.get_instructions, + tools=[ + get_store_sales, + get_sales_data, + SendA2uiToClientToolset( a2ui_schema=a2ui_schema_provider, a2ui_enabled=a2ui_enabled_provider, - )], - planner=BuiltInPlanner( - thinking_config=types.ThinkingConfig( - include_thoughts=True, - ) ), - disallow_transfer_to_peers=True, - ) - - self._a2ui_enabled_provider = a2ui_enabled_provider - self._a2ui_schema_provider = a2ui_schema_provider - - def get_a2ui_schema(self, ctx: ReadonlyContext) -> dict[str, Any]: - """Retrieves and wraps the A2UI schema from the session state. - - Args: - ctx: The ReadonlyContext for resolving the schema. - - Returns: - The wrapped A2UI schema. - """ - a2ui_schema = self._a2ui_schema_provider(ctx) - return wrap_as_json_array(a2ui_schema) - - def load_example(self, path: str, a2ui_schema: dict[str, Any]) -> dict[str, Any]: - """Loads an example JSON file and validates it against the A2UI schema. - - Args: - path: Relative path to the example JSON file. - a2ui_schema: The A2UI schema to validate against. - - Returns: - The loaded and validated JSON data. - """ - data = None - try: - # Try pkgutil first (for Google3) - package_name = __package__ or "" - data = pkgutil.get_data(package_name, path) - except ImportError: - logger.info("pkgutil failed to get data, falling back to file system.") - - if data: - example_str = data.decode("utf-8") - else: - # Fallback to direct Path relative to this file (for local dev) - full_path = Path(__file__).parent / path - example_str = full_path.read_text() - - example_json = json.loads(example_str) - jsonschema.validate( - instance=example_json, schema=a2ui_schema - ) - return example_json - - def get_instructions(self, readonly_context: ReadonlyContext) -> str: - """Generates the system instructions for the agent. - - Args: - readonly_context: The ReadonlyContext for resolving instructions. - - Returns: - The generated system instructions. 
- """ - use_ui = self._a2ui_enabled_provider(readonly_context) - if not use_ui: - raise ValueError("A2UI must be enabled to run rizzcharts agent") - - a2ui_schema = self.get_a2ui_schema(readonly_context) - catalog_uri = readonly_context.state.get(A2UI_CATALOG_URI_STATE_KEY) - if catalog_uri == RIZZCHARTS_CATALOG_URI: - map_example = self.load_example("examples/rizzcharts_catalog/map.json", a2ui_schema) - chart_example = self.load_example("examples/rizzcharts_catalog/chart.json", a2ui_schema) - elif catalog_uri == STANDARD_CATALOG_ID: - map_example = self.load_example("examples/standard_catalog/map.json", a2ui_schema) - chart_example = self.load_example("examples/standard_catalog/chart.json", a2ui_schema) - else: - raise ValueError(f"Unsupported catalog uri: {catalog_uri if catalog_uri else 'None'}") - - final_prompt = f""" + ], + planner=BuiltInPlanner( + thinking_config=types.ThinkingConfig( + include_thoughts=True, + ) + ), + disallow_transfer_to_peers=True, + ) + + self._a2ui_enabled_provider = a2ui_enabled_provider + self._a2ui_schema_provider = a2ui_schema_provider + + def get_a2ui_schema(self, ctx: ReadonlyContext) -> dict[str, Any]: + """Retrieves and wraps the A2UI schema from the session state. + + Args: + ctx: The ReadonlyContext for resolving the schema. + + Returns: + The wrapped A2UI schema. + """ + a2ui_schema = self._a2ui_schema_provider(ctx) + return wrap_as_json_array(a2ui_schema) + + def load_example(self, path: str, a2ui_schema: dict[str, Any]) -> dict[str, Any]: + """Loads an example JSON file and validates it against the A2UI schema. + + Args: + path: Relative path to the example JSON file. + a2ui_schema: The A2UI schema to validate against. + + Returns: + The loaded and validated JSON data. + """ + data = None + try: + # Try pkgutil first (for Google3) + package_name = __package__ or "" + data = pkgutil.get_data(package_name, path) + except ImportError: + logger.info("pkgutil failed to get data, falling back to file system.") + + if data: + example_str = data.decode("utf-8") + else: + # Fallback to direct Path relative to this file (for local dev) + full_path = Path(__file__).parent / path + example_str = full_path.read_text() + + example_json = json.loads(example_str) + jsonschema.validate(instance=example_json, schema=a2ui_schema) + return example_json + + def get_instructions(self, readonly_context: ReadonlyContext) -> str: + """Generates the system instructions for the agent. + + Args: + readonly_context: The ReadonlyContext for resolving instructions. + + Returns: + The generated system instructions. + """ + use_ui = self._a2ui_enabled_provider(readonly_context) + if not use_ui: + raise ValueError("A2UI must be enabled to run rizzcharts agent") + + a2ui_schema = self.get_a2ui_schema(readonly_context) + catalog_uri = readonly_context.state.get(A2UI_CATALOG_URI_STATE_KEY) + if catalog_uri == RIZZCHARTS_CATALOG_URI: + map_example = self.load_example( + "examples/rizzcharts_catalog/map.json", a2ui_schema + ) + chart_example = self.load_example( + "examples/rizzcharts_catalog/chart.json", a2ui_schema + ) + elif catalog_uri == STANDARD_CATALOG_ID: + map_example = self.load_example("examples/standard_catalog/map.json", a2ui_schema) + chart_example = self.load_example( + "examples/standard_catalog/chart.json", a2ui_schema + ) + else: + raise ValueError( + f"Unsupported catalog uri: {catalog_uri if catalog_uri else 'None'}" + ) + + final_prompt = f""" ### System Instructions You are an expert A2UI Ecommerce Dashboard analyst. 
Your primary function is to translate user requests for ecommerce data into A2UI JSON payloads to display charts and visualizations. You MUST use the `send_a2ui_json_to_client` tool with the `a2ui_json` argument set to the A2UI JSON payload to send to the client. You should also include a brief text message with each response saying what you did and asking if you can help with anything else. @@ -199,7 +210,10 @@ def get_instructions(self, readonly_context: ReadonlyContext) -> str: {json.dumps(map_example)} ---END MAP EXAMPLE--- """ - - logger.info(f"Generated system instructions for A2UI {'ENABLED' if use_ui else 'DISABLED'} and catalog {catalog_uri}") - return final_prompt + logger.info( + f"Generated system instructions for A2UI {'ENABLED' if use_ui else 'DISABLED'}" + f" and catalog {catalog_uri}" + ) + + return final_prompt diff --git a/samples/agent/adk/rizzcharts/agent_executor.py b/samples/agent/adk/rizzcharts/agent_executor.py index bd2002724..05d985fc9 100644 --- a/samples/agent/adk/rizzcharts/agent_executor.py +++ b/samples/agent/adk/rizzcharts/agent_executor.py @@ -24,16 +24,17 @@ from a2ui.extension.a2ui_extension import get_a2ui_agent_extension from a2ui.extension.a2ui_extension import try_activate_a2ui_extension from a2ui.extension.send_a2ui_to_client_toolset import convert_send_a2ui_to_client_genai_part_to_a2a_part + try: - from .agent import A2UI_CATALOG_URI_STATE_KEY # pylint: disable=import-error - from .agent import RIZZCHARTS_CATALOG_URI # pylint: disable=import-error - from .agent import RizzchartsAgent # pylint: disable=import-error - from .component_catalog_builder import ComponentCatalogBuilder # pylint: disable=import-error + from .agent import A2UI_CATALOG_URI_STATE_KEY # pylint: disable=import-error + from .agent import RIZZCHARTS_CATALOG_URI # pylint: disable=import-error + from .agent import RizzchartsAgent # pylint: disable=import-error + from .component_catalog_builder import ComponentCatalogBuilder # pylint: disable=import-error except ImportError: - from agent import A2UI_CATALOG_URI_STATE_KEY - from agent import RIZZCHARTS_CATALOG_URI - from agent import RizzchartsAgent - from component_catalog_builder import ComponentCatalogBuilder + from agent import A2UI_CATALOG_URI_STATE_KEY + from agent import RIZZCHARTS_CATALOG_URI + from agent import RizzchartsAgent + from component_catalog_builder import ComponentCatalogBuilder from google.adk.a2a.converters.request_converter import AgentRunRequest from google.adk.a2a.executor.a2a_agent_executor import A2aAgentExecutor from google.adk.a2a.executor.a2a_agent_executor import A2aAgentExecutorConfig @@ -48,131 +49,148 @@ _A2UI_ENABLED_KEY = "system:a2ui_enabled" _A2UI_SCHEMA_KEY = "system:a2ui_schema" + def get_a2ui_schema(ctx: ReadonlyContext): - """Retrieves the A2UI schema from the session state. + """Retrieves the A2UI schema from the session state. - Args: - ctx: The ReadonlyContext for resolving the schema. + Args: + ctx: The ReadonlyContext for resolving the schema. + + Returns: + The A2UI schema or None if not found. + """ + return ctx.state.get(_A2UI_SCHEMA_KEY) - Returns: - The A2UI schema or None if not found. - """ - return ctx.state.get(_A2UI_SCHEMA_KEY) def get_a2ui_enabled(ctx: ReadonlyContext): - """Checks if A2UI is enabled in the current session. + """Checks if A2UI is enabled in the current session. - Args: - ctx: The ReadonlyContext for resolving enablement. + Args: + ctx: The ReadonlyContext for resolving enablement. + + Returns: + True if A2UI is enabled, False otherwise. 
+ """ + return ctx.state.get(_A2UI_ENABLED_KEY, False) - Returns: - True if A2UI is enabled, False otherwise. - """ - return ctx.state.get(_A2UI_ENABLED_KEY, False) class RizzchartsAgentExecutor(A2aAgentExecutor): - """Executor for the Rizzcharts agent that handles A2UI session setup.""" - - def __init__( - self, - base_url: str, - runner: Runner, - a2ui_schema_content: str, - standard_catalog_content: str, - rizzcharts_catalog_content: str, - ): - self._base_url = base_url - self._component_catalog_builder = ComponentCatalogBuilder( - a2ui_schema_content=a2ui_schema_content, - uri_to_local_catalog_content={ - STANDARD_CATALOG_ID: standard_catalog_content, - RIZZCHARTS_CATALOG_URI: rizzcharts_catalog_content, - }, - default_catalog_uri=STANDARD_CATALOG_ID, - ) - - config = A2aAgentExecutorConfig( - gen_ai_part_converter=convert_send_a2ui_to_client_genai_part_to_a2a_part - ) - super().__init__(runner=runner, config=config) - - def get_agent_card(self) -> AgentCard: - """Returns the AgentCard defining this agent's metadata and skills. - - Returns: - An AgentCard object. - """ - return AgentCard( - name="Ecommerce Dashboard Agent", - description="This agent visualizes ecommerce data, showing sales breakdowns, YOY revenue performance, and regional sales outliers.", - url=self._base_url, - version="1.0.0", - default_input_modes=RizzchartsAgent.SUPPORTED_CONTENT_TYPES, - default_output_modes=RizzchartsAgent.SUPPORTED_CONTENT_TYPES, - capabilities=AgentCapabilities( - streaming=True, - extensions=[get_a2ui_agent_extension( - supported_catalog_ids=[STANDARD_CATALOG_ID, RIZZCHARTS_CATALOG_URI])], - ), - skills=[ - AgentSkill( - id="view_sales_by_category", - name="View Sales by Category", - description="Displays a pie chart of sales broken down by product category for a given time period.", - tags=["sales", "breakdown", "category", "pie chart", "revenue"], - examples=[ - "show my sales breakdown by product category for q3", - "What's the sales breakdown for last month?", - ], - ), - AgentSkill( - id="view_regional_outliers", - name="View Regional Sales Outliers", - description="Displays a map showing regional sales outliers or store-level performance.", - tags=["sales", "regional", "outliers", "stores", "map", "performance"], - examples=[ - "interesting. were there any outlier stores", - "show me a map of store performance", - ], - ), + """Executor for the Rizzcharts agent that handles A2UI session setup.""" + + def __init__( + self, + base_url: str, + runner: Runner, + a2ui_schema_content: str, + standard_catalog_content: str, + rizzcharts_catalog_content: str, + ): + self._base_url = base_url + self._component_catalog_builder = ComponentCatalogBuilder( + a2ui_schema_content=a2ui_schema_content, + uri_to_local_catalog_content={ + STANDARD_CATALOG_ID: standard_catalog_content, + RIZZCHARTS_CATALOG_URI: rizzcharts_catalog_content, + }, + default_catalog_uri=STANDARD_CATALOG_ID, + ) + + config = A2aAgentExecutorConfig( + gen_ai_part_converter=convert_send_a2ui_to_client_genai_part_to_a2a_part + ) + super().__init__(runner=runner, config=config) + + def get_agent_card(self) -> AgentCard: + """Returns the AgentCard defining this agent's metadata and skills. + + Returns: + An AgentCard object. + """ + return AgentCard( + name="Ecommerce Dashboard Agent", + description=( + "This agent visualizes ecommerce data, showing sales breakdowns, YOY" + " revenue performance, and regional sales outliers." 
+ ), + url=self._base_url, + version="1.0.0", + default_input_modes=RizzchartsAgent.SUPPORTED_CONTENT_TYPES, + default_output_modes=RizzchartsAgent.SUPPORTED_CONTENT_TYPES, + capabilities=AgentCapabilities( + streaming=True, + extensions=[ + get_a2ui_agent_extension( + supported_catalog_ids=[STANDARD_CATALOG_ID, RIZZCHARTS_CATALOG_URI] + ) ], - ) - - @override - async def _prepare_session( - self, - context: RequestContext, - run_request: AgentRunRequest, - runner: Runner, - ): - logger.info(f"Loading session for message {context.message}") - - session = await super()._prepare_session(context, run_request, runner) - - if "base_url" not in session.state: - session.state["base_url"] = self._base_url - - use_ui = try_activate_a2ui_extension(context) - if use_ui: - a2ui_schema, catalog_uri = self._component_catalog_builder.load_a2ui_schema( - client_ui_capabilities=context.message.metadata.get(A2UI_CLIENT_CAPABILITIES_KEY) - if context.message and context.message.metadata - else None - ) - - await runner.session_service.append_event( - session, - Event( - invocation_id=new_invocation_context_id(), - author="system", - actions=EventActions( - state_delta={ - _A2UI_ENABLED_KEY: True, - _A2UI_SCHEMA_KEY: a2ui_schema, - A2UI_CATALOG_URI_STATE_KEY: catalog_uri, - } - ), + ), + skills=[ + AgentSkill( + id="view_sales_by_category", + name="View Sales by Category", + description=( + "Displays a pie chart of sales broken down by product category for" + " a given time period." ), - ) - - return session + tags=["sales", "breakdown", "category", "pie chart", "revenue"], + examples=[ + "show my sales breakdown by product category for q3", + "What's the sales breakdown for last month?", + ], + ), + AgentSkill( + id="view_regional_outliers", + name="View Regional Sales Outliers", + description=( + "Displays a map showing regional sales outliers or store-level" + " performance." + ), + tags=["sales", "regional", "outliers", "stores", "map", "performance"], + examples=[ + "interesting. 
were there any outlier stores", + "show me a map of store performance", + ], + ), + ], + ) + + @override + async def _prepare_session( + self, + context: RequestContext, + run_request: AgentRunRequest, + runner: Runner, + ): + logger.info(f"Loading session for message {context.message}") + + session = await super()._prepare_session(context, run_request, runner) + + if "base_url" not in session.state: + session.state["base_url"] = self._base_url + + use_ui = try_activate_a2ui_extension(context) + if use_ui: + a2ui_schema, catalog_uri = self._component_catalog_builder.load_a2ui_schema( + client_ui_capabilities=context.message.metadata.get( + A2UI_CLIENT_CAPABILITIES_KEY + ) + if context.message and context.message.metadata + else None + ) + + await runner.session_service.append_event( + session, + Event( + invocation_id=new_invocation_context_id(), + author="system", + actions=EventActions( + state_delta={ + _A2UI_ENABLED_KEY: True, + _A2UI_SCHEMA_KEY: a2ui_schema, + A2UI_CATALOG_URI_STATE_KEY: catalog_uri, + } + ), + ), + ) + + return session diff --git a/samples/agent/adk/rizzcharts/component_catalog_builder.py b/samples/agent/adk/rizzcharts/component_catalog_builder.py index c362b60bf..4b7f45dd5 100644 --- a/samples/agent/adk/rizzcharts/component_catalog_builder.py +++ b/samples/agent/adk/rizzcharts/component_catalog_builder.py @@ -16,70 +16,88 @@ import logging from typing import Any, List, Optional from a2ui.extension.a2ui_extension import INLINE_CATALOGS_KEY, SUPPORTED_CATALOG_IDS_KEY + try: - from .agent import RIZZCHARTS_CATALOG_URI, STANDARD_CATALOG_ID + from .agent import RIZZCHARTS_CATALOG_URI, STANDARD_CATALOG_ID except ImportError: - from agent import RIZZCHARTS_CATALOG_URI, STANDARD_CATALOG_ID + from agent import RIZZCHARTS_CATALOG_URI, STANDARD_CATALOG_ID logger = logging.getLogger(__name__) class ComponentCatalogBuilder: - def __init__(self, - a2ui_schema_content: str, - uri_to_local_catalog_content: dict[str, str], - default_catalog_uri: Optional[str], - ): - self._a2ui_schema_content = a2ui_schema_content - self._uri_to_local_catalog_content = uri_to_local_catalog_content - self._default_catalog_uri = default_catalog_uri - def load_a2ui_schema(self, client_ui_capabilities: Optional[dict[str, Any]]) -> tuple[dict[str, Any], Optional[str]]: - """ - Returns: - A tuple of the a2ui_schema and the catalog uri - """ - try: - logger.info(f"Loading A2UI client capabilities {client_ui_capabilities}") - - if client_ui_capabilities: - supported_catalog_uris: List[str] = client_ui_capabilities.get(SUPPORTED_CATALOG_IDS_KEY) - if RIZZCHARTS_CATALOG_URI in supported_catalog_uris: - catalog_uri = RIZZCHARTS_CATALOG_URI - elif STANDARD_CATALOG_ID in supported_catalog_uris: - catalog_uri = STANDARD_CATALOG_ID - else: - catalog_uri = None + def __init__( + self, + a2ui_schema_content: str, + uri_to_local_catalog_content: dict[str, str], + default_catalog_uri: Optional[str], + ): + self._a2ui_schema_content = a2ui_schema_content + self._uri_to_local_catalog_content = uri_to_local_catalog_content + self._default_catalog_uri = default_catalog_uri + + def load_a2ui_schema( + self, client_ui_capabilities: Optional[dict[str, Any]] + ) -> tuple[dict[str, Any], Optional[str]]: + """ + Returns: + A tuple of the a2ui_schema and the catalog uri + """ + try: + logger.info(f"Loading A2UI client capabilities {client_ui_capabilities}") + + if client_ui_capabilities: + supported_catalog_uris: List[str] = client_ui_capabilities.get( + SUPPORTED_CATALOG_IDS_KEY + ) + if RIZZCHARTS_CATALOG_URI in 
supported_catalog_uris: + catalog_uri = RIZZCHARTS_CATALOG_URI + elif STANDARD_CATALOG_ID in supported_catalog_uris: + catalog_uri = STANDARD_CATALOG_ID + else: + catalog_uri = None + + inline_catalog_str = client_ui_capabilities.get(INLINE_CATALOGS_KEY) + elif self._default_catalog_uri: + logger.info( + f"Using default catalog {self._default_catalog_uri} since client UI" + " capabilities not found" + ) + catalog_uri = self._default_catalog_uri + inline_catalog_str = None + else: + raise ValueError("Client UI capabilities not provided") - inline_catalog_str = client_ui_capabilities.get(INLINE_CATALOGS_KEY) - elif self._default_catalog_uri: - logger.info(f"Using default catalog {self._default_catalog_uri} since client UI capabilities not found") - catalog_uri = self._default_catalog_uri - inline_catalog_str = None - else: - raise ValueError("Client UI capabilities not provided") - - if catalog_uri and inline_catalog_str: - raise ValueError(f"Cannot set both {SUPPORTED_CATALOG_IDS_KEY} and {INLINE_CATALOGS_KEY} in ClientUiCapabilities: {client_ui_capabilities}") - elif catalog_uri: - if catalog_str := self._uri_to_local_catalog_content.get(catalog_uri): - logger.info(f"Loading local component catalog with uri {catalog_uri}") - catalog_json = json.loads(catalog_str) - else: - raise ValueError(f"Local component catalog with URI {catalog_uri} not found") - elif inline_catalog_str: - logger.info(f"Loading inline component catalog {inline_catalog_str[:200]}") - catalog_json = json.loads(inline_catalog_str) - else: - raise ValueError("No supported catalogs found in client UI capabilities") + if catalog_uri and inline_catalog_str: + raise ValueError( + f"Cannot set both {SUPPORTED_CATALOG_IDS_KEY} and {INLINE_CATALOGS_KEY} in" + f" ClientUiCapabilities: {client_ui_capabilities}" + ) + elif catalog_uri: + if catalog_str := self._uri_to_local_catalog_content.get(catalog_uri): + logger.info(f"Loading local component catalog with uri {catalog_uri}") + catalog_json = json.loads(catalog_str) + else: + raise ValueError(f"Local component catalog with URI {catalog_uri} not found") + elif inline_catalog_str: + logger.info(f"Loading inline component catalog {inline_catalog_str[:200]}") + catalog_json = json.loads(inline_catalog_str) + else: + raise ValueError("No supported catalogs found in client UI capabilities") - logger.info("Loading A2UI schema") - a2ui_schema_json = json.loads(self._a2ui_schema_content) + logger.info("Loading A2UI schema") + a2ui_schema_json = json.loads(self._a2ui_schema_content) - a2ui_schema_json["properties"]["surfaceUpdate"]["properties"]["components"]["items"]["properties"]["component"]["properties"] = catalog_json + a2ui_schema_json["properties"]["surfaceUpdate"]["properties"]["components"][ + "items" + ]["properties"]["component"]["properties"] = catalog_json - return a2ui_schema_json, catalog_uri + return a2ui_schema_json, catalog_uri - except Exception as e: - logger.error(f"Failed to a2ui schema with client ui capabilities {client_ui_capabilities}: {e}") - raise e + except Exception as e: + logger.error( + "Failed to a2ui schema with client ui capabilities" + f" {client_ui_capabilities}: {e}" + ) + raise e diff --git a/samples/agent/adk/rizzcharts/tools.py b/samples/agent/adk/rizzcharts/tools.py index 61424872e..9acca87a0 100644 --- a/samples/agent/adk/rizzcharts/tools.py +++ b/samples/agent/adk/rizzcharts/tools.py @@ -19,89 +19,89 @@ def get_store_sales(region: str = "all", **kwargs: Any) -> dict[str, Any]: - """ - Gets individual store sales + """ + Gets individual store 
sales - Args: - region: The region to get store sales for. - **kwargs: Additional arguments. + Args: + region: The region to get store sales for. + **kwargs: Additional arguments. - Returns: - A dict containing the stores with locations and their sales, and with outlier stores highlighted - """ - logger.info("get_store_sales called with region=%s, kwargs=%s", region, kwargs) + Returns: + A dict containing the stores with locations and their sales, and with outlier stores highlighted + """ + logger.info("get_store_sales called with region=%s, kwargs=%s", region, kwargs) - return { - "center": {"lat": 34, "lng": -118.2437}, - "zoom": 10, - "locations": [ - { - "lat": 34.0195, - "lng": -118.4912, - "name": "Santa Monica Branch", - "description": "High traffic coastal location.", - "outlier_reason": "Yes, 15% sales over baseline", - "background": "#4285F4", - "borderColor": "#FFFFFF", - "glyphColor": "#FFFFFF", - }, - {"lat": 34.0488, "lng": -118.2518, "name": "Downtown Flagship"}, - {"lat": 34.1016, "lng": -118.3287, "name": "Hollywood Boulevard Store"}, - {"lat": 34.1478, "lng": -118.1445, "name": "Pasadena Location"}, - {"lat": 33.7701, "lng": -118.1937, "name": "Long Beach Outlet"}, - {"lat": 34.0736, "lng": -118.4004, "name": "Beverly Hills Boutique"}, - ], - } + return { + "center": {"lat": 34, "lng": -118.2437}, + "zoom": 10, + "locations": [ + { + "lat": 34.0195, + "lng": -118.4912, + "name": "Santa Monica Branch", + "description": "High traffic coastal location.", + "outlier_reason": "Yes, 15% sales over baseline", + "background": "#4285F4", + "borderColor": "#FFFFFF", + "glyphColor": "#FFFFFF", + }, + {"lat": 34.0488, "lng": -118.2518, "name": "Downtown Flagship"}, + {"lat": 34.1016, "lng": -118.3287, "name": "Hollywood Boulevard Store"}, + {"lat": 34.1478, "lng": -118.1445, "name": "Pasadena Location"}, + {"lat": 33.7701, "lng": -118.1937, "name": "Long Beach Outlet"}, + {"lat": 34.0736, "lng": -118.4004, "name": "Beverly Hills Boutique"}, + ], + } def get_sales_data(time_period: str = "year", **kwargs: Any) -> dict[str, Any]: - """ - Gets the sales data. + """ + Gets the sales data. - Args: - time_period: The time period to get sales data for (e.g. 'Q1', 'year'). Defaults to 'year'. - **kwargs: Additional arguments. + Args: + time_period: The time period to get sales data for (e.g. 'Q1', 'year'). Defaults to 'year'. + **kwargs: Additional arguments. - Returns: - A dict containing the sales breakdown by product category. - """ - logger.info( - "get_sales_data called with time_period=%s, kwargs=%s", time_period, kwargs - ) + Returns: + A dict containing the sales breakdown by product category. 
+ """ + logger.info( + "get_sales_data called with time_period=%s, kwargs=%s", time_period, kwargs + ) - return { - "sales_data": [ - { - "label": "Apparel", - "value": 41, - "drillDown": [ - {"label": "Tops", "value": 31}, - {"label": "Bottoms", "value": 38}, - {"label": "Outerwear", "value": 20}, - {"label": "Footwear", "value": 11}, - ], - }, - { - "label": "Home Goods", - "value": 15, - "drillDown": [ - {"label": "Pillow", "value": 8}, - {"label": "Coffee Maker", "value": 16}, - {"label": "Area Rug", "value": 3}, - {"label": "Bath Towels", "value": 14}, - ], - }, - { - "label": "Electronics", - "value": 28, - "drillDown": [ - {"label": "Phones", "value": 25}, - {"label": "Laptops", "value": 27}, - {"label": "TVs", "value": 21}, - {"label": "Other", "value": 27}, - ], - }, - {"label": "Health & Beauty", "value": 10}, - {"label": "Other", "value": 6}, - ] - } + return { + "sales_data": [ + { + "label": "Apparel", + "value": 41, + "drillDown": [ + {"label": "Tops", "value": 31}, + {"label": "Bottoms", "value": 38}, + {"label": "Outerwear", "value": 20}, + {"label": "Footwear", "value": 11}, + ], + }, + { + "label": "Home Goods", + "value": 15, + "drillDown": [ + {"label": "Pillow", "value": 8}, + {"label": "Coffee Maker", "value": 16}, + {"label": "Area Rug", "value": 3}, + {"label": "Bath Towels", "value": 14}, + ], + }, + { + "label": "Electronics", + "value": 28, + "drillDown": [ + {"label": "Phones", "value": 25}, + {"label": "Laptops", "value": 27}, + {"label": "TVs", "value": 21}, + {"label": "Other", "value": 27}, + ], + }, + {"label": "Health & Beauty", "value": 10}, + {"label": "Other", "value": 6}, + ] + } diff --git a/samples/agent/adk/uv.lock b/samples/agent/adk/uv.lock index 48cf6be77..a973639d0 100644 --- a/samples/agent/adk/uv.lock +++ b/samples/agent/adk/uv.lock @@ -15,6 +15,9 @@ members = [ "rizzcharts", ] +[manifest.dependency-groups] +dev = [{ name = "pyink", specifier = ">=24.10.0" }] + [[package]] name = "a2a-sdk" version = "0.3.22" @@ -52,6 +55,7 @@ requires-dist = [ [package.metadata.requires-dev] dev = [ + { name = "pyink", specifier = ">=24.10.0" }, { name = "pytest", specifier = ">=9.0.2" }, { name = "pytest-asyncio", specifier = ">=1.3.0" }, ] @@ -300,6 +304,33 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/54/51/321e821856452f7386c4e9df866f196720b1ad0c5ea1623ea7399969ae3b/authlib-1.6.6-py2.py3-none-any.whl", hash = "sha256:7d9e9bc535c13974313a87f53e8430eb6ea3d1cf6ae4f6efcd793f2e949143fd", size = 244005, upload-time = "2025-12-12T08:01:40.209Z" }, ] +[[package]] +name = "black" +version = "25.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "mypy-extensions" }, + { name = "packaging" }, + { name = "pathspec" }, + { name = "platformdirs" }, + { name = "pytokens" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c4/d9/07b458a3f1c525ac392b5edc6b191ff140b596f9d77092429417a54e249d/black-25.12.0.tar.gz", hash = "sha256:8d3dd9cea14bff7ddc0eb243c811cdb1a011ebb4800a5f0335a01a68654796a7", size = 659264, upload-time = "2025-12-08T01:40:52.501Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/52/c551e36bc95495d2aa1a37d50566267aa47608c81a53f91daa809e03293f/black-25.12.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a05ddeb656534c3e27a05a29196c962877c83fa5503db89e68857d1161ad08a5", size = 1923809, upload-time = "2025-12-08T01:46:55.126Z" }, + { url = 
"https://files.pythonhosted.org/packages/a0/f7/aac9b014140ee56d247e707af8db0aae2e9efc28d4a8aba92d0abd7ae9d1/black-25.12.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9ec77439ef3e34896995503865a85732c94396edcc739f302c5673a2315e1e7f", size = 1742384, upload-time = "2025-12-08T01:49:37.022Z" }, + { url = "https://files.pythonhosted.org/packages/74/98/38aaa018b2ab06a863974c12b14a6266badc192b20603a81b738c47e902e/black-25.12.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e509c858adf63aa61d908061b52e580c40eae0dfa72415fa47ac01b12e29baf", size = 1798761, upload-time = "2025-12-08T01:46:05.386Z" }, + { url = "https://files.pythonhosted.org/packages/16/3a/a8ac542125f61574a3f015b521ca83b47321ed19bb63fe6d7560f348bfe1/black-25.12.0-cp313-cp313-win_amd64.whl", hash = "sha256:252678f07f5bac4ff0d0e9b261fbb029fa530cfa206d0a636a34ab445ef8ca9d", size = 1429180, upload-time = "2025-12-08T01:45:34.903Z" }, + { url = "https://files.pythonhosted.org/packages/e6/2d/bdc466a3db9145e946762d52cd55b1385509d9f9004fec1c97bdc8debbfb/black-25.12.0-cp313-cp313-win_arm64.whl", hash = "sha256:bc5b1c09fe3c931ddd20ee548511c64ebf964ada7e6f0763d443947fd1c603ce", size = 1239350, upload-time = "2025-12-08T01:46:09.458Z" }, + { url = "https://files.pythonhosted.org/packages/35/46/1d8f2542210c502e2ae1060b2e09e47af6a5e5963cb78e22ec1a11170b28/black-25.12.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:0a0953b134f9335c2434864a643c842c44fba562155c738a2a37a4d61f00cad5", size = 1917015, upload-time = "2025-12-08T01:53:27.987Z" }, + { url = "https://files.pythonhosted.org/packages/41/37/68accadf977672beb8e2c64e080f568c74159c1aaa6414b4cd2aef2d7906/black-25.12.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:2355bbb6c3b76062870942d8cc450d4f8ac71f9c93c40122762c8784df49543f", size = 1741830, upload-time = "2025-12-08T01:54:36.861Z" }, + { url = "https://files.pythonhosted.org/packages/ac/76/03608a9d8f0faad47a3af3a3c8c53af3367f6c0dd2d23a84710456c7ac56/black-25.12.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9678bd991cc793e81d19aeeae57966ee02909877cb65838ccffef24c3ebac08f", size = 1791450, upload-time = "2025-12-08T01:44:52.581Z" }, + { url = "https://files.pythonhosted.org/packages/06/99/b2a4bd7dfaea7964974f947e1c76d6886d65fe5d24f687df2d85406b2609/black-25.12.0-cp314-cp314-win_amd64.whl", hash = "sha256:97596189949a8aad13ad12fcbb4ae89330039b96ad6742e6f6b45e75ad5cfd83", size = 1452042, upload-time = "2025-12-08T01:46:13.188Z" }, + { url = "https://files.pythonhosted.org/packages/b2/7c/d9825de75ae5dd7795d007681b752275ea85a1c5d83269b4b9c754c2aaab/black-25.12.0-cp314-cp314-win_arm64.whl", hash = "sha256:778285d9ea197f34704e3791ea9404cd6d07595745907dd2ce3da7a13627b29b", size = 1267446, upload-time = "2025-12-08T01:46:14.497Z" }, + { url = "https://files.pythonhosted.org/packages/68/11/21331aed19145a952ad28fca2756a1433ee9308079bd03bd898e903a2e53/black-25.12.0-py3-none-any.whl", hash = "sha256:48ceb36c16dbc84062740049eef990bb2ce07598272e673c17d1a7720c71c828", size = 206191, upload-time = "2025-12-08T01:40:50.963Z" }, +] + [[package]] name = "certifi" version = "2026.1.4" @@ -1145,7 +1176,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/02/2f/28592176381b9ab2cafa12829ba7b472d177f3acc35d8fbcf3673d966fff/greenlet-3.3.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:a1e41a81c7e2825822f4e068c48cb2196002362619e2d70b148f20a831c00739", size = 275140, upload-time = "2025-12-04T14:23:01.282Z" }, { url = 
"https://files.pythonhosted.org/packages/2c/80/fbe937bf81e9fca98c981fe499e59a3f45df2a04da0baa5c2be0dca0d329/greenlet-3.3.0-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9f515a47d02da4d30caaa85b69474cec77b7929b2e936ff7fb853d42f4bf8808", size = 599219, upload-time = "2025-12-04T14:50:08.309Z" }, { url = "https://files.pythonhosted.org/packages/c2/ff/7c985128f0514271b8268476af89aee6866df5eec04ac17dcfbc676213df/greenlet-3.3.0-cp313-cp313-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7d2d9fd66bfadf230b385fdc90426fcd6eb64db54b40c495b72ac0feb5766c54", size = 610211, upload-time = "2025-12-04T14:57:43.968Z" }, - { url = "https://files.pythonhosted.org/packages/79/07/c47a82d881319ec18a4510bb30463ed6891f2ad2c1901ed5ec23d3de351f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30a6e28487a790417d036088b3bcb3f3ac7d8babaa7d0139edbaddebf3af9492", size = 624311, upload-time = "2025-12-04T15:07:14.697Z" }, { url = "https://files.pythonhosted.org/packages/fd/8e/424b8c6e78bd9837d14ff7df01a9829fc883ba2ab4ea787d4f848435f23f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:087ea5e004437321508a8d6f20efc4cfec5e3c30118e1417ea96ed1d93950527", size = 612833, upload-time = "2025-12-04T14:26:03.669Z" }, { url = "https://files.pythonhosted.org/packages/b5/ba/56699ff9b7c76ca12f1cdc27a886d0f81f2189c3455ff9f65246780f713d/greenlet-3.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ab97cf74045343f6c60a39913fa59710e4bd26a536ce7ab2397adf8b27e67c39", size = 1567256, upload-time = "2025-12-04T15:04:25.276Z" }, { url = "https://files.pythonhosted.org/packages/1e/37/f31136132967982d698c71a281a8901daf1a8fbab935dce7c0cf15f942cc/greenlet-3.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5375d2e23184629112ca1ea89a53389dddbffcf417dad40125713d88eb5f96e8", size = 1636483, upload-time = "2025-12-04T14:27:30.804Z" }, @@ -1153,7 +1183,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d7/7c/f0a6d0ede2c7bf092d00bc83ad5bafb7e6ec9b4aab2fbdfa6f134dc73327/greenlet-3.3.0-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:60c2ef0f578afb3c8d92ea07ad327f9a062547137afe91f38408f08aacab667f", size = 275671, upload-time = "2025-12-04T14:23:05.267Z" }, { url = "https://files.pythonhosted.org/packages/44/06/dac639ae1a50f5969d82d2e3dd9767d30d6dbdbab0e1a54010c8fe90263c/greenlet-3.3.0-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a5d554d0712ba1de0a6c94c640f7aeba3f85b3a6e1f2899c11c2c0428da9365", size = 646360, upload-time = "2025-12-04T14:50:10.026Z" }, { url = "https://files.pythonhosted.org/packages/e0/94/0fb76fe6c5369fba9bf98529ada6f4c3a1adf19e406a47332245ef0eb357/greenlet-3.3.0-cp314-cp314-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3a898b1e9c5f7307ebbde4102908e6cbfcb9ea16284a3abe15cab996bee8b9b3", size = 658160, upload-time = "2025-12-04T14:57:45.41Z" }, - { url = "https://files.pythonhosted.org/packages/93/79/d2c70cae6e823fac36c3bbc9077962105052b7ef81db2f01ec3b9bf17e2b/greenlet-3.3.0-cp314-cp314-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:dcd2bdbd444ff340e8d6bdf54d2f206ccddbb3ccfdcd3c25bf4afaa7b8f0cf45", size = 671388, upload-time = "2025-12-04T15:07:15.789Z" }, { url = "https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:5773edda4dc00e173820722711d043799d3adb4f01731f40619e07ea2750b955", size = 660166, upload-time = "2025-12-04T14:26:05.099Z" }, { url = "https://files.pythonhosted.org/packages/4b/d2/91465d39164eaa0085177f61983d80ffe746c5a1860f009811d498e7259c/greenlet-3.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ac0549373982b36d5fd5d30beb8a7a33ee541ff98d2b502714a09f1169f31b55", size = 1615193, upload-time = "2025-12-04T15:04:27.041Z" }, { url = "https://files.pythonhosted.org/packages/42/1b/83d110a37044b92423084d52d5d5a3b3a73cafb51b547e6d7366ff62eff1/greenlet-3.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d198d2d977460358c3b3a4dc844f875d1adb33817f0613f663a656f463764ccc", size = 1683653, upload-time = "2025-12-04T14:27:32.366Z" }, @@ -1161,7 +1190,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a0/66/bd6317bc5932accf351fc19f177ffba53712a202f9df10587da8df257c7e/greenlet-3.3.0-cp314-cp314t-macosx_11_0_universal2.whl", hash = "sha256:d6ed6f85fae6cdfdb9ce04c9bf7a08d666cfcfb914e7d006f44f840b46741931", size = 282638, upload-time = "2025-12-04T14:25:20.941Z" }, { url = "https://files.pythonhosted.org/packages/30/cf/cc81cb030b40e738d6e69502ccbd0dd1bced0588e958f9e757945de24404/greenlet-3.3.0-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d9125050fcf24554e69c4cacb086b87b3b55dc395a8b3ebe6487b045b2614388", size = 651145, upload-time = "2025-12-04T14:50:11.039Z" }, { url = "https://files.pythonhosted.org/packages/9c/ea/1020037b5ecfe95ca7df8d8549959baceb8186031da83d5ecceff8b08cd2/greenlet-3.3.0-cp314-cp314t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:87e63ccfa13c0a0f6234ed0add552af24cc67dd886731f2261e46e241608bee3", size = 654236, upload-time = "2025-12-04T14:57:47.007Z" }, - { url = "https://files.pythonhosted.org/packages/69/cc/1e4bae2e45ca2fa55299f4e85854606a78ecc37fead20d69322f96000504/greenlet-3.3.0-cp314-cp314t-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2662433acbca297c9153a4023fe2161c8dcfdcc91f10433171cf7e7d94ba2221", size = 662506, upload-time = "2025-12-04T15:07:16.906Z" }, { url = "https://files.pythonhosted.org/packages/57/b9/f8025d71a6085c441a7eaff0fd928bbb275a6633773667023d19179fe815/greenlet-3.3.0-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3c6e9b9c1527a78520357de498b0e709fb9e2f49c3a513afd5a249007261911b", size = 653783, upload-time = "2025-12-04T14:26:06.225Z" }, { url = "https://files.pythonhosted.org/packages/f6/c7/876a8c7a7485d5d6b5c6821201d542ef28be645aa024cfe1145b35c120c1/greenlet-3.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:286d093f95ec98fdd92fcb955003b8a3d054b4e2cab3e2707a5039e7b50520fd", size = 1614857, upload-time = "2025-12-04T15:04:28.484Z" }, { url = "https://files.pythonhosted.org/packages/4f/dc/041be1dff9f23dac5f48a43323cd0789cb798342011c19a248d9c9335536/greenlet-3.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c10513330af5b8ae16f023e8ddbfb486ab355d04467c4679c5cfe4659975dd9", size = 1676034, upload-time = "2025-12-04T14:27:33.531Z" }, @@ -1715,6 +1743,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b7/da/7d22601b625e241d4f23ef1ebff8acfc60da633c9e7e7922e24d10f592b3/multidict-6.7.0-py3-none-any.whl", hash = "sha256:394fc5c42a333c9ffc3e421a4c85e08580d990e08b99f6bf35b4132114c5dcb3", size = 12317, upload-time = "2025-10-06T14:52:29.272Z" }, ] +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +] + [[package]] name = "openai" version = "2.15.0" @@ -1912,6 +1949,24 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529", size = 74366, upload-time = "2026-01-21T20:50:37.788Z" }, ] +[[package]] +name = "pathspec" +version = "1.0.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fa/36/e27608899f9b8d4dff0617b2d9ab17ca5608956ca44461ac14ac48b44015/pathspec-1.0.4.tar.gz", hash = "sha256:0210e2ae8a21a9137c0d470578cb0e595af87edaa6ebf12ff176f14a02e0e645", size = 131200, upload-time = "2026-01-27T03:59:46.938Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/3c/2c197d226f9ea224a9ab8d197933f9da0ae0aac5b6e0f884e2b8d9c8e9f7/pathspec-1.0.4-py3-none-any.whl", hash = "sha256:fb6ae2fd4e7c921a165808a552060e722767cfa526f99ca5156ed2ce45a5c723", size = 55206, upload-time = "2026-01-27T03:59:45.137Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cf/86/0248f086a84f01b37aaec0fa567b397df1a119f73c16f6c7a9aac73ea309/platformdirs-4.5.1.tar.gz", hash = "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda", size = 21715, upload-time = "2025-12-05T13:52:58.638Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/28/3bfe2fa5a7b9c46fe7e13c97bda14c895fb10fa2ebf1d0abb90e0cea7ee1/platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31", size = 18731, upload-time = "2025-12-05T13:52:56.823Z" }, +] + [[package]] name = "propcache" version = "0.4.1" @@ -2156,6 +2211,24 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c1/60/5d4751ba3f4a40a6891f24eec885f51afd78d208498268c734e256fb13c4/pydantic_settings-2.12.0-py3-none-any.whl", hash = "sha256:fddb9fd99a5b18da837b29710391e945b1e30c135477f484084ee513adb93809", size = 51880, upload-time = "2025-11-10T14:25:45.546Z" }, ] +[[package]] +name = "pyink" +version = "25.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "black" }, + { name = "click" }, + { name = "mypy-extensions" }, + { name = "packaging" }, + { name = "pathspec" }, + { name = "platformdirs" }, + { name = "pytokens" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/31/45/5940abea3a364768b267ff4c73d898f7d692f649540e613a8fe67089abcc/pyink-25.12.0.tar.gz", hash = "sha256:930a913fed2824ffbbd3c10847fad1171c2b075dd709a13dc435caea851de7b8", size = 279674, upload-time = "2026-01-02T15:02:22.259Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ce/0b/209f6cde8f7c2ad8a47a2b5e41da101e6af782f8896d99f6cf2620c87ba7/pyink-25.12.0-py3-none-any.whl", hash = 
"sha256:3c9ed8c6f1f6f4a7f3a6a3a31bff5a7ab6b3c88954dc456273c8e71ce6ff0508", size = 143528, upload-time = "2026-01-02T15:02:20.385Z" }, +] + [[package]] name = "pyjwt" version = "2.10.1" @@ -2209,6 +2282,30 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/aa/76/03af049af4dcee5d27442f71b6924f01f3efb5d2bd34f23fcd563f2cc5f5/python_multipart-0.0.21-py3-none-any.whl", hash = "sha256:cf7a6713e01c87aa35387f4774e812c4361150938d20d232800f75ffcf266090", size = 24541, upload-time = "2025-12-17T09:24:21.153Z" }, ] +[[package]] +name = "pytokens" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e5/16/4b9cfd90d55e66ffdb277d7ebe3bc25250c2311336ec3fc73b2673c794d5/pytokens-0.4.0.tar.gz", hash = "sha256:6b0b03e6ea7c9f9d47c5c61164b69ad30f4f0d70a5d9fe7eac4d19f24f77af2d", size = 15039, upload-time = "2026-01-19T07:59:50.623Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/98/63/627b7e71d557383da5a97f473ad50f8d9c2c1f55c7d3c2531a120c796f6e/pytokens-0.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:73eff3bdd8ad08da679867992782568db0529b887bed4c85694f84cdf35eafc6", size = 159744, upload-time = "2026-01-19T07:59:16.88Z" }, + { url = "https://files.pythonhosted.org/packages/28/d7/16f434c37ec3824eba6bcb6e798e5381a8dc83af7a1eda0f95c16fe3ade5/pytokens-0.4.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d97cc1f91b1a8e8ebccf31c367f28225699bea26592df27141deade771ed0afb", size = 253207, upload-time = "2026-01-19T07:59:18.069Z" }, + { url = "https://files.pythonhosted.org/packages/ab/96/04102856b9527701ae57d74a6393d1aca5bad18a1b1ca48ccffb3c93b392/pytokens-0.4.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a2c8952c537cb73a1a74369501a83b7f9d208c3cf92c41dd88a17814e68d48ce", size = 267452, upload-time = "2026-01-19T07:59:19.328Z" }, + { url = "https://files.pythonhosted.org/packages/0e/ef/0936eb472b89ab2d2c2c24bb81c50417e803fa89c731930d9fb01176fe9f/pytokens-0.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5dbf56f3c748aed9310b310d5b8b14e2c96d3ad682ad5a943f381bdbbdddf753", size = 265965, upload-time = "2026-01-19T07:59:20.613Z" }, + { url = "https://files.pythonhosted.org/packages/ae/f5/64f3d6f7df4a9e92ebda35ee85061f6260e16eac82df9396020eebbca775/pytokens-0.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:e131804513597f2dff2b18f9911d9b6276e21ef3699abeffc1c087c65a3d975e", size = 102813, upload-time = "2026-01-19T07:59:22.012Z" }, + { url = "https://files.pythonhosted.org/packages/5f/f1/d07e6209f18ef378fc2ae9dee8d1dfe91fd2447c2e2dbfa32867b6dd30cf/pytokens-0.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0d7374c917197106d3c4761374718bc55ea2e9ac0fb94171588ef5840ee1f016", size = 159968, upload-time = "2026-01-19T07:59:23.07Z" }, + { url = "https://files.pythonhosted.org/packages/0a/73/0eb111400abd382a04f253b269819db9fcc748aa40748441cebdcb6d068f/pytokens-0.4.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0cd3fa1caf9e47a72ee134a29ca6b5bea84712724bba165d6628baa190c6ea5b", size = 253373, upload-time = "2026-01-19T07:59:24.381Z" }, + { url = "https://files.pythonhosted.org/packages/bd/8d/9e4e2fdb5bcaba679e54afcc304e9f13f488eb4d626e6b613f9553e03dbd/pytokens-0.4.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9c6986576b7b07fe9791854caa5347923005a80b079d45b63b0be70d50cce5f1", size = 267024, 
upload-time = "2026-01-19T07:59:25.74Z" }, + { url = "https://files.pythonhosted.org/packages/cb/b7/e0a370321af2deb772cff14ff337e1140d1eac2c29a8876bfee995f486f0/pytokens-0.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:9940f7c2e2f54fb1cb5fe17d0803c54da7a2bf62222704eb4217433664a186a7", size = 270912, upload-time = "2026-01-19T07:59:27.072Z" }, + { url = "https://files.pythonhosted.org/packages/7c/54/4348f916c440d4c3e68b53b4ed0e66b292d119e799fa07afa159566dcc86/pytokens-0.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:54691cf8f299e7efabcc25adb4ce715d3cef1491e1c930eaf555182f898ef66a", size = 103836, upload-time = "2026-01-19T07:59:28.112Z" }, + { url = "https://files.pythonhosted.org/packages/e8/f8/a693c0cfa9c783a2a8c4500b7b2a8bab420f8ca4f2d496153226bf1c12e3/pytokens-0.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:94ff5db97a0d3cd7248a5b07ba2167bd3edc1db92f76c6db00137bbaf068ddf8", size = 167643, upload-time = "2026-01-19T07:59:29.292Z" }, + { url = "https://files.pythonhosted.org/packages/c0/dd/a64eb1e9f3ec277b69b33ef1b40ffbcc8f0a3bafcde120997efc7bdefebf/pytokens-0.4.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d0dd6261cd9cc95fae1227b1b6ebee023a5fd4a4b6330b071c73a516f5f59b63", size = 289553, upload-time = "2026-01-19T07:59:30.537Z" }, + { url = "https://files.pythonhosted.org/packages/df/22/06c1079d93dbc3bca5d013e1795f3d8b9ed6c87290acd6913c1c526a6bb2/pytokens-0.4.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0cdca8159df407dbd669145af4171a0d967006e0be25f3b520896bc7068f02c4", size = 302490, upload-time = "2026-01-19T07:59:32.352Z" }, + { url = "https://files.pythonhosted.org/packages/8d/de/a6f5e43115b4fbf4b93aa87d6c83c79932cdb084f9711daae04549e1e4ad/pytokens-0.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:4b5770abeb2a24347380a1164a558f0ebe06e98aedbd54c45f7929527a5fb26e", size = 305652, upload-time = "2026-01-19T07:59:33.685Z" }, + { url = "https://files.pythonhosted.org/packages/ab/3d/c136e057cb622e36e0c3ff7a8aaa19ff9720050c4078235691da885fe6ee/pytokens-0.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:74500d72c561dad14c037a9e86a657afd63e277dd5a3bb7570932ab7a3b12551", size = 115472, upload-time = "2026-01-19T07:59:34.734Z" }, + { url = "https://files.pythonhosted.org/packages/7c/3c/6941a82f4f130af6e1c68c076b6789069ef10c04559bd4733650f902fd3b/pytokens-0.4.0-py3-none-any.whl", hash = "sha256:0508d11b4de157ee12063901603be87fb0253e8f4cb9305eb168b1202ab92068", size = 13224, upload-time = "2026-01-19T07:59:49.822Z" }, +] + [[package]] name = "pywin32" version = "311"