diff --git a/docs/docs.json b/docs/docs.json index 32129340e6..7133aca58d 100644 --- a/docs/docs.json +++ b/docs/docs.json @@ -202,6 +202,7 @@ "en/tools/search-research/youtubevideosearchtool", "en/tools/search-research/tavilysearchtool", "en/tools/search-research/tavilyextractortool", + "en/tools/search-research/parallelsearchtool", "en/tools/search-research/arxivpapertool", "en/tools/search-research/serpapi-googlesearchtool", "en/tools/search-research/serpapi-googleshoppingtool", diff --git a/docs/en/tools/search-research/overview.mdx b/docs/en/tools/search-research/overview.mdx index 1c390268d2..539d5e41b9 100644 --- a/docs/en/tools/search-research/overview.mdx +++ b/docs/en/tools/search-research/overview.mdx @@ -54,6 +54,10 @@ These tools enable your agents to search the web, research topics, and find info Extract structured content from web pages using the Tavily API. + + LLM-optimized web search with compressed excerpts via the Parallel API. + + Search arXiv and optionally download PDFs. @@ -76,7 +80,14 @@ These tools enable your agents to search the web, research topics, and find info - **Academic Research**: Find scholarly articles and technical papers ```python -from crewai_tools import SerperDevTool, GitHubSearchTool, YoutubeVideoSearchTool, TavilySearchTool, TavilyExtractorTool +from crewai_tools import ( + SerperDevTool, + GitHubSearchTool, + YoutubeVideoSearchTool, + TavilySearchTool, + TavilyExtractorTool, + ParallelSearchTool, +) # Create research tools web_search = SerperDevTool() @@ -84,11 +95,12 @@ code_search = GitHubSearchTool() video_research = YoutubeVideoSearchTool() tavily_search = TavilySearchTool() content_extractor = TavilyExtractorTool() +parallel_search = ParallelSearchTool() # Add to your agent agent = Agent( role="Research Analyst", - tools=[web_search, code_search, video_research, tavily_search, content_extractor], + tools=[web_search, code_search, video_research, tavily_search, content_extractor, parallel_search], goal="Gather comprehensive information on any topic" ) ``` diff --git a/docs/en/tools/search-research/parallelsearchtool.mdx b/docs/en/tools/search-research/parallelsearchtool.mdx new file mode 100644 index 0000000000..9dcbe0a6a9 --- /dev/null +++ b/docs/en/tools/search-research/parallelsearchtool.mdx @@ -0,0 +1,288 @@ +--- +title: "Parallel Search Tool" +description: "Perform LLM-optimized web searches using the Parallel Search API" +icon: "magnifying-glass" +mode: "wide" +--- + +The `ParallelSearchTool` provides an interface to the [Parallel Search API](https://docs.parallel.ai/search/search-quickstart), enabling CrewAI agents to perform web searches that return ranked results with compressed excerpts optimized for LLMs. It replaces the traditional search → scrape → extract pipeline with a single, low-latency API call. + +## Installation + +To use the `ParallelSearchTool`, you need to install the `parallel-web` library: + +```shell +pip install 'crewai[tools]' parallel-web +``` + +## Environment Variables + +Ensure your Parallel API key is set as an environment variable: + +```bash +export PARALLEL_API_KEY='your_parallel_api_key' +``` + +Get an API key at [platform.parallel.ai](https://platform.parallel.ai). 
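+Once the key is set, you can sanity-check the tool directly before wiring it into an agent. This is a minimal sketch using only the `run()` interface described below; the exact output payload may vary.
+
+```python
+from crewai_tools import ParallelSearchTool
+
+# Reads PARALLEL_API_KEY from the environment
+tool = ParallelSearchTool()
+
+# At least one of `objective` or `search_queries` must be provided
+results_json = tool.run(objective="When was the United Nations established?")
+print(results_json)  # JSON string: {"search_id": ..., "results": [...]}
+```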
+ +## Example Usage + +Here's how to initialize and use the `ParallelSearchTool` within a CrewAI agent: + +```python +from crewai import Agent, Task, Crew +from crewai_tools import ParallelSearchTool + +# Initialize the tool +parallel_tool = ParallelSearchTool() + +# Create an agent that uses the tool +researcher = Agent( + role='Market Researcher', + goal='Find information about the latest AI trends', + backstory='An expert market researcher specializing in technology.', + tools=[parallel_tool], + verbose=True +) + +# Create a task for the agent +research_task = Task( + description='Search for the top 3 AI trends in 2024.', + expected_output='A JSON report summarizing the top 3 AI trends found.', + agent=researcher +) + +# Form the crew and kick it off +crew = Crew( + agents=[researcher], + tasks=[research_task], + verbose=True +) + +result = crew.kickoff() +print(result) +``` + +## Configuration Options + +The `ParallelSearchTool` accepts the following arguments during initialization: + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `api_key` | str | env var | Parallel API key (defaults to `PARALLEL_API_KEY` env var) | +| `mode` | str | None | `"one-shot"` (comprehensive results) or `"agentic"` (concise, token-efficient for multi-step workflows) | +| `max_results` | int | 10 | Maximum number of results to return (1-20) | +| `excerpts` | dict | None | Excerpt configuration, e.g., `{"max_chars_per_result": 10000, "max_chars_total": 50000}` | +| `fetch_policy` | dict | None | Content freshness control, e.g., `{"max_age_seconds": 3600}` | +| `source_policy` | dict | None | Domain inclusion/exclusion policy | + +## Run Parameters + +When calling `run()`, pass these parameters: + +| Parameter | Type | Required | Description | +|-----------|------|----------|-------------| +| `objective` | str | At least one required | Natural-language research goal (<= 5000 chars) | +| `search_queries` | list[str] | At least one required | Up to 5 keyword queries (each <= 200 chars) | +| `max_results` | int | No | Override the default max results for this search (1-20) | +| `source_policy` | dict | No | Override the default source policy for this search | + + +At least one of `objective` or `search_queries` is required. The `max_results` and `source_policy` parameters can be set at initialization (as defaults) or overridden per-search at runtime. + + +## Advanced Usage + +You can configure the tool with custom parameters: + +```python +# Example: Initialize with specific parameters +custom_parallel_tool = ParallelSearchTool( + mode='agentic', + max_results=5, + excerpts={"max_chars_per_result": 10000} +) + +# The agent will use these defaults +agent_with_custom_tool = Agent( + role="Advanced Researcher", + goal="Conduct detailed research with comprehensive results", + tools=[custom_parallel_tool] +) +``` + +### Domain Filtering + +Restrict searches to specific domains. You can set defaults at initialization or override per-search: + +```python +# Set default source policy at initialization +parallel_tool = ParallelSearchTool( + max_results=5, + source_policy={ + "allow": {"domains": ["un.org", "who.int"]}, + } +) + +# Use the default source policy +result = parallel_tool.run( + objective="When was the United Nations established?" 
+) + +# Override source policy for a specific search +result = parallel_tool.run( + objective="Latest WHO health guidelines", + source_policy={"allow": {"domains": ["who.int"]}}, + max_results=3 # Also override max_results for this search +) +``` + +### Fresh Content + +Request recent content only: + +```python +parallel_tool = ParallelSearchTool( + fetch_policy={"max_age_seconds": 3600}, # Content no older than 1 hour + max_results=10 +) + +result = parallel_tool.run( + objective="Latest news on AI regulation" +) +``` + +### Agentic Mode + +For multi-step reasoning workflows, use `agentic` mode for more concise, token-efficient results: + +```python +parallel_tool = ParallelSearchTool( + mode="agentic", + max_results=5 +) + +result = parallel_tool.run( + objective="Find recent research on quantum error correction", + search_queries=["quantum error correction 2024", "QEC algorithms"] +) +``` + +## Complete Agent Example + +Here's a complete example that uses `ParallelSearchTool` with a CrewAI agent to research a topic and produce a cited answer. + +**Additional requirements:** + +```shell +uv add "crewai[anthropic]" +``` + +**Environment variable:** +- `ANTHROPIC_API_KEY` (for the LLM) + +```python +import os +from crewai import Agent, Task, Crew, LLM, Process +from crewai_tools import ParallelSearchTool + +# Configure Claude as the LLM +llm = LLM( + model="anthropic/claude-opus-4-5-20251101", + temperature=0.5, + max_tokens=4096, + api_key=os.getenv("ANTHROPIC_API_KEY"), +) + +# Initialize ParallelSearchTool with agentic mode for multi-step workflows +search = ParallelSearchTool( + mode="agentic", + max_results=10, + excerpts={"max_chars_per_result": 10000}, +) + +# User query +query = "What are current best practices for LLM evaluation and benchmarking?" + +# Create a researcher agent +researcher = Agent( + role="Web Researcher", + backstory="You are an expert web researcher who finds and synthesizes information from multiple sources.", + goal="Find high-quality, cited sources and provide accurate, well-sourced answers.", + tools=[search], + llm=llm, + verbose=True, +) + +# Define the research task +task = Task( + description=f"Research: {query}\n\nProvide a comprehensive answer with citations.", + expected_output="A concise answer with inline citations in the format: [claim] - [source URL]", + agent=researcher, + output_file="research_output.md", +) + +# Create and run the crew +crew = Crew( + agents=[researcher], + tasks=[task], + verbose=True, + process=Process.sequential, +) + +result = crew.kickoff() +print(result) +``` + + +```markdown +# Current Best Practices for LLM Evaluation and Benchmarking + +## Overview + +LLM evaluation is the systematic process of assessing the performance of Large Language +Models to determine their effectiveness, reliability, and efficiency, helping developers +understand the model's strengths and weaknesses while ensuring it functions as expected +in real-world applications - https://www.singlestore.com/blog/complete-guide-to-evaluating-large-language-models/ + +## Key Evaluation Methodologies + +### 1. Multiple-Choice Benchmarks + +Multiple-choice benchmarks like MMLU test an LLM's knowledge recall in a straightforward, +quantifiable way similar to standardized tests, measuring accuracy as the fraction of +correctly answered questions - https://magazine.sebastianraschka.com/p/llm-evaluation-4-approaches + +... 
+``` + + + +- Use `mode="agentic"` for multi-step reasoning workflows (more concise, token-efficient results) +- Use `mode="one-shot"` for single comprehensive searches +- Increase `excerpts={"max_chars_per_result": N}` for more detailed source content + + +## Features + +- **Single-Call Pipeline**: Replaces search → scrape → extract with one API call +- **LLM-Optimized**: Returns compressed excerpts designed for LLM prompts +- **Flexible Modes**: Choose between comprehensive (`one-shot`) or concise (`agentic`) results +- **Domain Control**: Include or exclude specific domains via `source_policy` +- **Freshness Control**: Limit results to recent content via `fetch_policy` +- **Built-in Reliability**: SDK includes retries, timeouts, and error handling + +## Response Format + +The tool returns search results as a JSON string containing: +- `search_id`: Unique identifier for the search +- `results`: Array of ranked results, each with: + - `url`: Source URL + - `title`: Page title + - `excerpts`: Compressed, relevant excerpts from the page + +## References + +- [Search API Quickstart](https://docs.parallel.ai/search/search-quickstart) +- [Search API Best Practices](https://docs.parallel.ai/search/best-practices) +- [Parallel Platform](https://platform.parallel.ai) diff --git a/lib/crewai-tools/pyproject.toml b/lib/crewai-tools/pyproject.toml index 60853ed744..a7ffa6f17c 100644 --- a/lib/crewai-tools/pyproject.toml +++ b/lib/crewai-tools/pyproject.toml @@ -74,6 +74,9 @@ linkup-sdk = [ tavily-python = [ "tavily-python>=0.5.4", ] +parallel-web = [ + "parallel-web>=0.3.4", +] hyperbrowser = [ "hyperbrowser>=0.18.0", ] diff --git a/lib/crewai-tools/src/crewai_tools/tools/parallel_tools/README.md b/lib/crewai-tools/src/crewai_tools/tools/parallel_tools/README.md index 37f4135612..29bfeac569 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/parallel_tools/README.md +++ b/lib/crewai-tools/src/crewai_tools/tools/parallel_tools/README.md @@ -1,153 +1,247 @@ # ParallelSearchTool -Unified Parallel web search tool using the Parallel Search API (v1beta). Returns ranked results with compressed excerpts optimized for LLMs. +A tool that performs web searches using the Parallel Search API. Returns ranked results with compressed excerpts optimized for LLMs. -- **Quickstart**: see the official docs: [Search API Quickstart](https://docs.parallel.ai/search-api/search-quickstart) -- **Processors**: guidance on `base` vs `pro`: [Processors](https://docs.parallel.ai/search-api/processors) +- **Quickstart**: see the official docs: [Search API Quickstart](https://docs.parallel.ai/search/search-quickstart) +- **Best Practices**: [Search API Best Practices](https://docs.parallel.ai/search/best-practices) + +## Installation + +To use the `ParallelSearchTool`, you need to install the `parallel-web` library: + +```shell +pip install 'crewai[tools]' parallel-web +``` ## Why this tool -- **Single-call pipeline**: Replaces search → scrape → extract with a single, low‑latency API call. -- **LLM‑ready**: Returns compressed excerpts that feed directly into LLM prompts (fewer tokens, less pre/post‑processing). +- **Single-call pipeline**: Replaces search -> scrape -> extract with a single, low-latency API call. +- **LLM-ready**: Returns compressed excerpts that feed directly into LLM prompts (fewer tokens, less pre/post-processing). - **Flexible**: Control result count and excerpt length; optionally restrict sources via `source_policy`. 
## Environment - `PARALLEL_API_KEY` (required) +- `ANTHROPIC_API_KEY` (optional, for the agent example) + +## Initialization Parameters + +Configure the tool at initialization time: -Optional (for the agent example): -- `OPENAI_API_KEY` or other LLM provider keys supported by CrewAI +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `api_key` | str | env var | Parallel API key (defaults to `PARALLEL_API_KEY` env var) | +| `mode` | str | None | `"one-shot"` (comprehensive) or `"agentic"` (concise for multi-step workflows) | +| `max_results` | int | 10 | Maximum results to return (1-20) | +| `excerpts` | dict | None | `{"max_chars_per_result": 10000, "max_chars_total": 50000}` | +| `fetch_policy` | dict | None | `{"max_age_seconds": 3600}` for content freshness | +| `source_policy` | dict | None | Domain inclusion/exclusion policy | -## Parameters +## Run Parameters -- `objective` (str, optional): Natural‑language research goal (≤ 5000 chars) -- `search_queries` (list[str], optional): Up to 5 keyword queries (each ≤ 200 chars) -- `processor` (str, default `base`): `base` (fast/low cost) or `pro` (freshness/quality) -- `max_results` (int, default 10): ≤ 40 (subject to processor limits) -- `max_chars_per_result` (int, default 6000): ≥ 100; values > 30000 not guaranteed -- `source_policy` (dict, optional): Source policy for domain inclusion/exclusion +Pass these when calling `run()`: + +| Parameter | Type | Required | Description | +|-----------|------|----------|-------------| +| `objective` | str | One of these | Natural-language research goal (<= 5000 chars) | +| `search_queries` | list[str] | required | Up to 5 keyword queries (each <= 200 chars) | +| `max_results` | int | No | Override the default max results for this search (1-20) | +| `source_policy` | dict | No | Override the default source policy for this search | Notes: -- API is in beta; default rate limit is 600 RPM. Contact support for production capacity. +- At least one of `objective` or `search_queries` is required. +- `max_results` and `source_policy` can be set at initialization (as defaults) or overridden per-search at runtime. +- API is in beta; default rate limit is 600 RPM. Contact support@parallel.ai for production capacity. -## Direct usage (when published) +## Direct usage ```python from crewai_tools import ParallelSearchTool -tool = ParallelSearchTool() +# Initialize with configuration +tool = ParallelSearchTool( + mode="one-shot", + max_results=5, + excerpts={"max_chars_per_result": 10000}, +) + +# Run a search resp_json = tool.run( - objective="When was the United Nations established? Prefer UN's websites.", - search_queries=["Founding year UN", "Year of founding United Nations"], - processor="base", - max_results=5, - max_chars_per_result=1500, + objective="When was the United Nations established? 
Prefer UN's websites.", + search_queries=["Founding year UN", "Year of founding United Nations"], ) print(resp_json) # => {"search_id": ..., "results": [{"url", "title", "excerpts": [...]}, ...]} ``` -### Parameters you can pass +### Example with `source_policy` + +```python +from crewai_tools import ParallelSearchTool + +# Set defaults at initialization +tool = ParallelSearchTool( + max_results=5, + excerpts={"max_chars_per_result": 5000}, + source_policy={ + "allow": {"domains": ["un.org"]}, + # "deny": {"domains": ["example.com"]}, # optional + }, +) -Call `run(...)` with any of the following (at least one of `objective` or `search_queries` is required): +# Use the default source policy +resp_json = tool.run( + objective="When was the United Nations established?", +) + +# Override source_policy and max_results for a specific search +resp_json = tool.run( + objective="Latest WHO health guidelines", + source_policy={"allow": {"domains": ["who.int"]}}, + max_results=3, +) +``` + +### Example with `mode` for agentic workflows ```python -tool.run( - objective: str | None = None, # ≤ 5000 chars - search_queries: list[str] | None = None, # up to 5 items, each ≤ 200 chars - processor: str = "base", # "base" (fast) or "pro" (freshness/quality) - max_results: int = 10, # ≤ 40 (processor limits apply) - max_chars_per_result: int = 6000, # ≥ 100 (values > 30000 not guaranteed) - source_policy: dict | None = None, # optional SourcePolicy config +from crewai_tools import ParallelSearchTool + +# Use "agentic" mode for multi-step reasoning loops +tool = ParallelSearchTool( + mode="agentic", # Concise, token-efficient results + max_results=5, +) + +resp_json = tool.run( + objective="Find recent research on quantum error correction", + search_queries=["quantum error correction 2024", "QEC algorithms"], ) ``` -Example with `source_policy`: +### Example with `fetch_policy` for fresh content ```python -source_policy = { - "allow": {"domains": ["un.org"]}, - # "deny": {"domains": ["example.com"]}, # optional -} +from crewai_tools import ParallelSearchTool + +# Request content no older than 1 hour +tool = ParallelSearchTool( + fetch_policy={"max_age_seconds": 3600}, + max_results=10, +) resp_json = tool.run( - objective="When was the United Nations established?", - processor="base", - max_results=5, - max_chars_per_result=1500, - source_policy=source_policy, + objective="Latest news on AI regulation", ) ``` -## Example with agents +## Example with CrewAI Agents + +Here's a complete example that uses `ParallelSearchTool` with a CrewAI agent to research a topic and produce a cited answer. + +**Additional requirements:** + +```shell +uv add "crewai[anthropic]" +``` -Here’s a minimal example that calls `ParallelSearchTool` to fetch sources and has an LLM produce a short, cited answer. 
+**Environment variable:** +- `ANTHROPIC_API_KEY` (for the LLM) ```python import os from crewai import Agent, Task, Crew, LLM, Process from crewai_tools import ParallelSearchTool -# LLM +# Configure Claude as the LLM llm = LLM( - model="gemini/gemini-2.0-flash", - temperature=0.5, - api_key=os.getenv("GEMINI_API_KEY") + model="anthropic/claude-opus-4-5-20251101", + temperature=0.5, + max_tokens=4096, + api_key=os.getenv("ANTHROPIC_API_KEY"), ) -# Parallel Search -search = ParallelSearchTool() +# Initialize ParallelSearchTool with agentic mode for multi-step workflows +search = ParallelSearchTool( + mode="agentic", + max_results=10, + excerpts={"max_chars_per_result": 10000}, +) # User query -query = "find all the recent concerns about AI evals? please cite the sources" +query = "What are current best practices for LLM evaluation and benchmarking?" -# Researcher agent +# Create a researcher agent researcher = Agent( - role="Web Researcher", - backstory="You are an expert web researcher", - goal="Find cited, high-quality sources and provide a brief answer.", - tools=[search], - llm=llm, - verbose=True, + role="Web Researcher", + backstory="You are an expert web researcher who finds and synthesizes information from multiple sources.", + goal="Find high-quality, cited sources and provide accurate, well-sourced answers.", + tools=[search], + llm=llm, + verbose=True, ) -# Research task +# Define the research task task = Task( - description=f"Research the {query} and produce a short, cited answer.", - expected_output="A concise, sourced answer to the question. The answer should be in this format: [query]: [answer] - [source]", - agent=researcher, - output_file="answer.mdx", + description=f"Research: {query}\n\nProvide a comprehensive answer with citations.", + expected_output="A concise answer with inline citations in the format: [claim] - [source URL]", + agent=researcher, + output_file="research_output.md", ) -# Crew +# Create and run the crew crew = Crew( - agents=[researcher], - tasks=[task], + agents=[researcher], + tasks=[task], verbose=True, process=Process.sequential, ) -# Run the crew -result = crew.kickoff(inputs={'query': query}) +result = crew.kickoff() print(result) ``` -Output from the agent above: +**Example output:** + +```markdown +# Current Best Practices for LLM Evaluation and Benchmarking + +## Overview + +LLM evaluation is the systematic process of assessing the performance of Large Language +Models to determine their effectiveness, reliability, and efficiency, helping developers +understand the model's strengths and weaknesses while ensuring it functions as expected +in real-world applications - https://www.singlestore.com/blog/complete-guide-to-evaluating-large-language-models/ + +## Key Evaluation Methodologies -```md -Recent concerns about AI evaluations include: the rise of AI-related incidents alongside a lack of standardized Responsible AI (RAI) evaluations among major industrial model developers - [https://hai.stanford.edu/ai-index/2025-ai-index-report]; flawed benchmark datasets that fail to account for critical factors, leading to unrealistic estimates of AI model abilities - [https://www.nature.com/articles/d41586-025-02462-5]; the need for multi-metric, context-aware evaluations in medical imaging AI to ensure reliability and clinical relevance - [https://www.sciencedirect.com/science/article/pii/S3050577125000283]; challenges related to data sets (insufficient, imbalanced, or poor quality), communication gaps, and misaligned expectations in AI model training - 
[https://www.oracle.com/artificial-intelligence/ai-model-training-challenges/]; the argument that LLM agents should be evaluated primarily on their riskiness, not just performance, due to unreliability, hallucinations, and brittleness - [https://www.technologyreview.com/2025/06/24/1119187/fix-ai-evaluation-crisis/]; the fact that the AI industry's embraced benchmarks may be close to meaningless, with top makers of AI models picking and choosing different responsible AI benchmarks, complicating efforts to systematically compare risks and limitations - [https://themarkup.org/artificial-intelligence/2024/07/17/everyone-is-judging-ai-by-these-tests-but-experts-say-theyre-close-to-meaningless]; and the difficulty of building robust and reliable model evaluations, as many existing evaluation suites are limited in their ability to serve as accurate indicators of model capabilities or safety - [https://www.anthropic.com/research/evaluating-ai-systems]. +### 1. Multiple-Choice Benchmarks + +Multiple-choice benchmarks like MMLU test an LLM's knowledge recall in a straightforward, +quantifiable way similar to standardized tests, measuring accuracy as the fraction of +correctly answered questions - https://magazine.sebastianraschka.com/p/llm-evaluation-4-approaches + +... ``` -Tips: -- Ensure your LLM provider keys are set (e.g., `GEMINI_API_KEY`) and CrewAI model config is in place. -- For longer analyses, raise `max_chars_per_result` or use `processor="pro"` (higher quality, higher latency). +**Tips:** +- Use `mode="agentic"` for multi-step reasoning workflows (more concise, token-efficient results) +- Use `mode="one-shot"` for single comprehensive searches +- Increase `excerpts={"max_chars_per_result": N}` for more detailed source content + +## Deprecated Parameters + +The following parameters are deprecated but still accepted in `run()` for backwards compatibility: + +- `processor`: No longer used. Use `mode` at initialization instead. +- `max_chars_per_result`: Use `excerpts={"max_chars_per_result": N}` at initialization instead. ## Behavior -- Single‑request web research; no scraping/post‑processing required. +- Single-request web research; no scraping/post-processing required. - Returns `search_id` and ranked `results` with compressed `excerpts`. -- Clear error handling on HTTP/timeouts. +- Uses the `parallel-web` Python SDK with built-in retries, timeouts, and error handling. 
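+
+For reference, here is a minimal sketch of consuming the returned JSON string; the field names follow the response shape described above, and the actual payload may include additional fields.
+
+```python
+import json
+
+from crewai_tools import ParallelSearchTool
+
+tool = ParallelSearchTool(max_results=3)
+raw = tool.run(objective="When was the United Nations established?")
+
+data = json.loads(raw)  # {"search_id": ..., "results": [...]}
+for result in data.get("results", []):
+    # Each result carries a URL, a title, and compressed excerpts
+    print(result["url"], "-", result["title"])
+    for excerpt in result.get("excerpts", []):
+        print("  ", excerpt[:120], "...")
+```
+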
## References -- Search API Quickstart: https://docs.parallel.ai/search-api/search-quickstart -- Processors: https://docs.parallel.ai/search-api/processors +- Search API Quickstart: https://docs.parallel.ai/search/search-quickstart +- Best Practices: https://docs.parallel.ai/search/best-practices diff --git a/lib/crewai-tools/src/crewai_tools/tools/parallel_tools/parallel_search_tool.py b/lib/crewai-tools/src/crewai_tools/tools/parallel_tools/parallel_search_tool.py index b015ec6957..f6bfb31b92 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/parallel_tools/parallel_search_tool.py +++ b/lib/crewai-tools/src/crewai_tools/tools/parallel_tools/parallel_search_tool.py @@ -1,13 +1,24 @@ +import json import os -from typing import Annotated, Any +from typing import Annotated, Any, Literal +import warnings from crewai.tools import BaseTool, EnvVar -from pydantic import BaseModel, Field -import requests +from dotenv import load_dotenv +from pydantic import BaseModel, ConfigDict, Field + + +load_dotenv() +try: + from parallel import Parallel + + PARALLEL_AVAILABLE = True +except ImportError: + PARALLEL_AVAILABLE = False class ParallelSearchInput(BaseModel): - """Input schema for ParallelSearchTool using the Search API (v1beta). + """Input schema for ParallelSearchTool using the Search API. At least one of objective or search_queries is required. """ @@ -23,103 +34,227 @@ class ParallelSearchInput(BaseModel): min_length=1, max_length=5, ) - processor: str = Field( - default="base", - description="Search processor: 'base' (fast/low cost) or 'pro' (higher quality/freshness)", - pattern=r"^(base|pro)$", - ) - max_results: int = Field( - default=10, + max_results: int | None = Field( + default=None, ge=1, - le=40, - description="Maximum number of search results to return (processor limits apply)", - ) - max_chars_per_result: int = Field( - default=6000, - ge=100, - description="Maximum characters per result excerpt (values >30000 not guaranteed)", + le=20, + description="Maximum number of search results to return (1-20). If not provided, uses tool default.", ) source_policy: dict[str, Any] | None = Field( - default=None, description="Optional source policy configuration" + default=None, + description="Source policy for domain inclusion/exclusion. Example: {'include': ['example.com']} or {'exclude': ['spam.com']}", ) class ParallelSearchTool(BaseTool): - name: str = "Parallel Web Search Tool" + """Tool that uses the Parallel Search API to perform web searches. + + Attributes: + client: An instance of Parallel client. + name: The name of the tool. + description: A description of the tool's purpose. + args_schema: The schema for the tool's arguments. + api_key: The Parallel API key. + mode: Search mode ('one-shot' or 'agentic'). + max_results: Maximum number of results to return. + excerpts: Excerpt configuration for result length. + fetch_policy: Content freshness control. + source_policy: Domain inclusion/exclusion policy. + """ + + model_config = ConfigDict(arbitrary_types_allowed=True) + client: Any = None + name: str = "Parallel Web Search" description: str = ( - "Search the web using Parallel's Search API (v1beta). Returns ranked results with " - "compressed excerpts optimized for LLMs." + "A tool that performs web searches using the Parallel Search API. " + "Returns ranked results with compressed excerpts optimized for LLMs." 
) args_schema: type[BaseModel] = ParallelSearchInput - + api_key: str | None = Field( + default_factory=lambda: os.getenv("PARALLEL_API_KEY"), + description="The Parallel API key. If not provided, it will be loaded from the environment variable PARALLEL_API_KEY.", + ) + mode: Literal["one-shot", "agentic"] | None = Field( + default=None, + description=( + "Search mode: 'one-shot' (comprehensive results, default) or " + "'agentic' (concise, token-efficient for multi-step workflows)" + ), + ) + max_results: int = Field( + default=10, + ge=1, + le=20, + description="Maximum number of search results to return (1-20)", + ) + excerpts: dict[str, int] | None = Field( + default=None, + description="Excerpt configuration: {'max_chars_per_result': 10000, 'max_chars_total': 50000}", + ) + fetch_policy: dict[str, int] | None = Field( + default=None, + description="Content freshness control: {'max_age_seconds': 3600}", + ) + source_policy: dict[str, Any] | None = Field( + default=None, + description="Source policy for domain inclusion/exclusion", + ) + package_dependencies: list[str] = Field(default_factory=lambda: ["parallel-web"]) env_vars: list[EnvVar] = Field( default_factory=lambda: [ EnvVar( name="PARALLEL_API_KEY", - description="API key for Parallel", + description="API key for Parallel search service", required=True, ), ] ) - package_dependencies: list[str] = Field(default_factory=lambda: ["requests"]) - search_url: str = "https://api.parallel.ai/v1beta/search" + def __init__(self, **kwargs: Any): + super().__init__(**kwargs) + if not PARALLEL_AVAILABLE: + raise ImportError( + "Missing optional dependency 'parallel-web'. Install with:\n" + " uv add crewai-tools --extra parallel-web\n" + "or\n" + " pip install parallel-web" + ) + + if "PARALLEL_API_KEY" not in os.environ and not kwargs.get("api_key"): + raise ValueError( + "Environment variable PARALLEL_API_KEY is required for ParallelSearchTool. " + "Set it with: export PARALLEL_API_KEY='your_api_key'" + ) + + self.client = Parallel(api_key=self.api_key) def _run( self, objective: str | None = None, search_queries: list[str] | None = None, - processor: str = "base", - max_results: int = 10, - max_chars_per_result: int = 6000, + max_results: int | None = None, source_policy: dict[str, Any] | None = None, + # Deprecated parameters for backwards compatibility + processor: str | None = None, + max_chars_per_result: int | None = None, **_: Any, ) -> str: - api_key = os.environ.get("PARALLEL_API_KEY") - if not api_key: - return "Error: PARALLEL_API_KEY environment variable is required" + """Synchronously performs a search using the Parallel API. + + Args: + objective: Natural-language goal for the web research. + search_queries: Optional list of keyword queries. + max_results: Maximum results to return. Overrides init value if provided. + source_policy: Domain inclusion/exclusion policy. Overrides init value if provided. + processor: DEPRECATED - no longer used. + max_chars_per_result: DEPRECATED - use excerpts config instead. + + Returns: + A JSON string containing the search results. + """ + if not self.client: + raise ValueError( + "Parallel client is not initialized. Ensure 'parallel-web' is installed and API key is set." 
+ ) if not objective and not search_queries: return "Error: Provide at least one of 'objective' or 'search_queries'" - headers = { - "x-api-key": api_key, - "Content-Type": "application/json", - } + # Handle deprecated parameters + excerpts = self._handle_deprecated_params(processor, max_chars_per_result) + + # Use runtime values if provided, otherwise fall back to init values + effective_max_results = ( + max_results if max_results is not None else self.max_results + ) + effective_source_policy = ( + source_policy if source_policy is not None else self.source_policy + ) + + search_params = self._build_search_params( + objective, + search_queries, + excerpts, + effective_max_results, + effective_source_policy, + ) try: - payload: dict[str, Any] = { - "processor": processor, - "max_results": max_results, - "max_chars_per_result": max_chars_per_result, - } - if objective is not None: - payload["objective"] = objective - if search_queries is not None: - payload["search_queries"] = search_queries - if source_policy is not None: - payload["source_policy"] = source_policy - - request_timeout = 90 if processor == "pro" else 30 - resp = requests.post( - self.search_url, json=payload, headers=headers, timeout=request_timeout - ) - if resp.status_code >= 300: - return ( - f"Parallel Search API error: {resp.status_code} {resp.text[:200]}" - ) - data = resp.json() - return self._format_output(data) - except requests.Timeout: - return "Parallel Search API timeout. Please try again later." + response = self.client.beta.search(**search_params) + return self._format_output(response) except Exception as exc: - return f"Unexpected error calling Parallel Search API: {exc}" + return f"Parallel Search API error: {exc}" + + def _handle_deprecated_params( + self, + processor: str | None, + max_chars_per_result: int | None, + ) -> dict[str, int] | None: + """Handle deprecated parameters with warnings.""" + excerpts = self.excerpts + + if processor is not None: + warnings.warn( + "The 'processor' parameter is deprecated and will be ignored. " + "Use 'mode' instead when initializing the tool.", + DeprecationWarning, + stacklevel=3, + ) + + if max_chars_per_result is not None: + warnings.warn( + "The 'max_chars_per_result' parameter is deprecated. 
" + "Use 'excerpts={\"max_chars_per_result\": N}' when initializing the tool.", + DeprecationWarning, + stacklevel=3, + ) + # Map to new excerpts parameter for backwards compatibility + if excerpts is None: + excerpts = {"max_chars_per_result": max_chars_per_result} + elif "max_chars_per_result" not in excerpts: + excerpts = {**excerpts, "max_chars_per_result": max_chars_per_result} + + return excerpts + + def _build_search_params( + self, + objective: str | None, + search_queries: list[str] | None, + excerpts: dict[str, int] | None, + max_results: int, + source_policy: dict[str, Any] | None, + ) -> dict[str, Any]: + """Build search parameters dictionary.""" + search_params: dict[str, Any] = {"max_results": max_results} + + if objective is not None: + search_params["objective"] = objective + if search_queries is not None: + search_params["search_queries"] = search_queries + if self.mode is not None: + search_params["mode"] = self.mode + if excerpts is not None: + search_params["excerpts"] = excerpts + if self.fetch_policy is not None: + search_params["fetch_policy"] = self.fetch_policy + if source_policy is not None: + search_params["source_policy"] = source_policy + + return search_params - def _format_output(self, result: dict[str, Any]) -> str: - # Return the full JSON payload (search_id + results) as a compact JSON string + def _format_output(self, result: Any) -> str: + """Format the search response as a JSON string.""" try: - import json + # Handle SDK response object - convert to dict if needed + if hasattr(result, "model_dump"): + data = result.model_dump() + elif hasattr(result, "to_dict"): + data = result.to_dict() + elif hasattr(result, "__dict__"): + data = dict(result.__dict__) + else: + data = result - return json.dumps(result or {}, ensure_ascii=False) + return json.dumps(data or {}, ensure_ascii=False, default=str) except Exception: return str(result or {}) diff --git a/lib/crewai-tools/tests/tools/parallel_search_tool_test.py b/lib/crewai-tools/tests/tools/parallel_search_tool_test.py index 453fc259b7..3e4e05b723 100644 --- a/lib/crewai-tools/tests/tools/parallel_search_tool_test.py +++ b/lib/crewai-tools/tests/tools/parallel_search_tool_test.py @@ -1,44 +1,120 @@ import json -from unittest.mock import patch -from urllib.parse import urlparse +from unittest.mock import MagicMock, patch + +import pytest from crewai_tools.tools.parallel_tools.parallel_search_tool import ( + PARALLEL_AVAILABLE, ParallelSearchTool, ) -def test_requires_env_var(monkeypatch): - monkeypatch.delenv("PARALLEL_API_KEY", raising=False) - tool = ParallelSearchTool() - result = tool.run(objective="test") - assert "PARALLEL_API_KEY" in result - - -@patch("crewai_tools.tools.parallel_tools.parallel_search_tool.requests.post") -def test_happy_path(mock_post, monkeypatch): - monkeypatch.setenv("PARALLEL_API_KEY", "test") - - mock_post.return_value.status_code = 200 - mock_post.return_value.json.return_value = { - "search_id": "search_123", - "results": [ - { - "url": "https://www.un.org/en/about-us/history-of-the-un", - "title": "History of the United Nations", - "excerpts": [ - "Four months after the San Francisco Conference ended, the United Nations officially began, on 24 October 1945..." 
+@pytest.fixture +def mock_parallel_client(): + """Create a mock Parallel client with search response.""" + mock_client = MagicMock() + mock_client.beta.search.return_value = MagicMock( + model_dump=MagicMock( + return_value={ + "search_id": "search_123", + "results": [ + { + "url": "https://www.un.org/en/about-us/history-of-the-un", + "title": "History of the United Nations", + "excerpts": [ + "Four months after the San Francisco Conference ended, the United Nations officially began, on 24 October 1945..." + ], + } ], } - ], - } - - tool = ParallelSearchTool() - result = tool.run( - objective="When was the UN established?", search_queries=["Founding year UN"] + ) ) + return mock_client + + +def test_requires_parallel_web_package(monkeypatch): + """Test that the tool requires parallel-web package to be installed.""" + monkeypatch.delenv("PARALLEL_API_KEY", raising=False) + + with patch( + "crewai_tools.tools.parallel_tools.parallel_search_tool.PARALLEL_AVAILABLE", + False, + ): + with pytest.raises(ImportError, match="parallel-web"): + ParallelSearchTool() + + +def test_requires_env_var(monkeypatch, mock_parallel_client): + """Test that the tool requires PARALLEL_API_KEY to be set.""" + monkeypatch.delenv("PARALLEL_API_KEY", raising=False) + + with patch( + "crewai_tools.tools.parallel_tools.parallel_search_tool.PARALLEL_AVAILABLE", + True, + ), patch( + "crewai_tools.tools.parallel_tools.parallel_search_tool.Parallel", + return_value=mock_parallel_client, + ): + with pytest.raises(ValueError, match="PARALLEL_API_KEY"): + ParallelSearchTool() + + +@pytest.mark.skipif(not PARALLEL_AVAILABLE, reason="parallel-web not installed") +def test_happy_path(monkeypatch, mock_parallel_client): + """Test successful search with the Parallel SDK.""" + monkeypatch.setenv("PARALLEL_API_KEY", "test-key") + + with patch( + "crewai_tools.tools.parallel_tools.parallel_search_tool.Parallel", + return_value=mock_parallel_client, + ): + tool = ParallelSearchTool() + result = tool.run( + objective="When was the UN established?", + search_queries=["Founding year UN"], + ) + data = json.loads(result) assert "search_id" in data - urls = [r.get("url", "") for r in data.get("results", [])] - # Validate host against allowed set instead of substring matching - allowed_hosts = {"www.un.org", "un.org"} - assert any(urlparse(u).netloc in allowed_hosts for u in urls) + assert "results" in data + assert len(data["results"]) > 0 + assert data["results"][0]["url"] == "https://www.un.org/en/about-us/history-of-the-un" + + +@pytest.mark.skipif(not PARALLEL_AVAILABLE, reason="parallel-web not installed") +def test_requires_objective_or_queries(monkeypatch, mock_parallel_client): + """Test that at least one of objective or search_queries is required.""" + monkeypatch.setenv("PARALLEL_API_KEY", "test-key") + + with patch( + "crewai_tools.tools.parallel_tools.parallel_search_tool.Parallel", + return_value=mock_parallel_client, + ): + tool = ParallelSearchTool() + result = tool.run() + + assert "Error" in result + assert "objective" in result.lower() or "search_queries" in result.lower() + + +@pytest.mark.skipif(not PARALLEL_AVAILABLE, reason="parallel-web not installed") +def test_runtime_parameter_override(monkeypatch, mock_parallel_client): + """Test that max_results and source_policy can be overridden at runtime.""" + monkeypatch.setenv("PARALLEL_API_KEY", "test-key") + + with patch( + "crewai_tools.tools.parallel_tools.parallel_search_tool.Parallel", + return_value=mock_parallel_client, + ): + tool = ParallelSearchTool( + 
max_results=10, source_policy={"include": ["init.com"]} + ) + tool.run( + objective="test query", + max_results=5, + source_policy={"include": ["runtime.com"]}, + ) + + call_kwargs = mock_parallel_client.beta.search.call_args.kwargs + assert call_kwargs["max_results"] == 5 + assert call_kwargs["source_policy"] == {"include": ["runtime.com"]} diff --git a/uv.lock b/uv.lock index 08eafb1f4d..573d8d5c4b 100644 --- a/uv.lock +++ b/uv.lock @@ -1373,6 +1373,9 @@ mysql = [ oxylabs = [ { name = "oxylabs" }, ] +parallel-web = [ + { name = "parallel-web" }, +] patronus = [ { name = "patronus" }, ] @@ -1456,6 +1459,7 @@ requires-dist = [ { name = "nest-asyncio", marker = "extra == 'bedrock'", specifier = ">=1.6.0" }, { name = "nest-asyncio", marker = "extra == 'contextual'", specifier = ">=1.6.0" }, { name = "oxylabs", marker = "extra == 'oxylabs'", specifier = "==2.0.0" }, + { name = "parallel-web", marker = "extra == 'parallel-web'", specifier = ">=0.3.4" }, { name = "patronus", marker = "extra == 'patronus'", specifier = ">=0.0.16" }, { name = "playwright", marker = "extra == 'bedrock'", specifier = ">=1.52.0" }, { name = "psycopg2-binary", marker = "extra == 'postgresql'", specifier = ">=2.9.10" }, @@ -1485,7 +1489,7 @@ requires-dist = [ { name = "weaviate-client", marker = "extra == 'weaviate-client'", specifier = ">=4.10.2" }, { name = "youtube-transcript-api", specifier = "~=1.2.2" }, ] -provides-extras = ["apify", "beautifulsoup4", "bedrock", "browserbase", "composio-core", "contextual", "couchbase", "databricks-sdk", "exa-py", "firecrawl-py", "github", "hyperbrowser", "linkup-sdk", "mcp", "mongodb", "multion", "mysql", "oxylabs", "patronus", "postgresql", "qdrant-client", "rag", "scrapegraph-py", "scrapfly-sdk", "selenium", "serpapi", "singlestore", "snowflake", "spider-client", "sqlalchemy", "stagehand", "tavily-python", "weaviate-client", "xml"] +provides-extras = ["apify", "beautifulsoup4", "bedrock", "browserbase", "composio-core", "contextual", "couchbase", "databricks-sdk", "exa-py", "firecrawl-py", "github", "hyperbrowser", "linkup-sdk", "mcp", "mongodb", "multion", "mysql", "oxylabs", "parallel-web", "patronus", "postgresql", "qdrant-client", "rag", "scrapegraph-py", "scrapfly-sdk", "selenium", "serpapi", "singlestore", "snowflake", "spider-client", "sqlalchemy", "stagehand", "tavily-python", "weaviate-client", "xml"] [[package]] name = "cryptography" @@ -1890,7 +1894,7 @@ name = "exceptiongroup" version = "1.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" } wheels = [ @@ -4373,7 +4377,7 @@ name = "nvidia-cudnn-cu12" version = "9.10.2.21" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "nvidia-cublas-cu12" }, + { name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/ba/51/e123d997aa098c61d029f76663dedbfb9bc8dcf8c60cbd6adbe42f76d049/nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_x86_64.whl", hash = 
"sha256:949452be657fa16687d0930933f032835951ef0892b37d2d53824d1a84dc97a8", size = 706758467, upload-time = "2025-06-06T21:54:08.597Z" }, @@ -4384,7 +4388,7 @@ name = "nvidia-cufft-cu12" version = "11.3.3.83" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "nvidia-nvjitlink-cu12" }, + { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/1f/13/ee4e00f30e676b66ae65b4f08cb5bcbb8392c03f54f2d5413ea99a5d1c80/nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d2dd21ec0b88cf61b62e6b43564355e5222e4a3fb394cac0db101f2dd0d4f74", size = 193118695, upload-time = "2025-03-07T01:45:27.821Z" }, @@ -4411,9 +4415,9 @@ name = "nvidia-cusolver-cu12" version = "11.7.3.90" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "nvidia-cublas-cu12" }, - { name = "nvidia-cusparse-cu12" }, - { name = "nvidia-nvjitlink-cu12" }, + { name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, + { name = "nvidia-cusparse-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, + { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/85/48/9a13d2975803e8cf2777d5ed57b87a0b6ca2cc795f9a4f59796a910bfb80/nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:4376c11ad263152bd50ea295c05370360776f8c3427b30991df774f9fb26c450", size = 267506905, upload-time = "2025-03-07T01:47:16.273Z" }, @@ -4424,7 +4428,7 @@ name = "nvidia-cusparse-cu12" version = "12.5.8.93" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "nvidia-nvjitlink-cu12" }, + { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/c2/f5/e1854cb2f2bcd4280c44736c93550cc300ff4b8c95ebe370d0aa7d2b473d/nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1ec05d76bbbd8b61b06a80e1eaf8cf4959c3d4ce8e711b65ebd0443bb0ebb13b", size = 288216466, upload-time = "2025-03-07T01:48:13.779Z" }, @@ -4484,9 +4488,9 @@ name = "ocrmac" version = "1.0.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "click" }, - { name = "pillow" }, - { name = "pyobjc-framework-vision" }, + { name = "click", marker = "sys_platform == 'darwin'" }, + { name = "pillow", marker = "sys_platform == 'darwin'" }, + { name = "pyobjc-framework-vision", marker = "sys_platform == 'darwin'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/dd/dc/de3e9635774b97d9766f6815bbb3f5ec9bce347115f10d9abbf2733a9316/ocrmac-1.0.0.tar.gz", hash = "sha256:5b299e9030c973d1f60f82db000d6c2e5ff271601878c7db0885e850597d1d2e", size = 1463997, upload-time = "2024-11-07T12:00:00.197Z" } wheels = [ @@ -4910,6 +4914,23 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ab/5f/b38085618b950b79d2d9164a711c52b10aefc0ae6833b96f626b7021b2ed/pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = 
"sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a", size = 13098436, upload-time = "2024-09-20T13:09:48.112Z" }, ] +[[package]] +name = "parallel-web" +version = "0.3.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "distro" }, + { name = "httpx" }, + { name = "pydantic" }, + { name = "sniffio" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fd/c7/2d784abf966cc87aaa07bff44f109bf659a1ef9998ce4a0ce13b05600594/parallel_web-0.3.4.tar.gz", hash = "sha256:eae6e20b87a43f475bb05df7295e506989c6bd38d322380da204d2ed0bb6e556", size = 131133, upload-time = "2025-11-13T00:29:34.215Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/eb/6e/c21754fe48505d2bc112d322cf8de7bd84f035a6f331d86acb548d0b0387/parallel_web-0.3.4-py3-none-any.whl", hash = "sha256:2804e84ebba789e475901c9aeb88c10045c2d07a2afd9bbc05e317725785c720", size = 137028, upload-time = "2025-11-13T00:29:32.037Z" }, +] + [[package]] name = "paramiko" version = "4.0.0" @@ -6082,7 +6103,7 @@ name = "pyobjc-framework-cocoa" version = "12.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "pyobjc-core" }, + { name = "pyobjc-core", marker = "sys_platform == 'darwin'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/02/a3/16ca9a15e77c061a9250afbae2eae26f2e1579eb8ca9462ae2d2c71e1169/pyobjc_framework_cocoa-12.1.tar.gz", hash = "sha256:5556c87db95711b985d5efdaaf01c917ddd41d148b1e52a0c66b1a2e2c5c1640", size = 2772191, upload-time = "2025-11-14T10:13:02.069Z" } wheels = [ @@ -6098,8 +6119,8 @@ name = "pyobjc-framework-coreml" version = "12.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "pyobjc-core" }, - { name = "pyobjc-framework-cocoa" }, + { name = "pyobjc-core", marker = "sys_platform == 'darwin'" }, + { name = "pyobjc-framework-cocoa", marker = "sys_platform == 'darwin'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/30/2d/baa9ea02cbb1c200683cb7273b69b4bee5070e86f2060b77e6a27c2a9d7e/pyobjc_framework_coreml-12.1.tar.gz", hash = "sha256:0d1a4216891a18775c9e0170d908714c18e4f53f9dc79fb0f5263b2aa81609ba", size = 40465, upload-time = "2025-11-14T10:14:02.265Z" } wheels = [ @@ -6115,8 +6136,8 @@ name = "pyobjc-framework-quartz" version = "12.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "pyobjc-core" }, - { name = "pyobjc-framework-cocoa" }, + { name = "pyobjc-core", marker = "sys_platform == 'darwin'" }, + { name = "pyobjc-framework-cocoa", marker = "sys_platform == 'darwin'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/94/18/cc59f3d4355c9456fc945eae7fe8797003c4da99212dd531ad1b0de8a0c6/pyobjc_framework_quartz-12.1.tar.gz", hash = "sha256:27f782f3513ac88ec9b6c82d9767eef95a5cf4175ce88a1e5a65875fee799608", size = 3159099, upload-time = "2025-11-14T10:21:24.31Z" } wheels = [ @@ -6132,10 +6153,10 @@ name = "pyobjc-framework-vision" version = "12.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "pyobjc-core" }, - { name = "pyobjc-framework-cocoa" }, - { name = "pyobjc-framework-coreml" }, - { name = "pyobjc-framework-quartz" }, + { name = "pyobjc-core", marker = "sys_platform == 'darwin'" }, + { name = "pyobjc-framework-cocoa", marker = "sys_platform == 'darwin'" }, + { name = "pyobjc-framework-coreml", marker = "sys_platform == 'darwin'" }, + { name = "pyobjc-framework-quartz", marker = "sys_platform == 'darwin'" }, ] sdist = { url = 
"https://files.pythonhosted.org/packages/c2/5a/08bb3e278f870443d226c141af14205ff41c0274da1e053b72b11dfc9fb2/pyobjc_framework_vision-12.1.tar.gz", hash = "sha256:a30959100e85dcede3a786c544e621ad6eb65ff6abf85721f805822b8c5fe9b0", size = 59538, upload-time = "2025-11-14T10:23:21.979Z" } wheels = [