# Integrate Parallel Search with OpenAI Tool Calling
Give your OpenAI-powered applications real-time web search capabilities by integrating Parallel Search as a tool. This guide shows how to define Parallel Search as an OpenAI function and handle tool calls in your application.

Overview

OpenAI’s tool calling (formerly function calling) allows GPT models to output structured JSON indicating they want to call a function you’ve defined. Your application then executes the function and returns results to the model. By defining Parallel Search as a tool, your model can:
  • Search the web for current information
  • Access real-time news, research, and facts
  • Cite sources with URLs in responses

Prerequisites

  1. Get your Parallel API key from Platform
  2. Get your OpenAI API key from OpenAI
  3. Install the required SDKs:
pip install openai parallel-web
export PARALLEL_API_KEY="your-parallel-api-key"
export OPENAI_API_KEY="your-openai-api-key"

Define the Search Tool

First, define the Parallel search tool using OpenAI’s tool schema format:
# OpenAI tool (function) schema for the Parallel search tool.
# The model reads the "description" fields to decide when and how to call it.
# NOTE: the "at least one of objective or search_queries" requirement is
# stated only in prose — JSON Schema for tool parameters cannot express it,
# so the executing code must validate it.
parallel_search_tool = {
    "type": "function",
    "function": {
        "name": "parallel_search",
        "description": "Search the web for current information using Parallel's AI-powered search. Returns relevant excerpts from web pages optimized for LLM consumption. Use this for finding up-to-date information, news, research, and facts. Provide at least one of objective or search_queries.",
        "parameters": {
            "type": "object",
            "properties": {
                # High-level research goal, phrased in natural language.
                "objective": {
                    "type": "string",
                    "description": "A natural language description of what you're searching for. Be specific about your research goal."
                },
                # Concrete query strings; multiple queries widen coverage.
                "search_queries": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Specific search queries to execute. Use multiple queries to cover different angles of the search."
                },
                # Budget for total excerpt text returned across all results.
                "max_chars_total": {
                    "type": "integer",
                    "description": "Maximum total characters across all excerpts. Default 50000."
                }
            }
        }
    }
}

Implement the Search Function

Create a function that calls the Parallel Search API when the model requests it:
import os
import json
from parallel import Parallel

# Single module-level client, created once and reused for every tool call.
# Reads the API key from the PARALLEL_API_KEY environment variable;
# raises KeyError at import time if it is not set.
parallel_client = Parallel(api_key=os.environ["PARALLEL_API_KEY"])

def parallel_search(objective: str = None, search_queries: list = None, max_chars_total: int = 50000) -> dict:
    """Execute a search using the Parallel Search API.

    Args:
        objective: Natural-language description of the research goal.
        search_queries: Explicit query strings to execute.
        max_chars_total: Cap on total excerpt characters across all results.

    Returns:
        A dict with a "results" list of {"url", "title", "excerpts"} entries,
        trimmed to at most 3 excerpts per result to keep the LLM context small.

    Raises:
        ValueError: If neither ``objective`` nor ``search_queries`` is given —
            the Search API requires at least one (see the tool description).
    """
    # Fail fast with a clear message instead of forwarding an invalid request.
    if objective is None and not search_queries:
        raise ValueError("Provide at least one of 'objective' or 'search_queries'.")

    response = parallel_client.beta.search(
        objective=objective,
        search_queries=search_queries,
        excerpts={"max_chars_per_result": 5000, "max_chars_total": max_chars_total}
    )

    # Format results for the LLM: keep only the fields the model needs.
    return {
        "results": [
            {"url": r.url, "title": r.title, "excerpts": r.excerpts[:3] if r.excerpts else []}
            for r in response.results
        ]
    }

Process Tool Calls

Handle the tool calls returned by OpenAI:
def process_tool_calls(tool_calls):
    """Execute any parallel_search tool calls and package the results.

    Args:
        tool_calls: Tool-call objects taken from an OpenAI assistant message.

    Returns:
        A list of ``{"tool_call_id", "role", "content"}`` dicts ready to be
        appended to the conversation. Calls for other tool names are skipped.
    """
    tool_messages = []
    for call in tool_calls:
        # Guard clause: this handler only knows about parallel_search.
        if call.function.name != "parallel_search":
            continue
        parsed = json.loads(call.function.arguments)
        outcome = parallel_search(
            objective=parsed.get("objective"),
            search_queries=parsed.get("search_queries"),
            max_chars_total=parsed.get("max_chars_total", 50000)
        )
        tool_messages.append({
            "tool_call_id": call.id,
            "role": "tool",
            "content": json.dumps(outcome)
        })
    return tool_messages

Complete Example

Here’s a complete example that ties everything together:
import os
import json
from openai import OpenAI
from parallel import Parallel

# Initialize clients
openai_client = OpenAI()
parallel_client = Parallel(api_key=os.environ["PARALLEL_API_KEY"])

# Tool definition
# Tool definition (OpenAI function-calling schema for Parallel Search).
# This variant also exposes max_results; the "at least one of objective or
# search_queries" rule lives only in the description text, so the executing
# code is responsible for validating it.
parallel_search_tool = {
    "type": "function",
    "function": {
        "name": "parallel_search",
        "description": "Search the web for current information using Parallel's AI-powered search. Provide at least one of objective or search_queries.",
        "parameters": {
            "type": "object",
            "properties": {
                # High-level research goal in natural language.
                "objective": {
                    "type": "string",
                    "description": "A natural language description of what you're searching for."
                },
                # Concrete query strings to execute.
                "search_queries": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Specific search queries to execute."
                },
                # Upper bound on the number of search results returned.
                "max_results": {
                    "type": "integer",
                    "description": "Maximum results (1-20). Default 10."
                },
                # Budget for total excerpt text across all results.
                "max_chars_total": {
                    "type": "integer",
                    "description": "Maximum total characters across all excerpts. Default 50000."
                }
            }
        }
    }
}


def parallel_search(objective=None, search_queries=None, max_results=10, max_chars_total=50000):
    """Call the Parallel Search API and shape the response for the LLM.

    Args:
        objective: Natural-language description of the research goal.
        search_queries: Explicit query strings to execute.
        max_results: Maximum number of results to return (1-20).
        max_chars_total: Cap on total excerpt characters across all results.

    Returns:
        A dict with a "results" list of {"url", "title", "excerpts"} entries.

    Raises:
        ValueError: If neither ``objective`` nor ``search_queries`` is given —
            the Search API requires at least one.
    """
    # Fail fast with a clear message instead of forwarding an invalid request.
    if objective is None and not search_queries:
        raise ValueError("Provide at least one of 'objective' or 'search_queries'.")

    response = parallel_client.beta.search(
        objective=objective,
        search_queries=search_queries,
        max_results=max_results,
        excerpts={"max_chars_per_result": 5000, "max_chars_total": max_chars_total}
    )
    # Keep at most 3 excerpts per result to control prompt size.
    return {
        "results": [
            {"url": r.url, "title": r.title, "excerpts": r.excerpts[:3] if r.excerpts else []}
            for r in response.results
        ]
    }


def chat_with_search(user_message: str) -> str:
    """Answer ``user_message`` with GPT-4o, letting it call parallel_search.

    Runs a bounded loop of chat-completion rounds: whenever the model requests
    tool calls, they are executed and their results appended, then the model is
    queried again. The original single-round flow returned ``None`` if the
    second response requested tools again; the loop handles that case.

    Args:
        user_message: The user's question.

    Returns:
        The assistant's final text reply (may be ``None`` only if the round
        cap is hit while the model is still requesting tools).
    """
    messages = [
        {
            "role": "system",
            "content": "You are a helpful research assistant. Use the parallel_search tool to find current information. Always cite sources with URLs."
        },
        {"role": "user", "content": user_message}
    ]

    # Cap the number of tool-calling rounds to avoid an unbounded loop if the
    # model keeps requesting searches.
    for _ in range(5):
        response = openai_client.chat.completions.create(
            model="gpt-4o",
            messages=messages,
            tools=[parallel_search_tool],
            tool_choice="auto"
        )
        assistant_message = response.choices[0].message

        # No tool calls: the model produced its final answer.
        if not assistant_message.tool_calls:
            return assistant_message.content

        messages.append(assistant_message)

        for tool_call in assistant_message.tool_calls:
            if tool_call.function.name == "parallel_search":
                args = json.loads(tool_call.function.arguments)
                result = parallel_search(
                    objective=args.get("objective"),
                    search_queries=args.get("search_queries"),
                    max_results=args.get("max_results", 10),
                    max_chars_total=args.get("max_chars_total", 50000)
                )
                content = json.dumps(result)
            else:
                # Every tool_call must receive a tool message, otherwise the
                # next API call is rejected; report unknown tools explicitly.
                content = json.dumps({"error": f"Unknown tool: {tool_call.function.name}"})
            messages.append({
                "role": "tool",
                "tool_call_id": tool_call.id,
                "content": content
            })

    # Round cap reached: return whatever the last assistant message contained.
    return assistant_message.content


# Example usage
if __name__ == "__main__":
    # Live demo: requires OPENAI_API_KEY and PARALLEL_API_KEY to be set and
    # makes real network calls to both services.
    answer = chat_with_search("What are the latest developments in quantum computing?")
    print(answer)

Tool Parameters

| Parameter | Type | Required | Description |
| --- | --- | --- | --- |
| `objective` | string | No* | Natural language description of your search goal |
| `search_queries` | string[] | No* | Specific search queries to execute |
| `max_chars_total` | integer | No | Maximum total characters across all excerpts (default 50000) |
*At least one of objective or search_queries is required.
The complete example above shows additional optional parameters (max_results) that you can add to the tool definition for more control. See the Search API documentation for all available options.