Skip to content

Pydantic AI

Test Pydantic AI agents and structured outputs with Tenro.

Experimental

This framework integration is experimental. Examples may break when the framework updates. See Compatibility for support definitions.

Tested with: pydantic-ai==1.39.0

What you'll use

| Decorator     | Purpose                                    |
| ------------- | ------------------------------------------ |
| `@link_tool`  | Your custom tools (search, database, APIs) |
| `@link_agent` | Entry point for tracing                    |

No @link_llm needed. Tenro intercepts LLM calls at the HTTP level automatically.

Customer support example

A customer support agent that searches a knowledge base and generates responses.

"""Customer Support: Testing knowledge base retrieval with Pydantic AI."""

from __future__ import annotations

from examples.experimental.pydantic_ai.myapp.agents import CustomerSupportAgent
from examples.myapp import search_knowledge_base

from tenro import Provider, ToolCall
from tenro.simulate import agent, llm, tool
from tenro.testing import tenro


@tenro
def test_customer_support_answers_question() -> None:
    """Verify the support agent combines a knowledge-base lookup with an LLM answer."""
    # Canned knowledge-base hit returned whenever the agent's tool is invoked.
    kb_hit = {"title": "Refund Policy", "content": "Full refunds within 30 days."}
    tool.simulate(search_knowledge_base, result=[kb_hit])

    # Scripted LLM behavior: turn one requests the tool, turn two gives the answer.
    final_answer = "You can get a full refund within 30 days of purchase."
    llm.simulate(
        Provider.OPENAI,
        responses=[
            [ToolCall("search_kb")],
            final_answer,
        ],
    )

    result = CustomerSupportAgent().run("How do I get a refund?")

    assert result == final_answer
    agent.verify(CustomerSupportAgent)
    llm.verify_many(Provider.OPENAI, count=2)
    tool.verify_many(search_knowledge_base, count=1)

RAG pipeline example

A retrieval-augmented generation agent with document search.

"""RAG Pipeline: Testing document retrieval with Pydantic AI."""

from __future__ import annotations

from examples.experimental.pydantic_ai.myapp.agents import RAGPipeline
from examples.myapp import fetch_documents

from tenro import Provider, ToolCall
from tenro.simulate import agent, llm, tool
from tenro.testing import tenro


@tenro
def test_rag_pipeline_synthesizes_answer() -> None:
    """Verify the RAG pipeline retrieves documents and synthesizes an answer."""
    # Documents the simulated retrieval tool will hand back to the pipeline.
    corpus = [
        {"id": "doc1", "text": "Machine learning uses algorithms to learn."},
        {"id": "doc2", "text": "Deep learning is a subset of ML."},
    ]
    tool.simulate(fetch_documents, result=corpus)

    # Scripted LLM behavior: first a tool request, then the synthesized answer.
    synthesized = "Machine learning is a field where algorithms learn patterns from data."
    llm.simulate(
        Provider.OPENAI,
        responses=[
            [ToolCall("fetch_docs")],
            synthesized,
        ],
    )

    result = RAGPipeline().run("What is machine learning?", "AI")

    assert result == synthesized
    agent.verify(RAGPipeline)
    llm.verify_many(Provider.OPENAI, count=2)
    tool.verify_many(fetch_documents, count=1)

Multi-turn conversation example

An agent handling multi-turn conversations with context.

"""Multi-Turn Conversation: Testing sequential LLM calls with Pydantic AI."""

from __future__ import annotations

from examples.experimental.pydantic_ai.myapp.agents import ConversationAgent

from tenro import Provider
from tenro.simulate import agent, llm
from tenro.testing import tenro


@tenro
def test_multi_turn_conversation() -> None:
    """Verify the agent answers each turn of a two-question conversation in order."""
    # One scripted reply per conversational turn; reused below as the expectation.
    turn_replies = [
        "A list in Python is created with square brackets: my_list = [1, 2, 3]",
        "To add items, use append(): my_list.append(4)",
    ]
    llm.simulate(Provider.OPENAI, responses=turn_replies)

    questions = ["How do I create a list in Python?", "How do I add items to it?"]
    responses = ConversationAgent().run(questions)

    assert responses == turn_replies
    agent.verify(ConversationAgent)
    llm.verify_many(Provider.OPENAI, count=2)

Key patterns

Agentic loop (LLM calls tool)

When the LLM decides to call a tool, then responds with the result:

from tenro import Provider, ToolCall
from tenro.simulate import llm, tool
# Assuming search_knowledge_base is defined with @link_tool("kb_search")

# 1. Set up tool result (use function reference)
tool.simulate(search_knowledge_base, result={"content": "Full refunds within 30 days."})

# 2. Set up LLM responses: first triggers tool, second is final response
# NOTE(review): here ToolCall takes a function reference plus kwargs and is a
# bare list item, while the full tests wrap it in a list and pass a string
# name (e.g. [ToolCall("search_kb")]) — confirm which form the API expects.
llm.simulate(Provider.OPENAI, responses=[
    ToolCall(search_knowledge_base, query="refund policy"),
    "You can get a full refund within 30 days.",
])

Verifying

from tenro import Provider
from tenro.simulate import llm, tool
# Assert the exact number of tool and LLM interactions recorded by the run.
tool.verify_many(search_knowledge_base, count=1)
llm.verify_many(Provider.OPENAI, count=2)  # Tool request + final answer

See also