Skip to content

LangGraph

Test LangGraph state graphs and workflows with Tenro.

Experimental

This framework integration is experimental. Examples may break when the framework updates. See Compatibility for support definitions.

Tested with: langgraph==1.0.5

What you'll use

| Decorator | Purpose |
| --- | --- |
| `@link_tool` | Your custom tools (search, database, APIs) |
| `@link_agent` | Entry point for tracing |

No @link_llm needed. Tenro intercepts LLM calls at the HTTP level automatically.

Customer support example

A customer support workflow with state graph for retrieval and response generation.

"""Customer Support: Testing knowledge base retrieval with LangGraph."""

from __future__ import annotations

from examples.experimental.langgraph.myapp.agents import CustomerSupportAgent
from examples.myapp import search_knowledge_base

from tenro import Provider
from tenro.simulate import agent, llm, tool
from tenro.testing import tenro


@tenro
def test_customer_support_answers_question() -> None:
    """Verify the support agent answers a refund question via KB lookup and LLM.

    Arranges a simulated knowledge-base hit and a canned LLM reply, runs the
    agent once, then verifies the agent, the LLM provider, and exactly one
    tool invocation were all recorded.
    """
    expected = "You can get a full refund within 30 days of purchase."
    kb_hits = [{"title": "Refund Policy", "content": "Full refunds within 30 days."}]

    # Arrange: stub the tool result and the provider response.
    tool.simulate(search_knowledge_base, result=kb_hits)
    llm.simulate(Provider.OPENAI, response=expected)

    # Act: drive the agent with a single user question.
    answer = CustomerSupportAgent().run("How do I get a refund?")

    # Assert: final answer plus trace-level verification.
    assert answer == expected
    agent.verify(CustomerSupportAgent)
    llm.verify(Provider.OPENAI)
    tool.verify_many(search_knowledge_base, count=1)

RAG pipeline example

A retrieval-augmented generation workflow with document search nodes.

"""RAG Pipeline: Testing document retrieval with LangGraph."""

from __future__ import annotations

from examples.experimental.langgraph.myapp.agents import RAGPipeline
from examples.myapp import fetch_documents

from tenro import Provider
from tenro.simulate import agent, llm, tool
from tenro.testing import tenro


@tenro
def test_rag_pipeline_synthesizes_answer() -> None:
    """Verify the RAG pipeline retrieves documents and synthesizes an answer.

    Stubs the document-fetch tool with two documents and the LLM with a
    synthesized answer, runs the pipeline once, then verifies the agent,
    the provider, and a single tool call.
    """
    expected = "Machine learning is a field where algorithms learn patterns from data."
    documents = [
        {"id": "doc1", "text": "Machine learning uses algorithms to learn."},
        {"id": "doc2", "text": "Deep learning is a subset of ML."},
    ]

    # Arrange: stub retrieval and generation.
    tool.simulate(fetch_documents, result=documents)
    llm.simulate(Provider.OPENAI, response=expected)

    # Act: run the pipeline with a query and a topic.
    answer = RAGPipeline().run("What is machine learning?", "AI")

    # Assert: synthesized answer plus trace-level verification.
    assert answer == expected
    agent.verify(RAGPipeline)
    llm.verify(Provider.OPENAI)
    tool.verify_many(fetch_documents, count=1)

Multi-turn conversation example

A stateful workflow handling multi-turn conversations.

"""Multi-Turn Conversation: Testing sequential LLM calls with LangGraph."""

from __future__ import annotations

from examples.experimental.langgraph.myapp.agents import ConversationAgent

from tenro import Provider
from tenro.simulate import agent, llm
from tenro.testing import tenro


@tenro
def test_multi_turn_conversation() -> None:
    """Verify the conversation agent returns one reply per turn, in order.

    Queues two sequential LLM responses, runs the agent over a two-turn
    conversation, and checks that the replies come back in the same order,
    with exactly two provider calls recorded.
    """
    turn_replies = [
        "A list in Python is created with square brackets: my_list = [1, 2, 3]",
        "To add items, use append(): my_list.append(4)",
    ]

    # Arrange: each queued response is consumed by one LLM call.
    llm.simulate(Provider.OPENAI, responses=turn_replies)

    # Act: feed both user turns through the agent.
    answers = ConversationAgent().run(
        ["How do I create a list in Python?", "How do I add items to it?"]
    )

    # Assert: replies match the queue order; two LLM calls were made.
    assert answers == turn_replies
    agent.verify(ConversationAgent)
    llm.verify_many(Provider.OPENAI, count=2)

Key patterns

Agentic loop (LLM calls tool)

When the LLM decides to call a tool, then responds with the result:

from tenro import Provider, ToolCall
from tenro.simulate import llm, tool
# Assuming search_knowledge_base is defined with @link_tool("search_kb")

# 1. Set up the tool result. Pass the function reference itself (it must be
#    decorated with @link_tool elsewhere) — not a string name.
tool.simulate(search_knowledge_base, result={"content": "Full refunds within 30 days."})

# 2. Queue LLM responses in call order: the first response is a ToolCall that
#    triggers the tool above; the second is the final text reply the agent
#    produces after seeing the tool's result.
llm.simulate(Provider.OPENAI, responses=[
    ToolCall(search_knowledge_base, query="refund"),
    "You can get a full refund within 30 days.",
])

Multi-node graph

from tenro import Provider, ToolCall
from tenro.simulate import llm
# Sequential responses for different graph nodes: each node's LLM call
# consumes the next queued item, so order here must match node execution
# order — a ToolCall first, then one text response per subsequent node.
llm.simulate(Provider.OPENAI, responses=[
    ToolCall(retrieve, query="docs"),
    "Node 1: Retrieved documents.",
    "Node 2: Synthesized answer.",
])

Verifying

from tenro import Provider
from tenro.simulate import llm, tool
tool.verify_many(search_knowledge_base, count=1)  # tool was invoked exactly once
llm.verify_many(Provider.OPENAI, count=2)  # Tool request + final answer

See also