Skip to content

API Reference

load_agent

load_agent

load_agent(
    path: str | Path,
    *,
    model_override: str | None = None,
    logfire_token: str | None = None,
) -> TextAgent[Any]

Load a TextAgent from a file.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `path` | `str \| Path` | Path to the agent definition file (.txt with TOML front-matter) | *required* |
| `model_override` | `str \| None` | Override the model specified in the file | `None` |
| `logfire_token` | `str \| None` | Logfire token for auto-instrumentation (or set LOGFIRE_TOKEN env var) | `None` |

Returns:

| Type | Description |
| --- | --- |
| `TextAgent[Any]` | A configured TextAgent ready to run. |

Raises:

| Type | Description |
| --- | --- |
| `FileNotFoundError` | If the file doesn't exist. |
| `AgentDefinitionError` | If the agent definition is invalid. |

Example
# Basic usage
text_agent = load_agent("judges/safety.txt")

# Override model for testing
text_agent = load_agent(
    "judges/safety.txt",
    model_override="openai:gpt-4.1-nano"
)
Source code in src/textagents/loader.py
def load_agent(
    path: str | Path,
    *,
    model_override: str | None = None,
    logfire_token: str | None = None,
) -> TextAgent[Any]:
    """Load a TextAgent from an agent definition file.

    Args:
        path: Path to the agent definition file (.txt with TOML front-matter)
        model_override: Replaces the model named in the file, when given
        logfire_token: Logfire token for auto-instrumentation
                      (or set LOGFIRE_TOKEN env var)

    Returns:
        A configured TextAgent ready to run.

    Raises:
        FileNotFoundError: If the file doesn't exist.
        AgentDefinitionError: If the agent definition is invalid.

    Example:
        ```python
        # Basic usage
        text_agent = load_agent("judges/safety.txt")

        # Override model for testing
        text_agent = load_agent(
            "judges/safety.txt",
            model_override="openai:gpt-4.1-nano"
        )
        ```
    """
    agent_path = Path(path)

    # Fail fast with a clear error rather than letting the parser trip on it.
    if not agent_path.exists():
        raise FileNotFoundError(f"Agent file not found: {agent_path}")

    # Turn on Logfire instrumentation when a token is available.
    _maybe_configure_logfire(logfire_token)

    spec = parse_agent_file(agent_path)

    if model_override:
        # Swap in the caller-supplied model; the rest of the spec is untouched.
        spec = replace(spec, model=model_override)

    return create_text_agent(spec)
import textagents

# Basic
agent = textagents.load_agent("agent.txt")

# With model override
agent = textagents.load_agent("agent.txt", model_override="openai:gpt-4o-mini")

# With Logfire
agent = textagents.load_agent("agent.txt", logfire_token="...")

TextAgent

TextAgent

TextAgent(
    spec: AgentSpec,
    output_model: type[OutputT],
    agent: Agent[None, OutputT],
)

Bases: Generic[OutputT]

A text-defined PydanticAI agent wrapper.

Provides a simplified interface for running agents defined in .txt files with TOML front-matter. Handles template interpolation, input validation, and forwards to the underlying PydanticAI Agent.

Example
text_agent = load_agent("safety_judge.txt")

# Async (primary)
result = await text_agent.run(user_input="Hello", model_output="Hi there!")
print(result.reasoning)
print(result.no_hate)  # True

# Sync convenience
result = text_agent.run_sync(user_input="Hello", model_output="Hi there!")

# Advanced: access underlying PydanticAI agent
full_result = await text_agent.agent.run("custom prompt")
print(full_result.all_messages())

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| `spec` | `AgentSpec` | The parsed agent specification |
| `output_model` | `type[OutputT]` | The dynamically generated Pydantic model for outputs |
| `agent` | `Agent[None, OutputT]` | The underlying PydanticAI agent (public for advanced usage) |

Initialize TextAgent.

Use load_agent() instead of constructing directly.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `spec` | `AgentSpec` | Parsed agent specification | *required* |
| `output_model` | `type[OutputT]` | Generated Pydantic output model | *required* |
| `agent` | `Agent[None, OutputT]` | Configured PydanticAI agent | *required* |
Source code in src/textagents/agent.py
def __init__(
    self,
    spec: AgentSpec,
    output_model: type[OutputT],
    agent: Agent[None, OutputT],
) -> None:
    """Initialize TextAgent.

    Use load_agent() instead of constructing directly.

    Args:
        spec: Parsed agent specification
        output_model: Generated Pydantic output model
        agent: Configured PydanticAI agent
    """
    # All three attributes are deliberately public; no copying or
    # validation happens here — load_agent() builds them correctly.
    self.spec, self.output_model, self.agent = spec, output_model, agent

name property

name: str

Agent name (from spec or derived from filename).

model property

model: str

Model identifier string.

input_names property

input_names: list[str]

List of expected input variable names.

required_inputs property

required_inputs: list[str]

List of required (non-optional) input names.

run async

run(**inputs: Any) -> OutputT

Run the agent asynchronously with the given inputs.

Parameters:

Name Type Description Default
**inputs Any

Named inputs matching agent.input_type definitions. Use @filepath syntax to load from file.

{}

Returns:

Type Description
OutputT

The validated output model instance (result.output from PydanticAI).

Raises:

| Type | Description |
| --- | --- |
| `MissingInputError` | If required inputs are not provided. |
| `InputTypeError` | If input types cannot be coerced. |

Source code in src/textagents/agent.py
async def run(self, **inputs: Any) -> OutputT:
    """Run the agent asynchronously with the given inputs.

    Args:
        **inputs: Named inputs matching agent.input_type definitions.
                 Use @filepath syntax to load from file.

    Returns:
        The validated output model instance (result.output from PydanticAI).

    Raises:
        MissingInputError: If required inputs are not provided.
        InputTypeError: If input types cannot be coerced.
    """
    # File loading, type coercion, and magic variables are all handled
    # by the shared input pipeline before any templating happens.
    rendered_inputs = process_inputs(inputs, self.spec)

    user_message = interpolate_template(self.spec.prompt_template, rendered_inputs)

    # Instructions are interpolated per call so they can reference inputs.
    rendered_instructions = (
        interpolate_template(self.spec.instructions, rendered_inputs)
        if self.spec.instructions
        else None
    )

    # Only forward instructions when they render to a non-empty value;
    # otherwise let the agent fall back to its configured defaults.
    if rendered_instructions:
        result = await self.agent.run(user_message, instructions=rendered_instructions)
    else:
        result = await self.agent.run(user_message)

    return result.output

run_sync

run_sync(**inputs: Any) -> OutputT

Run the agent synchronously (convenience wrapper).

Equivalent to asyncio.run(self.run(**inputs)).

Parameters:

Name Type Description Default
**inputs Any

Named inputs matching agent.input_type definitions.

{}

Returns:

| Type | Description |
| --- | --- |
| `OutputT` | The validated output model instance. |

Source code in src/textagents/agent.py
def run_sync(self, **inputs: Any) -> OutputT:
    """Run the agent synchronously (convenience wrapper).

    Equivalent to asyncio.run(self.run(**inputs)).

    Args:
        **inputs: Named inputs matching agent.input_type definitions.

    Returns:
        The validated output model instance.
    """
    # Drive the async entry point on a fresh event loop.
    coroutine = self.run(**inputs)
    return asyncio.run(coroutine)
# Async
result = await agent.run(text="Hello")

# Sync
result = agent.run_sync(text="Hello")

# Properties
agent.name            # Agent name
agent.model           # Model string
agent.required_inputs # List of required input names

Results

Results are Pydantic model instances:

result.reasoning      # Access fields
result.model_dump()   # Convert to dict
result.model_dump_json()  # Convert to JSON