# Integration Guides
This page shows how to integrate TextPrompts with popular AI frameworks and libraries.
Pydantic AI
Pydantic AI is a Python agent framework designed to make it less painful to build production-grade applications with generative AI.
Basic Integration
from textprompts import load_prompt
from pydantic_ai import Agent

# Load the system prompt from its file
system_prompt = load_prompt("prompts/customer_agent.txt")

# Build the agent, filling in the template placeholders up front
agent = Agent(
    'openai:gpt-4',
    system_prompt=system_prompt.prompt.format(
        company_name="ACME Corp",
        support_level="premium",
    ),
)

# Run the agent synchronously
result = agent.run_sync("Help me with my order")
Advanced Usage with Dependencies
When you need to tailor the system prompt using request-specific context, you can
register a formatter with the @agent.system_prompt decorator. This keeps the
prompt logic close to the agent while still allowing you to compute any helper
values that the template depends on.
from textprompts import load_prompt
from pydantic_ai import Agent, RunContext
from pydantic import BaseModel
class CustomerContext(BaseModel):
    # Request-scoped data used to render the system prompt
    customer_id: str
    tier: str
    region: str
# Minimal helper so the example stands alone
def get_regional_policies(region: str) -> str:
    """Return the support-policy blurb for *region*, or the global default."""
    if region == "US":
        return "Follow U.S. consumer protection regulations and ACME's premium guarantees."
    if region == "EU":
        return "Comply with EU return directives and ACME's standard guarantees."
    return "Apply ACME's global support policy with local adjustments as needed."
# Load the prompt template once at module level
agent_prompt = load_prompt("prompts/contextual_agent.txt")

# The agent declares its dependency type; the prompt text is supplied per request
agent = Agent(
    'openai:gpt-4',
    deps_type=CustomerContext,
)

@agent.system_prompt
def contextual_prompt(ctx: RunContext[CustomerContext]) -> str:
    """Render the system prompt from the per-request customer context."""
    deps = ctx.deps
    return agent_prompt.prompt.format(
        customer_tier=deps.tier,
        region=deps.region,
        policies=get_regional_policies(deps.region),
    )

# Build the per-request context and run the agent
context = CustomerContext(
    customer_id="cust_123",
    tier="premium",
    region="US",
)
result = agent.run_sync("What's my refund policy?", deps=context)
OpenAI
Chat Completions
import openai
from textprompts import load_prompt

# Load both prompt files up front
system_prompt = load_prompt("prompts/assistant_system.txt")
user_prompt_template = load_prompt("prompts/user_query.txt")

# Fill each template before building the request
system_text = system_prompt.prompt.format(
    domain="technical support",
    tone="helpful and detailed",
)
user_text = user_prompt_template.prompt.format(
    query="How do I reset my password?",
    context="mobile app",
)

# Send the chat completion request
response = openai.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "system", "content": system_text},
        {"role": "user", "content": user_text},
    ],
)
Function Calling
import openai
from textprompts import load_prompt

# The tool description itself comes from a prompt file
function_prompt = load_prompt("prompts/function_descriptions.txt")

weather_description = function_prompt.prompt.format(
    function_name="get_weather",
    purpose="Get current weather for a location",
)

tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": weather_description,
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {"type": "string"},
                },
            },
        },
    },
]

response = openai.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    tools=tools,
)
LangChain
Prompt Templates
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
from textprompts import load_prompt

# Load the raw template text
template_prompt = load_prompt("prompts/analysis_template.txt")

# Wrap it in a LangChain PromptTemplate
prompt = PromptTemplate(
    template=str(template_prompt.prompt),
    input_variables=["document", "question", "context"],
)

# Compose prompt and model into a runnable chain, then invoke it
llm = OpenAI()
chain = prompt | llm
result = chain.invoke({
    "document": "Financial report content...",
    "question": "What are the key risks?",
    "context": "Q4 2024 analysis",
})
Chat Templates
from langchain.prompts import ChatPromptTemplate
from textprompts import load_prompt

# Load system and user prompt files
system_prompt = load_prompt("prompts/chat_system.txt")
user_prompt = load_prompt("prompts/chat_user.txt")

# Use (role, template) tuples so the {placeholders} in the files stay live
# template variables. Wrapping the text in SystemMessage/HumanMessage objects
# (which also weren't imported) would freeze it, and format_messages() could
# no longer substitute values.
chat_prompt = ChatPromptTemplate.from_messages([
    ("system", str(system_prompt.prompt)),
    ("human", str(user_prompt.prompt)),
])

# Substitute the template variables and render the message list
messages = chat_prompt.format_messages(
    role="helpful assistant",
    user_query="Explain quantum computing",
    difficulty_level="beginner",
)
for message in messages:
    print(f"{message.type}: {message.content}")
Anthropic Claude
Direct API Usage
import anthropic
from textprompts import load_prompt

# Load prompt files
system_prompt = load_prompt("prompts/claude_system.txt")
user_prompt = load_prompt("prompts/claude_user.txt")

client = anthropic.Anthropic()

# Claude takes the system prompt as a dedicated `system` argument,
# separate from the messages list
response = client.messages.create(
    model="claude-3-sonnet-20240229",
    max_tokens=1000,
    system=system_prompt.prompt.format(
        expertise="software engineering",
        communication_style="technical but accessible",
    ),
    messages=[
        {
            "role": "user",
            "content": user_prompt.prompt.format(
                task="code review",
                code_snippet="...",
                focus_areas="performance, security, maintainability",
            ),
        },
    ],
)
Hugging Face Transformers
Text Generation
from transformers import pipeline
from textprompts import load_prompt

# Load the generation template
prompt_template = load_prompt("prompts/text_generation.txt")

# Build a text-generation pipeline
generator = pipeline("text-generation", model="gpt2")

# Fill in the template, then generate
prompt = prompt_template.prompt.format(
    topic="artificial intelligence",
    style="informative",
    length="medium",
)
result = generator(prompt, max_length=200, num_return_sequences=1)
Chat Templates
from transformers import AutoTokenizer
from textprompts import load_prompt

# Load the conversation template
chat_template = load_prompt("prompts/chat_template.txt")

tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")

# Render the conversation text from the template
conversation = chat_template.prompt.format(
    user_message="Hello, how are you?",
    context="friendly conversation",
    personality="helpful and engaging",
)

# Tokenize the rendered text for generation
inputs = tokenizer.encode(conversation, return_tensors="pt")
Ollama
Local Model Integration
import ollama
from textprompts import load_prompt

# Load the system prompt
system_prompt = load_prompt("prompts/ollama_system.txt")

# Chat with a locally served model
response = ollama.chat(
    model='llama2',
    messages=[
        {
            'role': 'system',
            'content': system_prompt.prompt.format(
                domain="creative writing",
                tone="imaginative and engaging",
            ),
        },
        {
            'role': 'user',
            'content': 'Write a short story about a robot learning to paint',
        },
    ],
)
LlamaIndex
Query Engine
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.prompts import PromptTemplate
from textprompts import load_prompt

# Load the QA template text
query_template = load_prompt("prompts/query_template.txt")

# Build the index from local documents
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)

# text_qa_template must be a PromptTemplate object, not a formatted string.
# Pre-fill only the instruction-level variables here; the template file is
# expected to keep LlamaIndex's {context_str}/{query_str} placeholders
# (escaped as {{context_str}}/{{query_str}} so .format() passes them through)
# for query time.
text_qa_prompt = PromptTemplate(
    query_template.prompt.format(
        instruction="Answer based on the context provided",
        format="bullet points",
        tone="concise and informative",
    )
)

# Create the query engine once with the custom prompt
# (the original created it twice, the second time with an undefined name)
query_engine = index.as_query_engine(text_qa_template=text_qa_prompt)

# Query
response = query_engine.query("What are the main benefits?")
Streamlit
Interactive Prompt Builder
import streamlit as st
from pathlib import Path
from textprompts import load_prompt, PromptString

# Discover every titled prompt under prompts/
prompt_dict = {}
for path in Path("prompts/").rglob("*.txt"):
    p = load_prompt(path)
    if p.meta and p.meta.title:
        prompt_dict[p.meta.title] = p

# UI
st.title("Prompt Builder")

# Let the user pick a prompt by title
selected_prompt = st.selectbox(
    "Choose a prompt:",
    options=list(prompt_dict.keys()),
)

if selected_prompt:
    prompt = prompt_dict[selected_prompt]

    # Show metadata
    st.subheader("Prompt Info")
    st.write(f"**Title:** {prompt.meta.title}")
    st.write(f"**Version:** {prompt.meta.version}")
    st.write(f"**Description:** {prompt.meta.description}")

    # Pull the {placeholder} names out of the template
    import re
    variables = re.findall(r'\{([^}]+)\}', prompt.prompt)

    # One input field per template variable
    st.subheader("Variables")
    values = {var: st.text_input(f"{var}:", key=var) for var in variables}

    # Render the filled-in prompt on demand
    if st.button("Generate"):
        try:
            result = prompt.prompt.format(**values)
            st.subheader("Result")
            st.text_area("Generated prompt:", result, height=200)
        except ValueError as e:
            st.error(f"Error: {e}")
FastAPI
Prompt Management API
from pathlib import Path
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from textprompts import load_prompt, TextPromptsError
from typing import Dict, Optional

app = FastAPI()

# Prompt registry, populated once at startup
prompts = {}

@app.on_event("startup")
async def load_all_prompts():
    """Scan prompts/ and index every titled prompt by its title."""
    global prompts
    try:
        for path in Path("prompts/").rglob("*.txt"):
            p = load_prompt(path)
            if p.meta and p.meta.title:
                prompts[p.meta.title] = p
    except TextPromptsError as e:
        print(f"Failed to load prompts: {e}")

class FormatRequest(BaseModel):
    # Title of the prompt plus the values for its placeholders
    prompt_name: str
    variables: Dict[str, str]

@app.post("/format")
async def format_prompt(request: FormatRequest):
    """Fill a named prompt's template variables and return the result."""
    prompt = prompts.get(request.prompt_name)
    if prompt is None:
        raise HTTPException(status_code=404, detail="Prompt not found")
    try:
        return {"result": prompt.prompt.format(**request.variables)}
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))

@app.get("/prompts")
async def list_prompts():
    """Return title/version/description metadata for every loaded prompt."""
    return {
        name: {
            "title": prompt.meta.title,
            "version": prompt.meta.version,
            "description": prompt.meta.description,
        }
        for name, prompt in prompts.items()
    }
Best Practices
1. Prompt Versioning
from pathlib import Path
from textprompts import load_prompt
from packaging import version

def get_prompt_version(name: str, version_req: str = "latest"):
    """Return the prompt titled *name* at the requested version.

    Args:
        name: Prompt title to match against ``meta.title``.
        version_req: Exact version string, or ``"latest"`` for the highest
            semantic version found.

    Raises:
        ValueError: If no prompt with that title (or that version) exists.
            The original leaked a bare ``ValueError`` from ``max()`` and a
            ``StopIteration`` from ``next()`` in these cases.
    """
    matching = []
    for path in Path("prompts/").rglob("*.txt"):
        p = load_prompt(path)
        if p.meta and p.meta.title == name:
            matching.append(p)
    if not matching:
        raise ValueError(f"No prompt found with title {name!r}")
    if version_req == "latest":
        # Prompts without a version sort lowest
        return max(matching, key=lambda p: version.parse(p.meta.version or "0.0.0"))
    for p in matching:
        if p.meta.version == version_req:
            return p
    raise ValueError(f"Prompt {name!r} has no version {version_req!r}")
2. Environment Configuration
import os
from textprompts import load_prompt

def load_environment_prompt(name: str):
    """Load prompts/<ENVIRONMENT>/<name>.txt, falling back to prompts/default/.

    The environment comes from the ENVIRONMENT variable ("development"
    when unset).
    """
    env = os.getenv("ENVIRONMENT", "development")
    try:
        # Try the environment-specific prompt first
        return load_prompt(f"prompts/{env}/{name}.txt")
    except Exception:
        # Was a bare `except:`, which also traps KeyboardInterrupt/SystemExit;
        # narrow to Exception so those still propagate. Fall back to default.
        return load_prompt(f"prompts/default/{name}.txt")
3. Caching for Performance
from functools import lru_cache
from textprompts import load_prompt

@lru_cache(maxsize=128)
def cached_prompt(path: str):
    """Load the prompt at *path*, caching up to 128 distinct paths.

    NOTE(review): the cache key is the raw path string, so "a.txt" and
    "./a.txt" cache separately, and file edits are not seen until the
    process restarts or the cache is cleared.
    """
    return load_prompt(path)
4. Validation Pipeline
from pathlib import Path
from textprompts import load_prompt
import re

def validate_prompt_collection(directory: str):
    """Validate every *.txt prompt under *directory* (recursively).

    Prints a WARNING for prompts missing a title and an INFO line listing
    the template variables each prompt uses.
    """
    for path in Path(directory).rglob("*.txt"):
        prompt = load_prompt(path)
        # Check metadata
        if not prompt.meta or not prompt.meta.title:
            print(f"WARNING: {prompt.path} missing title")
        # Gate on the actual matches: the original's `"{" in ... and "}" in ...`
        # guard could print "uses variables: []" (e.g. for a literal "{}").
        variables = re.findall(r'\{([^}]+)\}', prompt.prompt)
        if variables:
            print(f"INFO: {prompt.path} uses variables: {variables}")