Break a task into discrete stages and render each as its own named span. No framework needed; just withSpan with kind='agent' for each stage.
- Node.js
import trodo from 'trodo-node';
import OpenAI from 'openai';

trodo.init({ siteId: process.env.TRODO_SITE_ID });
const openai = new OpenAI();

async function triage(question) {
  return trodo.withSpan({ kind: 'agent', name: 'triage' }, async (span) => {
    span.setInput({ question });
    const r = await openai.chat.completions.create({
      model: 'gpt-4o-mini',
      messages: [{ role: 'user', content: `Classify intent: ${question}` }],
    });
    const intent = r.choices[0].message.content.trim();
    span.setOutput({ intent });
    return intent;
  });
}

async function research(intent, question) {
  return trodo.withSpan({ kind: 'agent', name: 'research' }, async (span) => {
    span.setInput({ intent, question });
    // Your retrieval / API calls here, auto-captured as child spans.
    // `kb` stands in for your own knowledge-base client.
    const facts = await kb.search(question);
    span.setOutput({ factCount: facts.length });
    return facts;
  });
}

async function write(question, facts) {
  return trodo.withSpan({ kind: 'agent', name: 'write' }, async (span) => {
    span.setInput({ question, factCount: facts.length });
    const r = await openai.chat.completions.create({
      model: 'gpt-4o-mini',
      messages: [{ role: 'user', content: `Answer: ${question}\nFacts: ${JSON.stringify(facts)}` }],
    });
    const answer = r.choices[0].message.content;
    span.setOutput({ answer });
    return answer;
  });
}

export async function support(userId, question) {
  const { result } = await trodo.wrapAgent('multi-step', async (run) => {
    run.setInput({ question });
    const intent = await triage(question);
    const facts = await research(intent, question);
    const answer = await write(question, facts);
    run.setOutput({ answer, intent });
    return answer;
  }, { distinctId: userId });
  return result;
}
- Python

import os

import trodo
from openai import OpenAI

trodo.init(site_id=os.environ["TRODO_SITE_ID"])
client = OpenAI()

def triage(question):
    with trodo.span("triage", kind="agent") as span:
        span.set_input({"question": question})
        r = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": f"Classify intent: {question}"}],
        )
        intent = r.choices[0].message.content.strip()
        span.set_output({"intent": intent})
        return intent

def research(intent, question):
    with trodo.span("research", kind="agent") as span:
        span.set_input({"intent": intent, "question": question})
        # Your retrieval / API calls here, auto-captured as child spans.
        # `kb` stands in for your own knowledge-base client.
        facts = kb.search(question)
        span.set_output({"factCount": len(facts)})
        return facts

def write(question, facts):
    with trodo.span("write", kind="agent") as span:
        span.set_input({"question": question, "factCount": len(facts)})
        r = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": f"Answer: {question}\nFacts: {facts}"}],
        )
        answer = r.choices[0].message.content
        span.set_output({"answer": answer})
        return answer

def support(user_id, question):
    with trodo.wrap_agent("multi-step", distinct_id=user_id) as run:
        run.set_input({"question": question})
        intent = triage(question)
        facts = research(intent, question)
        answer = write(question, facts)
        run.set_output({"answer": answer, "intent": intent})
        return answer
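Calling the entry point produces one run with the three stages nested inside it. A minimal invocation sketch (Node.js; the user ID and question are illustrative values, not part of the API):

// Illustrative values; any distinct user ID and question string will do.
const answer = await support('user-123', 'How do I reset my password?');
console.log(answer);

The resulting trace renders as the waterfall below.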
Waterfall
run: multi-step
├─ agent triage
│  └─ llm openai.chat.completions
├─ agent research
│  └─ (your retrieval spans)
└─ agent write
   └─ llm openai.chat.completions
kind='agent' spans render as stage headers in the dashboard; LLM / tool / retrieval spans nest underneath.
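If a call isn't auto-captured, you can hand-instrument it inside a stage. A sketch, assuming kind='retrieval' is accepted the same way kind='agent' is above, and that spans opened inside a stage become its children (as the auto-captured LLM calls do):

// Inside the research stage: an explicit retrieval child span.
// kind: 'retrieval' is an assumption; the examples above only show kind: 'agent'.
const facts = await trodo.withSpan({ kind: 'retrieval', name: 'kb.search' }, async (span) => {
  span.setInput({ question });
  const hits = await kb.search(question); // `kb` is your own client, as above
  span.setOutput({ hitCount: hits.length });
  return hits;
});

Under these assumptions, the span would appear under agent research in the waterfall, in place of the (your retrieval spans) placeholder.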
See also
- Recipes → Context manager — same pattern split across multiple files.
- Tracing → Spans.