Documentation Index
Fetch the complete documentation index at: https://docs.trodo.ai/docs/llms.txt
Use this file to discover all available pages before exploring further.
The smallest useful agent: wrap one LLM call, capture input and output, attach feedback after the user reacts.
import trodo from 'trodo-node';
import OpenAI from 'openai';
// Initialize Trodo once per process; siteId scopes every captured run to your site.
trodo.init({ siteId: process.env.TRODO_SITE_ID });
// The OpenAI client is picked up by Trodo's OpenAI auto-instrumenter, so each
// completions call emits an LLM span (model, tokens, cost) automatically.
const openai = new OpenAI();
/**
 * Answer a support question inside a traced 'support-bot' run.
 *
 * Wraps a single chat-completion call so Trodo captures the run's input,
 * output, and (via auto-instrumentation) the underlying LLM span, then
 * attaches user feedback to the run.
 *
 * @param {string} userId - Distinct id of the end user.
 * @param {string} conversationId - Groups related runs into one conversation.
 * @param {string} question - The user's question.
 * @returns {Promise<string>} The assistant's reply text.
 */
export async function answer(userId, conversationId, question) {
  // The traced body: record input, call the model, record output.
  const runQuestion = async (run) => {
    run.setInput({ question });
    const prompt = [
      { role: 'system', content: 'You are a concise support assistant.' },
      { role: 'user', content: question },
    ];
    const completion = await openai.chat.completions.create({
      model: 'gpt-4o-mini',
      messages: prompt,
    });
    const reply = completion.choices[0].message.content;
    run.setOutput({ answer: reply });
    return reply;
  };

  const { result, runId } = await trodo.wrapAgent('support-bot', runQuestion, {
    distinctId: userId,
    conversationId,
  });

  // When the user clicks thumbs-up on your UI:
  await trodo.feedback(runId, { satisfaction: 'positive', rating: 5 });
  return result;
}
import trodo, os
from openai import OpenAI
# Initialize Trodo once per process; the site id scopes every captured run.
trodo.init(site_id=os.environ["TRODO_SITE_ID"])
# The OpenAI client is picked up by Trodo's OpenAI auto-instrumenter, so each
# completions call emits an LLM span (model, tokens, cost) automatically.
client = OpenAI()
def answer(user_id, conversation_id, question):
    """Answer a support question inside a traced 'support-bot' run.

    Records the run's input and output on the Trodo run, lets the OpenAI
    auto-instrumenter capture the LLM span, then attaches user feedback.

    :param user_id: Distinct id of the end user.
    :param conversation_id: Groups related runs into one conversation.
    :param question: The user's question.
    :return: The assistant's reply text.
    """
    with trodo.wrap_agent(
        "support-bot",
        distinct_id=user_id,
        conversation_id=conversation_id,
    ) as run:
        run.set_input({"question": question})
        completion = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": "You are a concise support assistant."},
                {"role": "user", "content": question},
            ],
        )
        reply = completion.choices[0].message.content
        run.set_output({"answer": reply})
    # When the user clicks thumbs-up on your UI:
    trodo.feedback(run.run_id, satisfaction="positive", rating=5)
    return reply
What gets captured
- Run — support-bot, status ok, duration end-to-end, tokens + cost rolled up from the LLM span.
- Span — one kind='llm' span with model, provider, input/output tokens, cost, and temperature auto-filled by the OpenAI auto-instrumenter.
- Feedback — one positive / rating 5 row linked to the run.
See also