Skip to main content

Documentation Index

Fetch the complete documentation index at: https://docs.trodo.ai/docs/llms.txt

Use this file to discover all available pages before exploring further.

An agent loop that calls the model, executes tool calls, feeds results back, and repeats until done. Each round produces one llm span; each tool call produces one tool span. The result is a deeply nested waterfall in the dashboard.
import trodo from 'trodo-node';
import Anthropic from '@anthropic-ai/sdk';

// Initialize tracing once at module load; all spans are attributed to this site.
trodo.init({ siteId: process.env.TRODO_SITE_ID });
// Anthropic client — presumably picks up credentials from the environment
// (e.g. ANTHROPIC_API_KEY); not shown here, confirm against SDK docs.
const anthropic = new Anthropic();

// Tool implementations, keyed by the tool names advertised to the model.
// Each handler receives the model's full tool_use `input` object (shaped by
// that tool's input_schema), so the named field must be destructured —
// previously the handlers treated the whole object as the scalar value,
// producing e.g. `doc for [object Object]`.
const TOOLS = {
  // Stub KB search: returns one fake hit for the query string.
  search_kb:     async ({ query }) => ({ hits: [{ id: 'kb-1', title: `doc for ${query}` }] }),
  // Stub ticket creation: random numeric id; `title` accepted but unused here.
  create_ticket: async ({ title }) => ({ ticketId: 'T-' + Math.floor(Math.random() * 1e6) }),
  // Stub notifier: pretends to email `to`.
  send_email:    async ({ to })    => ({ sent: true, to }),
};

/**
 * Executes a named tool inside a trodo `tool` span so each call appears as
 * its own node in the trace waterfall.
 *
 * @param {string} name - Tool name; must be a key of TOOLS.
 * @param {object} args - Parsed tool input from the model's tool_use block.
 * @returns {Promise<object>} The tool's result payload.
 * @throws {Error} If `name` is not a registered tool, or the tool itself throws
 *   (either way the span is recorded as an error).
 */
async function callTool(name, args) {
  return trodo.withSpan({ kind: 'tool', name, toolName: name }, async (span) => {
    span.setInput(args);
    try {
      const impl = TOOLS[name];
      if (typeof impl !== 'function') {
        // Fail with a clear message instead of the opaque
        // "TOOLS[name] is not a function" TypeError.
        throw new Error(`unknown tool: ${name}`);
      }
      const out = await impl(args);
      span.setOutput(out);
      return out;
    } catch (err) {
      // Annotate retryability so the dashboard can distinguish transient failures.
      span.setAttribute('error.retryable', !!err.retryable);
      throw err; // rethrow: span recorded as error
    }
  });
}

/**
 * Runs the tool-calling agent loop for a single task, traced as one
 * `tool-loop` run attributed to `userId`.
 *
 * @param {string} userId - Distinct id the run is attributed to.
 * @param {string} task - Natural-language task for the agent.
 * @returns {Promise<string>} The model's final text answer.
 * @throws {Error} When the model is still requesting tools after 5 rounds.
 */
export async function runAgent(userId, task) {
  const { result, runId } = await trodo.wrapAgent('tool-loop', async (run) => {
    run.setInput({ task });

    const conversation = [{ role: 'user', content: task }];

    // At most 5 model rounds; each round either answers or requests tools.
    for (let round = 1; round <= 5; round++) {
      const response = await anthropic.messages.create({
        model: 'claude-3-5-sonnet-latest',
        max_tokens: 1024,
        tools: [
          { name: 'search_kb',     description: 'Search the KB',      input_schema: { type: 'object', properties: { query: { type: 'string' } } } },
          { name: 'create_ticket', description: 'Create a ticket',    input_schema: { type: 'object', properties: { title: { type: 'string' } } } },
          { name: 'send_email',    description: 'Send a notification',input_schema: { type: 'object', properties: { to: { type: 'string' } } } },
        ],
        messages: conversation,
      });

      conversation.push({ role: 'assistant', content: response.content });

      const requestedTools = response.content.filter((block) => block.type === 'tool_use');
      if (requestedTools.length === 0) {
        // No tool requests: the model is done — surface its text answer.
        const answer = response.content.find((block) => block.type === 'text')?.text || '';
        run.setOutput({ text: answer, rounds: round });
        return answer;
      }

      // Run requested tools one at a time so tool spans appear in call order.
      const results = [];
      for (const request of requestedTools) {
        const output = await callTool(request.name, request.input);
        results.push({ type: 'tool_result', tool_use_id: request.id, content: JSON.stringify(output) });
      }
      conversation.push({ role: 'user', content: results });
    }

    throw new Error('tool loop exceeded max rounds');
  }, { distinctId: userId });

  return result;
}

What the waterfall looks like

run: tool-loop
  ├─ llm  anthropic.messages.create  (round 1)
  ├─ tool search_kb
  ├─ llm  anthropic.messages.create  (round 2)
  ├─ tool create_ticket
  ├─ tool send_email
  └─ llm  anthropic.messages.create  (final)
Tool spans carry tool_name, input args, and output. LLM spans carry model, tokens, and cost.

See also