# Fully qualified — uses this exact modelctx.llm.generate(..., model="anthropic:claude-sonnet-4-6")# Bare model name with decorator provider — resolves automatically@agent(..., llm={"provider": "anthropic", "model": "claude-sonnet-4-6"})def execute(prompt, ctx): # No model arg — uses decorator default result = ctx.llm.generate(...) # Override per-call result = ctx.llm.generate(..., model="claude-haiku-4-5")
Resolution cascade:
Fully qualified per-call (provider:model) — use directly
Bare per-call + decorator provider — resolve
No per-call model + decorator default — use default
result.text # Generated text (None for generate_object)result.object # Structured output dict (None for generate)result.model # Model identifier used (e.g., "anthropic:claude-sonnet-4-6")result.usage # {"prompt_tokens": 120, "completion_tokens": 250}result.finish_reason # "stop", "length", etc.
from friday_agent_sdk import LlmError, agent, err, ok


@agent(id="retry-agent", version="1.0.0", description="Retries on failure")
def execute(prompt, ctx):
    """Generate with an expensive model, falling back to a cheaper one on LlmError."""
    try:
        result = ctx.llm.generate(..., model="expensive-model")
    except LlmError as e:
        # Fallback to cheaper model
        result = ctx.llm.generate(..., model="claude-haiku-4-5")
    return ok({"output": result.text})