For multiple URLs, use the async endpoint to submit all tasks at once, then poll for results. This avoids blocking while waiting for each task to complete.
import { TinyFish, RunStatus } from "@tiny-fish/sdk";const client = new TinyFish();async function processBatch(tasks: { url: string; goal: string }[]) { // Submit all tasks const responses = await Promise.all( tasks.map((task) => client.agent.queue(task)) ); // Poll for completion const maxAttempts = 150; // 5 minutes at 2s intervals const results = await Promise.all( responses.map(async (r) => { if (r.error) { throw r.error; } for (let attempt = 0; attempt < maxAttempts; attempt++) { const run = await client.runs.get(r.run_id); if ( run.status === RunStatus.COMPLETED || run.status === RunStatus.FAILED || run.status === RunStatus.CANCELLED ) { return run; } await new Promise((resolve) => setTimeout(resolve, 2000)); } throw new Error(`Run ${r.run_id} timed out after ${maxAttempts} attempts`); }) ); return results;}// Usageasync function main() { const results = await processBatch([ { url: "https://example.com/page1", goal: "Extract product info" }, { url: "https://example.com/page2", goal: "Extract product info" }, ]); console.log(results);}main();
A run with COMPLETED status means the agent finished, but the result may still describe a failure (e.g., the site showed a captcha or access-denied page). Always validate the result content.
# Heuristic markers that indicate a "completed" run actually hit a wall
# (captcha page, access-denied screen, agent gave up mid-goal, ...).
_FAILURE_SIGNALS = ("captcha", "blocked", "access denied", "could not", "unable to")


def is_real_success(result):
    """COMPLETED status is necessary but not sufficient.

    Returns False for empty results and for results whose text contains
    any known failure marker; True otherwise.
    """
    if not result:
        return False
    text = str(result).lower()
    return all(signal not in text for signal in _FAILURE_SIGNALS)


# Usage
from tinyfish import TinyFish, RunStatus

client = TinyFish()
run = client.agent.run(
    url="https://example.com",
    goal="Extract pricing data. Return as JSON.",
)

# The run must both finish AND pass the content-level sanity check.
succeeded = run.status == RunStatus.COMPLETED and is_real_success(run.result)
if succeeded:
    print("Success:", run.result)
else:
    print("Needs retry or manual review")
TinyFish has concurrency limits based on your plan. The SDK automatically retries 429 and 5xx errors with exponential backoff, making up to maxRetries retry attempts on top of the initial request (default 2).
import { TinyFish, RateLimitError } from "@tiny-fish/sdk";

// Retry behavior is configured on the client itself; the SDK backs off
// exponentially on 429 and 5xx responses before giving up.
const client = new TinyFish({ maxRetries: 3 }); // default is 2

async function main() {
  try {
    const run = await client.agent.run({
      url: "https://example.com",
      goal: "Extract data",
    });
    console.log(run.result);
  } catch (e: unknown) {
    // Only reached once the SDK's automatic retries are exhausted.
    if (e instanceof RateLimitError) {
      console.log("Rate limited after all retries exhausted");
    }
    throw e; // rethrow so the failure still surfaces to the caller
  }
}

main();
Use vault credentials to automate sites that require login. Connect your password manager once via the vault setup, then pass credentials to each run.
# Use all vault credentials
curl --request POST https://agent.tinyfish.ai/v1/automation/run \
  --header "X-API-Key: $TINYFISH_API_KEY" \
  --header "Content-Type: application/json" \
  --data '{
    "url": "https://www.linkedin.com",
    "goal": "Log in and extract the latest 3 posts from my feed. Return as JSON.",
    "use_vault": true
  }'

# Scope to a specific credential
curl --request POST https://agent.tinyfish.ai/v1/automation/run \
  --header "X-API-Key: $TINYFISH_API_KEY" \
  --header "Content-Type: application/json" \
  --data '{
    "url": "https://www.linkedin.com",
    "goal": "Log in and extract the latest 3 posts from my feed. Return as JSON.",
    "use_vault": true,
    "credential_item_ids": ["cred:conn-abc:Work:item-linkedin"]
  }'
Use credential_item_ids to scope runs to specific credentials when you have multiple accounts on the same domain. See Vault Credentials for details.
Search for URLs, then fetch full content from the top results:
from tinyfish import TinyFish

client = TinyFish()

# Step 1: find candidate pages for the topic.
search_results = client.search.query("best python web frameworks 2026")

# Step 2: pull full markdown content for the top three hits.
top_urls = []
for hit in search_results.results[:3]:
    top_urls.append(hit.url)
fetched = client.fetch.get_contents(urls=top_urls, format="markdown")

# Show a short preview of each fetched page.
for page in fetched.results:
    print(f"{page.title}: {page.text[:200]}...")