Batch processing
Process multiple files efficiently.
Overview
Learn how to analyze multiple recordings in parallel while capping the number of concurrent requests so you stay within API rate limits.
Node.js example
import { Mappa } from "@mappa-ai/mappa-node";
// Initialize the SDK client. The non-null assertion (!) assumes
// MAPPA_API_KEY is set in the environment; if it is not, the constructor
// receives undefined — NOTE(review): confirm the SDK validates this.
const mappa = new Mappa({
apiKey: process.env.MAPPA_API_KEY!,
});
// Publicly reachable recording URLs to process as one batch.
const urls = [
"https://example.com/recording1.mp3",
"https://example.com/recording2.mp3",
"https://example.com/recording3.mp3",
];
// Process in parallel with concurrency limit
/**
 * Generate a report for every URL, running at most `concurrency`
 * requests at a time.
 *
 * URLs are processed in sequential batches: each batch of up to
 * `concurrency` requests is awaited with `Promise.all` before the next
 * batch starts, keeping the number of in-flight requests bounded.
 *
 * @param urls - Recording URLs to process.
 * @param concurrency - Maximum simultaneous requests (default 3).
 * @returns Reports in the same order as `urls`.
 */
async function processBatch(urls: string[], concurrency = 3) {
  // Clamp to >= 1: a zero or negative concurrency would otherwise make
  // `i += concurrency` below loop forever over empty batches.
  const batchSize = Math.max(1, Math.floor(concurrency));
  const results = [];
  for (let i = 0; i < urls.length; i += batchSize) {
    const batch = urls.slice(i, i + batchSize);
    const promises = batch.map((url) =>
      mappa.reports.generateFromUrl({
        url,
        output: { template: "general_report" },
      })
    );
    // Promise.all rejects on the first failure; switch to
    // Promise.allSettled if partial results should be kept.
    const batchResults = await Promise.all(promises);
    results.push(...batchResults);
  }
  return results;
}
// Kick off the batch; `await` at module scope requires top-level await (ESM).
const reports = await processBatch(urls);
Python example
import asyncio
import os  # required: os.environ is read below but was not imported

from mappa import AsyncMappa

# Initialize the async SDK client. os.environ[...] raises KeyError here
# if MAPPA_API_KEY is not set, failing fast at startup.
mappa = AsyncMappa(api_key=os.environ["MAPPA_API_KEY"])

# Publicly reachable recording URLs to process as one batch.
urls = [
    "https://example.com/recording1.mp3",
    "https://example.com/recording2.mp3",
    "https://example.com/recording3.mp3",
]
async def process_batch(urls, concurrency=3):
    """Generate a report for every URL with bounded parallelism.

    All tasks are created up front; a semaphore caps the number of
    in-flight requests at ``concurrency``.

    Args:
        urls: Iterable of recording URLs.
        concurrency: Maximum simultaneous requests (default 3).

    Returns:
        List of reports in the same order as ``urls``.
    """
    # Clamp to >= 1: Semaphore(0) would deadlock every task, and a
    # negative value raises ValueError.
    semaphore = asyncio.Semaphore(max(1, concurrency))

    async def process_one(url):
        # Hold one semaphore slot for the duration of the request.
        async with semaphore:
            return await mappa.reports.generate_from_url(
                url=url,
                output={"template": "general_report"},
            )

    # gather() preserves input order; the first exception propagates.
    return await asyncio.gather(*[process_one(url) for url in urls])
# asyncio.run() creates an event loop, runs the batch to completion,
# and closes the loop.
reports = asyncio.run(process_batch(urls))