API Rate Limiting Strategies: Scale Social Media Data Collection
Your scraper runs perfectly for 10 minutes. Then it crashes.
Rate limit exceeded. 429 error. Blocked for 15 minutes.
You restart it. Same thing happens. Your data collection is stuck. Your app is broken. Your users are waiting.
I have hit every rate limit imaginable. Instagram blocked me for a day. Twitter API cut me off mid-scrape. TikTok throttled my requests to nothing.
Now I handle millions of API requests per day without issues. I never hit rate limits. I never get blocked. My scrapers scale smoothly.
Let me show you how to handle rate limits professionally so you can scale without breaking things.
Why Rate Limits Exist
APIs have limits to protect their infrastructure. Too many requests crash servers, slow down everyone, and cost money.
Common Rate Limit Patterns
- Per-second limits: 10 requests per second
- Per-minute limits: 100 requests per minute
- Per-hour limits: 5,000 requests per hour
- Per-day limits: 50,000 requests per day
Hit any of these and you get blocked temporarily.
The Cost of Hitting Limits
When you hit a rate limit:
- Your requests fail (wasted time)
- You get blocked (no data collection)
- Your app crashes (bad user experience)
- You might get permanently banned (game over)
Professional developers never hit rate limits. They stay just under them.
Strategy 1: Exponential Backoff
When you get rate limited, wait before retrying. But wait longer each time.
const axios = require('axios');
/**
 * GET `url` with exponential backoff on 429 (rate limit) responses.
 *
 * @param {string} url - Full request URL.
 * @param {object} headers - Request headers (e.g. API key).
 * @param {number} [maxRetries=5] - Attempts before giving up.
 * @returns {Promise<any>} Parsed response body.
 * @throws {Error} After maxRetries rate-limit failures (original error
 *   attached as `cause`), or immediately on any non-429 error.
 */
async function requestWithBackoff(url, headers, maxRetries = 5) {
  let retries = 0;
  let delay = 1000; // Initial backoff: 1 second

  while (retries < maxRetries) {
    try {
      const response = await axios.get(url, { headers });
      return response.data;
    } catch (error) {
      if (error.response && error.response.status === 429) {
        retries++;
        if (retries >= maxRetries) {
          throw new Error('Max retries exceeded', { cause: error });
        }
        // Prefer the server's Retry-After header (seconds) when present;
        // it tells us exactly how long the block lasts. Fall back to our
        // computed exponential delay otherwise.
        const retryAfter = Number(error.response.headers?.['retry-after']);
        const waitMs =
          Number.isFinite(retryAfter) && retryAfter > 0 ? retryAfter * 1000 : delay;
        console.log(`Rate limited. Waiting ${waitMs}ms before retry ${retries}/${maxRetries}`);
        await new Promise(resolve => setTimeout(resolve, waitMs));
        // Double the delay for the next retry (exponential backoff: 1s, 2s, 4s, ...)
        delay *= 2;
      } else {
        // Different error (network failure, 4xx/5xx) - propagate immediately.
        throw error;
      }
    }
  }
}
// Usage
// Fetch one profile, retrying automatically on 429 responses.
try {
  const data = await requestWithBackoff(
    'https://api.sociavault.com/instagram/profile?handle=example',
    { 'X-API-Key': process.env.SOCIAVAULT_API_KEY }
  );
  console.log('Data retrieved:', data);
} catch (error) {
  // Reached only after maxRetries rate-limit failures, or on a non-429 error.
  console.error('Failed after all retries:', error.message);
}
Why this works: Exponential backoff gives the API time to recover. First retry after 1 second. Second retry after 2 seconds. Third after 4 seconds. And so on.
Strategy 2: Request Queue
Instead of sending all requests at once, queue them and send them at a controlled rate.
/**
 * FIFO queue that executes async request functions at a fixed rate,
 * spacing consecutive requests evenly so bursts never exceed the limit.
 */
class RateLimitedQueue {
  /**
   * @param {number} [requestsPerSecond=5] - Maximum sustained request rate.
   */
  constructor(requestsPerSecond = 5) {
    this.queue = [];
    this.requestsPerSecond = requestsPerSecond;
    // Minimum gap between the start of consecutive requests.
    this.interval = 1000 / requestsPerSecond;
    this.processing = false;
  }

  /**
   * Enqueue a request function. Resolves/rejects with that function's
   * own result once the queue reaches it.
   * @param {() => Promise<any>} requestFunction
   * @returns {Promise<any>}
   */
  add(requestFunction) {
    return new Promise((resolve, reject) => {
      this.queue.push({ requestFunction, resolve, reject });
      // Kick off the drain loop unless one is already running.
      if (!this.processing) this.process();
    });
  }

  // Drain the queue one item at a time, pausing `interval` ms between items.
  async process() {
    this.processing = true;
    while (this.queue.length > 0) {
      const { requestFunction, resolve, reject } = this.queue.shift();
      try {
        resolve(await requestFunction());
      } catch (err) {
        reject(err);
      }
      // Only sleep when more work remains - no trailing delay.
      if (this.queue.length > 0) {
        await new Promise(done => setTimeout(done, this.interval));
      }
    }
    this.processing = false;
  }

  /** @returns {number} Count of requests still waiting to run. */
  getQueueSize() {
    return this.queue.length;
  }
}
// Usage
const queue = new RateLimitedQueue(5); // 5 requests per second
// Add requests to queue
const profiles = ['user1', 'user2', 'user3', 'user4', 'user5', 'user6'];
// Promise.all resolves once every queued request has completed; the queue
// spaces the underlying HTTP calls ~200ms apart regardless of how many
// are added at once.
const results = await Promise.all(
  profiles.map(handle =>
    queue.add(async () => {
      const response = await axios.get(
        `https://api.sociavault.com/instagram/profile?handle=${handle}`,
        { headers: { 'X-API-Key': process.env.SOCIAVAULT_API_KEY } }
      );
      console.log(`Retrieved profile: ${handle}`);
      return response.data;
    })
  )
);
console.log(`Completed ${results.length} requests without hitting rate limits`);
Why this works: Queue ensures you never exceed your rate limit. Requests are spaced out evenly. No bursts that trigger blocking.
Strategy 3: Token Bucket Algorithm
More sophisticated rate limiting using token bucket pattern.
/**
 * Token-bucket rate limiter: allows short bursts up to `capacity` while
 * enforcing a steady long-term rate of `refillRate` tokens per second.
 */
class TokenBucket {
  /**
   * @param {number} capacity - Maximum tokens the bucket can hold.
   * @param {number} refillRate - Tokens credited per second.
   */
  constructor(capacity, refillRate) {
    this.capacity = capacity; // Max tokens
    this.tokens = capacity; // Current tokens (bucket starts full)
    this.refillRate = refillRate; // Tokens added per second
    this.lastRefill = Date.now();
  }

  // Credit tokens earned since the last refill, capped at capacity.
  refill() {
    const now = Date.now();
    const timePassed = (now - this.lastRefill) / 1000; // Convert to seconds
    const tokensToAdd = timePassed * this.refillRate;
    this.tokens = Math.min(this.capacity, this.tokens + tokensToAdd);
    this.lastRefill = now;
  }

  /**
   * Take `tokens` from the bucket, sleeping until enough are available.
   * @param {number} [tokens=1] - Tokens to consume.
   * @returns {Promise<true>}
   * @throws {RangeError} If more tokens are requested than the bucket
   *   can ever hold (would otherwise wait forever / go negative).
   */
  async consume(tokens = 1) {
    if (tokens > this.capacity) {
      // Previously this drove the balance negative and stalled every
      // later caller; fail loudly instead.
      throw new RangeError(`Cannot consume ${tokens} tokens: capacity is ${this.capacity}`);
    }
    this.refill();
    // Re-check after every wait: a concurrent consumer may have taken
    // tokens while we slept, and blindly subtracting once would let the
    // balance go negative (over-consuming the real rate limit).
    while (this.tokens < tokens) {
      const tokensNeeded = tokens - this.tokens;
      const waitTime = (tokensNeeded / this.refillRate) * 1000; // Convert to ms
      console.log(`Waiting ${Math.round(waitTime)}ms for token refill`);
      await new Promise(resolve => setTimeout(resolve, waitTime));
      this.refill();
    }
    this.tokens -= tokens;
    return true;
  }

  /** @returns {number} Whole tokens currently available. */
  getTokens() {
    this.refill();
    return Math.floor(this.tokens);
  }
}
// Usage
const bucket = new TokenBucket(100, 10); // 100 max tokens, refill 10 per second
// Each request costs one token; when the bucket runs dry the call pauses
// until enough tokens have been refilled.
async function makeRequest(url, headers) {
  await bucket.consume(1); // Consume 1 token
  const response = await axios.get(url, { headers });
  return response.data;
}
// Make multiple requests
// Sequential on purpose: each iteration awaits the previous request.
for (let i = 0; i < 50; i++) {
  await makeRequest(
    `https://api.sociavault.com/instagram/profile?handle=user${i}`,
    { 'X-API-Key': process.env.SOCIAVAULT_API_KEY }
  );
  console.log(`Request ${i + 1} completed. Tokens remaining: ${bucket.getTokens()}`);
}
Why this works: Token bucket allows bursts when you have tokens available, but smooths out requests over time. More flexible than fixed-rate queue.
Strategy 4: Distributed Rate Limiting
When multiple servers make requests, coordinate rate limits across all of them.
const Redis = require('ioredis');
const redis = new Redis();
/**
 * Sliding-window rate limiter shared across processes/servers via Redis.
 * Requests are recorded as members of a sorted set scored by timestamp.
 */
class DistributedRateLimiter {
  /**
   * @param {string} key - Redis sorted-set key shared by all servers.
   * @param {number} limit - Max requests allowed per window.
   * @param {number} windowSeconds - Window length in seconds.
   */
  constructor(key, limit, windowSeconds) {
    this.key = key;
    this.limit = limit;
    this.windowSeconds = windowSeconds;
  }

  /**
   * Atomically try to claim a slot in the current window.
   * @returns {Promise<boolean>} true if the request was admitted.
   */
  async checkLimit() {
    const now = Date.now();
    const windowStart = now - (this.windowSeconds * 1000);
    // The trim/count/add sequence MUST be atomic. Done as separate
    // commands (the naive approach), two servers can both observe
    // count === limit - 1 and both add, overshooting the limit - the
    // exact failure this class exists to prevent. A Lua script executes
    // as a single atomic unit inside Redis.
    const script = `
      redis.call('ZREMRANGEBYSCORE', KEYS[1], 0, ARGV[1])
      local count = redis.call('ZCARD', KEYS[1])
      if count < tonumber(ARGV[2]) then
        redis.call('ZADD', KEYS[1], ARGV[3], ARGV[4])
        redis.call('EXPIRE', KEYS[1], ARGV[5])
        return 1
      end
      return 0
    `;
    const allowed = await redis.eval(
      script,
      1,
      this.key,
      windowStart, // trim entries older than the window
      this.limit,
      now, // score for the new entry
      `${now}-${Math.random()}`, // unique member (timestamps can collide)
      this.windowSeconds * 2 // safety expiry so idle keys don't linger
    );
    return allowed === 1;
  }

  /**
   * Block until a slot is free, polling every 100ms.
   * @returns {Promise<true>}
   */
  async waitForSlot() {
    while (true) {
      const allowed = await this.checkLimit();
      if (allowed) {
        return true;
      }
      // Wait a bit before trying again
      await new Promise(resolve => setTimeout(resolve, 100));
    }
  }

  /** @returns {Promise<number>} Requests recorded in the current window. */
  async getCurrentCount() {
    const now = Date.now();
    const windowStart = now - (this.windowSeconds * 1000);
    await redis.zremrangebyscore(this.key, 0, windowStart);
    return await redis.zcard(this.key);
  }
}
// Usage across multiple servers
const limiter = new DistributedRateLimiter(
  'instagram:api:limit',
  100, // 100 requests
  60 // per 60 seconds
);
// Blocks until the shared Redis window has a free slot, so the combined
// request rate of every server stays under the limit.
async function makeDistributedRequest(url, headers) {
  await limiter.waitForSlot();
  const count = await limiter.getCurrentCount();
  console.log(`Making request (${count}/${limiter.limit} in window)`);
  const response = await axios.get(url, { headers });
  return response.data;
}
// Multiple servers can use this safely
await makeDistributedRequest(
  'https://api.sociavault.com/instagram/profile?handle=example',
  { 'X-API-Key': process.env.SOCIAVAULT_API_KEY }
);
Why this works: Redis coordinates all servers. Total requests from all servers never exceed limit. Critical for scaling horizontally.
Strategy 5: Smart Caching
Reduce API calls by caching responses.
/**
 * In-memory caching wrapper around GET requests: identical requests
 * within the cache window are served from memory instead of the API.
 */
class CachedAPIClient {
  /**
   * @param {number} [cacheDurationMs=300000] - Entry lifetime (default 5 min).
   */
  constructor(cacheDurationMs = 300000) { // Default 5 minutes
    this.cache = new Map();
    this.cacheDuration = cacheDurationMs;
  }

  /** Derive a stable cache key from the URL plus serialized params. */
  getCacheKey(url, params) {
    return `${url}:${JSON.stringify(params)}`;
  }

  /**
   * GET with caching: return a fresh cached response when available,
   * otherwise fetch, store, and prune expired entries.
   */
  async get(url, params, headers) {
    const cacheKey = this.getCacheKey(url, params);
    const entry = this.cache.get(cacheKey);
    const isFresh = entry !== undefined && Date.now() - entry.timestamp < this.cacheDuration;

    if (isFresh) {
      console.log(`Cache hit for ${cacheKey}`);
      return entry.data;
    }

    console.log(`Cache miss for ${cacheKey}, fetching from API`);
    const { data } = await axios.get(url, { params, headers });
    this.cache.set(cacheKey, { data, timestamp: Date.now() });
    // Opportunistic cleanup keeps the map from growing unbounded.
    this.cleanCache();
    return data;
  }

  /** Drop every entry older than the cache duration. */
  cleanCache() {
    const cutoff = Date.now() - this.cacheDuration;
    for (const [key, entry] of this.cache) {
      if (entry.timestamp <= cutoff) {
        this.cache.delete(key);
      }
    }
  }

  /** Empty the cache entirely. */
  clearCache() {
    this.cache.clear();
  }

  /** @returns {number} Number of entries currently cached. */
  getCacheSize() {
    return this.cache.size;
  }
}
// Usage
const client = new CachedAPIClient(300000); // 5 minute cache
// Thin wrapper so callers only pass the handle; the cache key is derived
// from the URL plus params, so each handle caches independently.
async function getProfile(handle) {
  return await client.get(
    'https://api.sociavault.com/instagram/profile',
    { handle },
    { 'X-API-Key': process.env.SOCIAVAULT_API_KEY }
  );
}
// First call hits API
const profile1 = await getProfile('example');
// Second call within 5 minutes hits cache (no API call)
const profile2 = await getProfile('example');
console.log(`Cache size: ${client.getCacheSize()}`);
Why this works: Caching eliminates redundant API calls. Same data requested within cache window returns instantly without using rate limit quota.
Strategy 6: Multiple API Keys
Use multiple API keys and rotate between them.
/**
 * Round-robin rotator over multiple API keys. When one key is rate
 * limited, the request is retried with the next key - but only until
 * every key has been tried once, to avoid infinite retry recursion.
 */
class APIKeyRotator {
  /**
   * @param {string[]} apiKeys - Pool of keys to rotate through.
   */
  constructor(apiKeys) {
    this.apiKeys = apiKeys;
    this.currentIndex = 0;
    this.keyUsage = new Map();
    // Initialize usage counters
    apiKeys.forEach(key => {
      this.keyUsage.set(key, {
        count: 0,
        lastReset: Date.now()
      });
    });
  }

  /** Return the next key in round-robin order and bump its usage count. */
  getNextKey() {
    const key = this.apiKeys[this.currentIndex];
    this.currentIndex = (this.currentIndex + 1) % this.apiKeys.length;
    const usage = this.keyUsage.get(key);
    usage.count++;
    return key;
  }

  /**
   * GET with automatic key failover on 429.
   * @param {string} url
   * @param {object} params
   * @param {number} [attemptsLeft] - Internal: remaining keys to try.
   * @returns {Promise<any>} Response body.
   * @throws {Error} When every key is rate limited (original 429 as
   *   `cause`), or immediately on any non-429 error.
   */
  async makeRequest(url, params, attemptsLeft = this.apiKeys.length) {
    const apiKey = this.getNextKey();
    try {
      const response = await axios.get(url, {
        params,
        headers: { 'X-API-Key': apiKey }
      });
      console.log(`Request successful with key ending in ...${apiKey.slice(-4)}`);
      return response.data;
    } catch (error) {
      if (error.response && error.response.status === 429) {
        // Stop once every key has been tried; the original recursed
        // unboundedly when ALL keys were rate limited.
        if (attemptsLeft <= 1) {
          throw new Error('All API keys are rate limited', { cause: error });
        }
        console.log(`Key ${apiKey.slice(-4)} rate limited, trying next key`);
        return await this.makeRequest(url, params, attemptsLeft - 1); // Try with next key
      }
      throw error;
    }
  }

  /** @returns {Array<{key: string, requestCount: number}>} Per-key usage. */
  getUsageStats() {
    const stats = [];
    this.keyUsage.forEach((usage, key) => {
      stats.push({
        key: `...${key.slice(-4)}`,
        requestCount: usage.count
      });
    });
    return stats;
  }
}
// Usage
const rotator = new APIKeyRotator([
  'key_1_abc123',
  'key_2_def456',
  'key_3_ghi789'
]);
// Make multiple requests (keys are rotated automatically)
// Sequential on purpose: each iteration awaits the previous request.
for (let i = 0; i < 20; i++) {
  await rotator.makeRequest(
    'https://api.sociavault.com/instagram/profile',
    { handle: `user${i}` }
  );
}
console.log('Usage stats:', rotator.getUsageStats());
Why this works: Distributes load across multiple API keys. When one hits limit, others still work. Effectively multiplies your rate limit.
Complete Rate Limiting System
Combine all strategies into one robust system:
/**
 * All-in-one client combining key rotation, a rate-limited request
 * queue, exponential backoff on 429s, and response caching.
 */
class RobustAPIClient {
  /**
   * @param {object} config
   * @param {string} config.baseURL - API origin, e.g. 'https://api.example.com'.
   * @param {string} [config.apiKey] - Single key (used if apiKeys absent).
   * @param {string[]} [config.apiKeys] - Pool of keys to rotate.
   * @param {number} [config.maxRetries=3] - Backoff retries per request.
   * @param {number} [config.requestsPerSecond=5] - Sustained request rate.
   * @param {number} [config.cacheDuration] - Cache lifetime in ms.
   */
  constructor(config) {
    this.baseURL = config.baseURL;
    this.apiKeys = config.apiKeys || [config.apiKey];
    this.maxRetries = config.maxRetries || 3;
    this.requestsPerSecond = config.requestsPerSecond || 5;
    this.keyRotator = new APIKeyRotator(this.apiKeys);
    this.queue = new RateLimitedQueue(this.requestsPerSecond);
    this.cache = new CachedAPIClient(config.cacheDuration);
  }

  /**
   * GET `endpoint` with caching, queuing, key rotation, and backoff.
   * @returns {Promise<any>} Response body (possibly from cache).
   */
  async request(endpoint, params = {}) {
    // Try cache first. (The original computed cacheKey but never used
    // it, so the cache built in the constructor was dead code.) We read
    // the CachedAPIClient's store directly rather than calling its
    // get(), because that method issues its own axios call and would
    // bypass the queue, rotation, and backoff below.
    const cacheKey = `${endpoint}:${JSON.stringify(params)}`;
    const cached = this.cache.cache.get(cacheKey);
    if (cached && Date.now() - cached.timestamp < this.cache.cacheDuration) {
      return cached.data;
    }

    const data = await this.queue.add(async () => {
      return await this.requestWithBackoff(endpoint, params);
    });
    this.cache.cache.set(cacheKey, { data, timestamp: Date.now() });
    return data;
  }

  /**
   * Single attempt with exponential backoff on 429, rotating to a fresh
   * key on every retry.
   * @throws The last axios error once maxRetries is exhausted, or any
   *   non-429 error immediately.
   */
  async requestWithBackoff(endpoint, params, retries = 0) {
    try {
      const apiKey = this.keyRotator.getNextKey();
      const url = `${this.baseURL}${endpoint}`;
      const response = await axios.get(url, {
        params,
        headers: { 'X-API-Key': apiKey }
      });
      return response.data;
    } catch (error) {
      if (error.response && error.response.status === 429 && retries < this.maxRetries) {
        const delay = Math.pow(2, retries) * 1000; // 1s, 2s, 4s, ...
        console.log(`Rate limited, waiting ${delay}ms before retry ${retries + 1}/${this.maxRetries}`);
        await new Promise(resolve => setTimeout(resolve, delay));
        return await this.requestWithBackoff(endpoint, params, retries + 1);
      }
      throw error;
    }
  }
}
// Usage
const client = new RobustAPIClient({
  baseURL: 'https://api.sociavault.com',
  apiKey: process.env.SOCIAVAULT_API_KEY, // single key; pass apiKeys: [...] to rotate
  requestsPerSecond: 10,
  maxRetries: 3,
  cacheDuration: 300000 // 5 minutes
});
// Make requests safely
const profile = await client.request('/instagram/profile', { handle: 'example' });
console.log('Profile retrieved:', profile);
Real-World Results
Before rate limiting strategies:
- Hit rate limit every 2 minutes
- Blocked for 15 minutes each time
- Could only scrape 100 profiles per hour
- Constant errors and crashes
After implementing strategies:
- Never hit rate limits
- Scrape 5,000 profiles per hour
- Zero errors or blocks
- Smooth, reliable operation
Your Rate Limiting Action Plan
- Implement exponential backoff - Handle temporary blocks gracefully
- Add request queue - Control request rate automatically
- Cache aggressively - Reduce API calls for duplicate requests
- Monitor usage - Track how close you are to limits
- Scale with multiple keys - When single key is not enough
Get your SociaVault API key and scale your data collection without hitting limits. We handle rate limiting infrastructure so you do not have to.
Stop getting blocked. Start scaling smoothly.
Found this helpful?
Share it with others who might benefit
Ready to Try SociaVault?
Start extracting social media data with our powerful API