Rate Limiting and Throttling in JavaScript
Rate limiting and throttling control the frequency of operations. This article covers techniques for limiting request rates and implementing efficient rate limiting strategies.
Introduction
Rate limiting and throttling:
- Prevent API abuse
- Protect server resources
- Improve application stability
- Manage resource consumption
- Ensure fair usage
Understanding rate limiting helps you:
- Build robust applications
- Handle high traffic
- Comply with API limits
- Optimize resource usage
Basic Throttling
Simple Throttle
// ✅ Good: Basic throttle function
// Leading-edge throttle: invokes `fn` at most once per `delay` ms.
// Calls arriving inside the cooldown window are dropped entirely
// (no trailing invocation).
function throttle(fn, delay) {
  let previousRun = 0;
  return function (...args) {
    const timestamp = Date.now();
    // Still cooling down — swallow the call.
    if (timestamp - previousRun < delay) return;
    previousRun = timestamp;
    return fn(...args);
  };
}
// Usage
// Logs at most once per second no matter how often the scroll event fires.
// (Browser-only: relies on `window`.)
const throttledScroll = throttle(() => {
console.log('Scroll event');
}, 1000);
window.addEventListener('scroll', throttledScroll);
Throttle with Trailing Call
// ✅ Good: Throttle with trailing call
// Throttle with a trailing invocation: fires immediately when the window
// is open, and schedules exactly one trailing call for activity that
// arrives during the cooldown.
//
// Fix over the original: the trailing call now uses the MOST RECENT
// arguments seen while throttled. The original captured the args of the
// first throttled call and replayed those, which is not the conventional
// trailing-throttle contract (the last event should win).
function throttleWithTrailing(fn, delay) {
  let lastCall = 0;
  let timeoutId = null;
  let pendingArgs = null; // newest args observed while throttled

  return function (...args) {
    const now = Date.now();
    if (now - lastCall >= delay) {
      // Leading edge: invoke right away and cancel any pending trailing call.
      lastCall = now;
      fn(...args);
      clearTimeout(timeoutId);
      timeoutId = null;
      pendingArgs = null;
    } else {
      // Throttled: remember the newest args; schedule the trailing call once.
      pendingArgs = args;
      if (!timeoutId) {
        timeoutId = setTimeout(() => {
          lastCall = Date.now();
          timeoutId = null;
          const trailingArgs = pendingArgs;
          pendingArgs = null;
          fn(...trailingArgs);
        }, delay - (now - lastCall));
      }
    }
  };
}
// Usage
// Fires on the first resize, then at most every 500 ms, plus one
// trailing call after the burst of resize events settles.
// (Browser-only: relies on `window`.)
const throttledResize = throttleWithTrailing(() => {
console.log('Window resized');
}, 500);
window.addEventListener('resize', throttledResize);
Token Bucket Algorithm
Token Bucket Implementation
// ✅ Good: Token bucket rate limiter
// Token-bucket rate limiter: holds up to `capacity` tokens and refills
// continuously at `refillRate` tokens per second. Operations consume
// tokens; when the bucket is empty the caller must wait for a refill.
class TokenBucket {
  /**
   * @param {number} capacity - Maximum tokens the bucket can hold.
   * @param {number} refillRate - Tokens added per second.
   */
  constructor(capacity, refillRate) {
    this.capacity = capacity;
    this.tokens = capacity;
    this.refillRate = refillRate; // tokens per second
    this.lastRefill = Date.now();
  }

  // Credit tokens earned since the last refill, capped at capacity.
  refill() {
    const now = Date.now();
    const timePassed = (now - this.lastRefill) / 1000;
    this.tokens = Math.min(this.capacity, this.tokens + timePassed * this.refillRate);
    this.lastRefill = now;
  }

  /**
   * Attempt to consume `tokens` immediately.
   * @returns {boolean} true if the tokens were available and consumed.
   */
  tryConsume(tokens = 1) {
    this.refill();
    if (this.tokens >= tokens) {
      this.tokens -= tokens;
      return true;
    }
    return false;
  }

  /**
   * Resolve once `tokens` have been consumed.
   *
   * Fixes over the original: (1) sleeps for the exact time the refill
   * needs instead of polling every 100 ms; (2) throws instead of looping
   * forever when the request exceeds capacity and can never succeed.
   *
   * @throws {RangeError} if `tokens` exceeds the bucket capacity.
   */
  async waitForTokens(tokens = 1) {
    if (tokens > this.capacity) {
      throw new RangeError(`Requested ${tokens} tokens but capacity is ${this.capacity}`);
    }
    while (!this.tryConsume(tokens)) {
      const deficit = tokens - this.tokens;
      // Minimum 1 ms so a rounding-to-zero wait cannot busy-loop.
      const waitMs = Math.max(1, Math.ceil((deficit / this.refillRate) * 1000));
      await new Promise((resolve) => setTimeout(resolve, waitMs));
    }
  }
}
// Usage
const bucket = new TokenBucket(10, 2); // 10 tokens, 2 per second
async function makeRequest() {
// Suspends (without blocking the event loop) until a token is available.
await bucket.waitForTokens(1);
console.log('Making request');
// Make API call
}
// Make 5 requests
// NOTE(review): these promises are fire-and-forget; any rejection would be
// unhandled. Fine for a demo, but production code should await/collect them.
for (let i = 0; i < 5; i++) {
makeRequest();
}
Sliding Window Algorithm
Sliding Window Counter
// ✅ Good: Sliding window rate limiter
// Sliding-window rate limiter: allows at most `maxRequests` within any
// rolling `windowSize`-millisecond window, tracked by exact timestamps.
class SlidingWindowCounter {
  /**
   * @param {number} maxRequests - Maximum requests allowed per window.
   * @param {number} windowSize - Window length in milliseconds.
   */
  constructor(maxRequests, windowSize) {
    this.maxRequests = maxRequests;
    this.windowSize = windowSize; // milliseconds
    this.requests = []; // timestamps (ms) of requests inside the window
  }

  // Drop timestamps that have slid out of the window.
  #prune(now) {
    this.requests = this.requests.filter(
      (time) => now - time < this.windowSize
    );
  }

  /**
   * Record a request if the window has room.
   * @returns {boolean} true if the request is allowed (and counted).
   */
  isAllowed() {
    const now = Date.now();
    this.#prune(now);
    if (this.requests.length < this.maxRequests) {
      this.requests.push(now);
      return true;
    }
    return false;
  }

  /**
   * Seconds until the oldest in-window request expires.
   *
   * Fix over the original: prunes stale entries first and clamps to >= 0,
   * so a standalone call can no longer report a negative retry delay.
   * @returns {number} whole seconds to wait (0 if the window has room).
   */
  getRetryAfter() {
    const now = Date.now();
    this.#prune(now);
    if (this.requests.length === 0) return 0;
    const oldestRequest = this.requests[0];
    return Math.max(0, Math.ceil((oldestRequest + this.windowSize - now) / 1000));
  }
}
// Usage
const limiter = new SlidingWindowCounter(5, 60000); // 5 requests per minute
// Demo: either performs the call or reports how long to back off.
async function apiCall() {
if (limiter.isAllowed()) {
console.log('Request allowed');
// Make API call
} else {
const retryAfter = limiter.getRetryAfter();
console.log(`Rate limited. Retry after ${retryAfter}s`);
}
}
Leaky Bucket Algorithm
Leaky Bucket Implementation
// ✅ Good: Leaky bucket rate limiter
// Leaky-bucket rate limiter: incoming work fills the bucket, which drains
// ("leaks") at a constant `leakRate` items per second. Work that would
// overflow the bucket is rejected, or queued by `waitToAdd`.
class LeakyBucket {
  constructor(capacity, leakRate) {
    this.capacity = capacity;
    this.water = 0;
    this.leakRate = leakRate; // items per second
    this.lastLeak = Date.now();
  }

  // Drain whatever leaked out since the previous call, never below zero.
  leak() {
    const current = Date.now();
    const elapsedSeconds = (current - this.lastLeak) / 1000;
    this.water = Math.max(0, this.water - elapsedSeconds * this.leakRate);
    this.lastLeak = current;
  }

  // Returns true when `amount` fits after leaking; false on overflow.
  tryAdd(amount = 1) {
    this.leak();
    const projected = this.water + amount;
    if (projected > this.capacity) return false;
    this.water = projected;
    return true;
  }

  // Poll every 100 ms until the bucket drains enough to accept `amount`.
  async waitToAdd(amount = 1) {
    while (!this.tryAdd(amount)) {
      await new Promise((resolve) => setTimeout(resolve, 100));
    }
  }
}
// Usage
const bucket = new LeakyBucket(10, 2); // 10 capacity, 2 items/second
async function processRequest() {
// Waits until the bucket has drained enough to accept this item.
await bucket.waitToAdd(1);
console.log('Processing request');
}
Practical Rate Limiting Patterns
API Rate Limiter
// ✅ Good: API rate limiter with retry
// Client-side API rate limiter: combines a sliding-window counter with a
// fetch wrapper that waits out the window before issuing requests and
// reports any server-provided rate-limit headers.
class APIRateLimiter {
  /**
   * @param {number} maxRequests - Requests allowed per window.
   * @param {number} windowSize - Window length in milliseconds.
   */
  constructor(maxRequests, windowSize) {
    this.maxRequests = maxRequests;
    this.windowSize = windowSize;
    this.requests = [];
  }

  /**
   * Fetch `url`, delaying as needed to stay within the local window.
   * @returns {Promise<Response>} the raw fetch Response.
   */
  async fetch(url, options = {}) {
    // Wait for rate limit
    while (!this.isAllowed()) {
      const retryAfter = this.getRetryAfter();
      console.log(`Rate limited. Waiting ${retryAfter}s`);
      // Clamp to a minimum 50 ms: the original could compute a 0-second
      // wait and spin through this loop with no delay at all.
      await new Promise((resolve) => setTimeout(resolve, Math.max(retryAfter * 1000, 50)));
    }
    // Make request
    const response = await fetch(url, options);
    // Report server-side rate-limit headers. The original read the reset
    // header and then never used it; include it in the log.
    const remaining = response.headers.get('X-RateLimit-Remaining');
    const reset = response.headers.get('X-RateLimit-Reset');
    if (remaining !== null) {
      console.log(`Requests remaining: ${remaining}${reset !== null ? ` (resets at ${reset})` : ''}`);
    }
    return response;
  }

  /**
   * Record a request if the rolling window has room.
   * @returns {boolean} true if the request is allowed (and counted).
   */
  isAllowed() {
    const now = Date.now();
    this.requests = this.requests.filter(
      (time) => now - time < this.windowSize
    );
    if (this.requests.length < this.maxRequests) {
      this.requests.push(now);
      return true;
    }
    return false;
  }

  /**
   * Seconds until the oldest in-window request expires, clamped to >= 0.
   * @returns {number} whole seconds to wait (0 if nothing is recorded).
   */
  getRetryAfter() {
    if (this.requests.length === 0) return 0;
    const oldestRequest = this.requests[0];
    const now = Date.now();
    return Math.max(0, Math.ceil((oldestRequest + this.windowSize - now) / 1000));
  }
}
// Usage
const limiter = new APIRateLimiter(10, 60000); // 10 requests per minute
// All callers share one limiter, so aggregate traffic stays in the window.
async function fetchData() {
const response = await limiter.fetch('/api/data');
const data = await response.json();
return data;
}
Concurrent Request Limiter
// ✅ Good: Limit concurrent requests
// Caps the number of in-flight async operations at `maxConcurrent`.
// Excess callers park on a FIFO queue and are woken one at a time as
// running operations settle.
class ConcurrentLimiter {
  constructor(maxConcurrent) {
    this.maxConcurrent = maxConcurrent;
    this.current = 0;
    this.queue = [];
  }

  // Run `fn` once a slot frees up; the slot is always released afterwards,
  // even if `fn` throws.
  async run(fn) {
    while (this.current >= this.maxConcurrent) {
      // Park until a finishing task wakes us, then re-check the limit
      // (another waiter may have grabbed the slot first).
      await new Promise((wake) => this.queue.push(wake));
    }
    this.current += 1;
    try {
      return await fn();
    } finally {
      this.current -= 1;
      // Wake the next parked caller, if any.
      this.queue.shift()?.();
    }
  }
}
// Usage
const limiter = new ConcurrentLimiter(3); // Max 3 concurrent
async function fetchUrl(url) {
// Every fetch is funneled through the limiter's slot queue.
return limiter.run(() => fetch(url).then(r => r.json()));
}
// Fetch 10 URLs with max 3 concurrent
// NOTE: top-level await — this snippet requires an ES module context.
const urls = Array.from({ length: 10 }, (_, i) => `/api/item/${i}`);
const results = await Promise.all(urls.map(fetchUrl));
Exponential Backoff with Rate Limiting
// ✅ Good: Exponential backoff for rate limiting
/**
 * Fetch `url`, retrying on failure and on HTTP 429 with exponential backoff.
 * Honors the Retry-After response header (in seconds) when present.
 *
 * Fixes over the original: `parseInt` now passes an explicit radix, and
 * exhausting all attempts on 429 throws instead of silently resolving to
 * `undefined` (the original loop just fell through).
 *
 * @param {string} url - Resource to fetch.
 * @param {number} [maxRetries=3] - Maximum number of attempts.
 * @returns {Promise<any>} Parsed JSON body of the successful response.
 * @throws {Error} when all attempts fail.
 */
async function fetchWithBackoff(url, maxRetries = 3) {
  for (let attempt = 0; attempt < maxRetries; attempt++) {
    try {
      const response = await fetch(url);
      if (response.status === 429) {
        // Rate limited: honor Retry-After if given, else back off 2^attempt s.
        const retryAfter = response.headers.get('Retry-After');
        const delay = retryAfter
          ? Number.parseInt(retryAfter, 10) * 1000
          : Math.pow(2, attempt) * 1000;
        console.log(`Rate limited. Retrying in ${delay}ms`);
        await new Promise((resolve) => setTimeout(resolve, delay));
        continue;
      }
      if (!response.ok) throw new Error(`HTTP ${response.status}`);
      return response.json();
    } catch (error) {
      // Last attempt: propagate the underlying error to the caller.
      if (attempt === maxRetries - 1) throw error;
      const delay = Math.pow(2, attempt) * 1000;
      console.log(`Attempt ${attempt + 1} failed. Retrying in ${delay}ms`);
      await new Promise((resolve) => setTimeout(resolve, delay));
    }
  }
  // Every attempt hit 429; surface that instead of resolving to undefined.
  throw new Error(`Rate limited after ${maxRetries} attempts: ${url}`);
}
// Usage
// NOTE: top-level await — this snippet requires an ES module context.
const data = await fetchWithBackoff('/api/data');
Throttling vs Debouncing
Throttle: Execute at Regular Intervals
// ✅ Good: Throttle for regular execution
// Leading-edge throttle: runs `fn` at most once every `delay` ms;
// calls landing inside the cooldown window are silently dropped.
function throttle(fn, delay) {
  let lastRun = 0;
  return (...args) => {
    const current = Date.now();
    const cooledDown = current - lastRun >= delay;
    if (cooledDown) {
      lastRun = current;
      fn(...args);
    }
  };
}
// Usage: Scroll events
// Reports the scroll position at most once per second during scrolling.
// (Browser-only: relies on `window`.)
const throttledScroll = throttle(() => {
console.log('Scroll position:', window.scrollY);
}, 1000);
window.addEventListener('scroll', throttledScroll);
Debounce: Execute After Delay
// ✅ Good: Debounce for delayed execution
// Trailing-edge debounce: postpones `fn` until `delay` ms have passed
// without another call; only the final call's arguments are used.
function debounce(fn, delay) {
  let pending;
  return function (...args) {
    clearTimeout(pending);
    pending = setTimeout(() => fn(...args), delay);
  };
}
// Usage: Search input
// Waits until typing pauses for 300 ms before issuing the search.
// (Browser-only: relies on `document`.)
const debouncedSearch = debounce((query) => {
console.log('Searching for:', query);
// Make API call
}, 300);
document.getElementById('search').addEventListener('input', (e) => {
debouncedSearch(e.target.value);
});
Best Practices
-
Choose appropriate algorithm:
// ✅ Good - Token bucket for smooth rate limiting const bucket = new TokenBucket(10, 2); -
Handle rate limit headers:
// ✅ Good - Respect server rate limit headers const remaining = response.headers.get('X-RateLimit-Remaining'); const reset = response.headers.get('X-RateLimit-Reset'); -
Implement exponential backoff:
// ✅ Good - Exponential backoff for retries const delay = Math.pow(2, attempt) * 1000; -
Monitor rate limit status:
// ✅ Good - Track rate limit usage console.log(`Requests remaining: ${remaining}`);
Common Mistakes
-
Not respecting rate limits:
// ❌ Bad - Ignores rate limits for (let i = 0; i < 100; i++) { fetch('/api/data'); } // ✅ Good - Respects rate limits const limiter = new TokenBucket(10, 2); -
Using throttle instead of debounce:
// ❌ Bad - Throttle for search const throttledSearch = throttle(search, 300); // ✅ Good - Debounce for search const debouncedSearch = debounce(search, 300); -
Not handling 429 responses:
// ❌ Bad - Ignores 429 status const response = await fetch(url); // ✅ Good - Handles 429 status if (response.status === 429) { // Handle rate limit }
Summary
Rate limiting and throttling are essential for robust applications. Key takeaways:
- Token bucket for smooth rate limiting
- Sliding window for request counting
- Leaky bucket for queue management
- Throttle for regular execution
- Debounce for delayed execution
- Respect server rate limit headers
- Implement exponential backoff
- Monitor rate limit status
Related Resources
- Rate Limiting - Wikipedia
- Token Bucket - Wikipedia
- Throttle and Debounce - CSS-Tricks
- HTTP 429 - MDN
- Retry-After - MDN
Next Steps
- Learn about Debouncing and Memoization
- Explore Concurrency Patterns in JavaScript
- Study Stream Processing Basics
- Implement rate limiting in your applications
- Monitor API rate limits
Comments