Back to Blog
Rate Limiting · API · Security · Performance

Rate Limiting Algorithms Explained

Implement effective rate limiting. From token bucket to sliding window to distributed rate limiting.

B
Bootspring Team
Engineering
March 5, 2023
7 min read

Rate limiting protects APIs from abuse and ensures fair resource usage. Different algorithms offer different trade-offs between accuracy, memory, and performance.

Rate Limiting Algorithms#

Token Bucket: tokens are added at a fixed rate and each request consumes tokens, so bursts are allowed up to the bucket size. Sliding Window Log: stores the timestamp of each request and counts requests within a sliding window — the most accurate approach, but with the highest memory cost. Sliding Window Counter: combines fixed and sliding windows to get good accuracy with less memory — the best overall choice. Fixed Window Counter: counts requests per fixed time window; it is the simplest implementation, but allows bursts at window boundaries.

Token Bucket Implementation#

1import Redis from 'ioredis'; 2 3class TokenBucket { 4 constructor( 5 private redis: Redis, 6 private maxTokens: number, 7 private refillRate: number, // tokens per second 8 private prefix = 'ratelimit' 9 ) {} 10 11 async consume(key: string, tokens = 1): Promise<RateLimitResult> { 12 const now = Date.now(); 13 const redisKey = `${this.prefix}:${key}`; 14 15 // Lua script for atomic operation 16 const script = ` 17 local key = KEYS[1] 18 local max_tokens = tonumber(ARGV[1]) 19 local refill_rate = tonumber(ARGV[2]) 20 local now = tonumber(ARGV[3]) 21 local requested = tonumber(ARGV[4]) 22 23 local data = redis.call('HMGET', key, 'tokens', 'last_refill') 24 local tokens = tonumber(data[1]) or max_tokens 25 local last_refill = tonumber(data[2]) or now 26 27 -- Calculate tokens to add 28 local elapsed = (now - last_refill) / 1000 29 local refill = math.floor(elapsed * refill_rate) 30 tokens = math.min(max_tokens, tokens + refill) 31 32 local allowed = tokens >= requested 33 if allowed then 34 tokens = tokens - requested 35 end 36 37 redis.call('HMSET', key, 'tokens', tokens, 'last_refill', now) 38 redis.call('EXPIRE', key, math.ceil(max_tokens / refill_rate) + 1) 39 40 return {allowed and 1 or 0, tokens, max_tokens} 41 `; 42 43 const result = await this.redis.eval( 44 script, 45 1, 46 redisKey, 47 this.maxTokens, 48 this.refillRate, 49 now, 50 tokens 51 ) as [number, number, number]; 52 53 return { 54 allowed: result[0] === 1, 55 remaining: result[1], 56 limit: result[2], 57 resetAt: now + Math.ceil((this.maxTokens - result[1]) / this.refillRate) * 1000, 58 }; 59 } 60} 61 62// Usage 63const bucket = new TokenBucket(redis, 100, 10); // 100 tokens, refill 10/sec 64 65app.use(async (req, res, next) => { 66 const result = await bucket.consume(req.ip); 67 68 res.setHeader('X-RateLimit-Limit', result.limit); 69 res.setHeader('X-RateLimit-Remaining', result.remaining); 70 res.setHeader('X-RateLimit-Reset', result.resetAt); 71 72 if (!result.allowed) { 73 return 
res.status(429).json({ error: 'Rate limit exceeded' }); 74 } 75 76 next(); 77});

Sliding Window Counter#

1class SlidingWindowCounter { 2 constructor( 3 private redis: Redis, 4 private limit: number, 5 private windowMs: number, 6 private prefix = 'ratelimit' 7 ) {} 8 9 async check(key: string): Promise<RateLimitResult> { 10 const now = Date.now(); 11 const windowStart = now - this.windowMs; 12 const currentWindow = Math.floor(now / this.windowMs); 13 const previousWindow = currentWindow - 1; 14 15 const redisKey = `${this.prefix}:${key}`; 16 17 const script = ` 18 local key = KEYS[1] 19 local current_window = ARGV[1] 20 local previous_window = ARGV[2] 21 local now = tonumber(ARGV[3]) 22 local window_ms = tonumber(ARGV[4]) 23 local limit = tonumber(ARGV[5]) 24 25 -- Get counts 26 local current_count = tonumber(redis.call('HGET', key, current_window)) or 0 27 local previous_count = tonumber(redis.call('HGET', key, previous_window)) or 0 28 29 -- Calculate weighted count 30 local window_start = math.floor(now / window_ms) * window_ms 31 local elapsed = now - window_start 32 local weight = (window_ms - elapsed) / window_ms 33 local weighted_count = current_count + math.floor(previous_count * weight) 34 35 local allowed = weighted_count < limit 36 37 if allowed then 38 redis.call('HINCRBY', key, current_window, 1) 39 redis.call('EXPIRE', key, math.ceil(window_ms / 1000) * 2) 40 weighted_count = weighted_count + 1 41 end 42 43 return {allowed and 1 or 0, limit - weighted_count, limit} 44 `; 45 46 const result = await this.redis.eval( 47 script, 48 1, 49 redisKey, 50 currentWindow.toString(), 51 previousWindow.toString(), 52 now, 53 this.windowMs, 54 this.limit 55 ) as [number, number, number]; 56 57 return { 58 allowed: result[0] === 1, 59 remaining: Math.max(0, result[1]), 60 limit: result[2], 61 resetAt: (currentWindow + 1) * this.windowMs, 62 }; 63 } 64} 65 66// Usage: 100 requests per minute 67const limiter = new SlidingWindowCounter(redis, 100, 60000);

Fixed Window Counter#

1class FixedWindowCounter { 2 constructor( 3 private redis: Redis, 4 private limit: number, 5 private windowMs: number, 6 private prefix = 'ratelimit' 7 ) {} 8 9 async check(key: string): Promise<RateLimitResult> { 10 const window = Math.floor(Date.now() / this.windowMs); 11 const redisKey = `${this.prefix}:${key}:${window}`; 12 13 const current = await this.redis.incr(redisKey); 14 15 if (current === 1) { 16 await this.redis.expire(redisKey, Math.ceil(this.windowMs / 1000)); 17 } 18 19 const allowed = current <= this.limit; 20 21 return { 22 allowed, 23 remaining: Math.max(0, this.limit - current), 24 limit: this.limit, 25 resetAt: (window + 1) * this.windowMs, 26 }; 27 } 28}

Tiered Rate Limiting#

1interface RateLimitTier { 2 name: string; 3 requestsPerMinute: number; 4 requestsPerHour: number; 5 requestsPerDay: number; 6} 7 8const tiers: Record<string, RateLimitTier> = { 9 free: { name: 'free', requestsPerMinute: 10, requestsPerHour: 100, requestsPerDay: 1000 }, 10 basic: { name: 'basic', requestsPerMinute: 60, requestsPerHour: 1000, requestsPerDay: 10000 }, 11 pro: { name: 'pro', requestsPerMinute: 300, requestsPerHour: 5000, requestsPerDay: 50000 }, 12}; 13 14class TieredRateLimiter { 15 private limiters: Map<string, SlidingWindowCounter[]> = new Map(); 16 17 constructor(private redis: Redis) {} 18 19 private getLimiters(tier: RateLimitTier): SlidingWindowCounter[] { 20 if (!this.limiters.has(tier.name)) { 21 this.limiters.set(tier.name, [ 22 new SlidingWindowCounter(this.redis, tier.requestsPerMinute, 60000, `rl:${tier.name}:min`), 23 new SlidingWindowCounter(this.redis, tier.requestsPerHour, 3600000, `rl:${tier.name}:hour`), 24 new SlidingWindowCounter(this.redis, tier.requestsPerDay, 86400000, `rl:${tier.name}:day`), 25 ]); 26 } 27 return this.limiters.get(tier.name)!; 28 } 29 30 async check(key: string, tierName: string): Promise<RateLimitResult> { 31 const tier = tiers[tierName] || tiers.free; 32 const limiters = this.getLimiters(tier); 33 34 // Check all limits 35 const results = await Promise.all( 36 limiters.map((limiter) => limiter.check(key)) 37 ); 38 39 // Find most restrictive 40 const blocked = results.find((r) => !r.allowed); 41 42 if (blocked) { 43 return blocked; 44 } 45 46 // Return the limit closest to being exceeded 47 return results.reduce((min, r) => 48 r.remaining / r.limit < min.remaining / min.limit ? r : min 49 ); 50 } 51}

Distributed Rate Limiting#

1// Cluster-aware rate limiting with Redis 2class DistributedRateLimiter { 3 constructor( 4 private redis: Redis, 5 private syncInterval = 1000 6 ) {} 7 8 async checkWithSync( 9 key: string, 10 limit: number, 11 windowMs: number 12 ): Promise<RateLimitResult> { 13 const now = Date.now(); 14 const window = Math.floor(now / windowMs); 15 const localKey = `${key}:${window}`; 16 17 // Local counter for this instance 18 const localCount = this.incrementLocal(localKey); 19 20 // Sync to Redis periodically 21 if (localCount % 10 === 0 || this.shouldSync(localKey)) { 22 await this.syncToRedis(key, window, localCount); 23 } 24 25 // Get global count 26 const globalCount = await this.getGlobalCount(key, window); 27 28 return { 29 allowed: globalCount < limit, 30 remaining: Math.max(0, limit - globalCount), 31 limit, 32 resetAt: (window + 1) * windowMs, 33 }; 34 } 35 36 private localCounts = new Map<string, number>(); 37 private lastSync = new Map<string, number>(); 38 39 private incrementLocal(key: string): number { 40 const count = (this.localCounts.get(key) || 0) + 1; 41 this.localCounts.set(key, count); 42 return count; 43 } 44 45 private shouldSync(key: string): boolean { 46 const last = this.lastSync.get(key) || 0; 47 return Date.now() - last > this.syncInterval; 48 } 49 50 private async syncToRedis(key: string, window: number, count: number): Promise<void> { 51 const redisKey = `ratelimit:${key}:${window}`; 52 await this.redis.incrby(redisKey, count); 53 this.localCounts.delete(`${key}:${window}`); 54 this.lastSync.set(`${key}:${window}`, Date.now()); 55 } 56 57 private async getGlobalCount(key: string, window: number): Promise<number> { 58 const redisKey = `ratelimit:${key}:${window}`; 59 const count = await this.redis.get(redisKey); 60 return parseInt(count || '0', 10); 61 } 62}

Middleware Integration#

1import rateLimit from 'express-rate-limit'; 2import RedisStore from 'rate-limit-redis'; 3 4// Express middleware with Redis store 5const apiLimiter = rateLimit({ 6 store: new RedisStore({ 7 client: redis, 8 prefix: 'rl:api:', 9 }), 10 windowMs: 60 * 1000, 11 max: 100, 12 standardHeaders: true, 13 legacyHeaders: false, 14 keyGenerator: (req) => req.user?.id || req.ip, 15 handler: (req, res) => { 16 res.status(429).json({ 17 error: 'Too many requests', 18 retryAfter: Math.ceil(req.rateLimit.resetTime / 1000), 19 }); 20 }, 21 skip: (req) => req.path === '/health', 22}); 23 24// Different limits for different endpoints 25const authLimiter = rateLimit({ 26 windowMs: 15 * 60 * 1000, // 15 minutes 27 max: 5, 28 skipSuccessfulRequests: true, 29}); 30 31app.use('/api/', apiLimiter); 32app.use('/api/auth/login', authLimiter);

Best Practices#

Algorithm Selection: ✓ use a token bucket for bursty traffic, ✓ a sliding window for accuracy, and ✓ a fixed window for simplicity. Implementation: ✓ use Redis for distributed systems, ✓ return rate limit headers, ✓ provide clear error messages, and ✓ allow for bursts when appropriate. Operations: ✓ monitor rate limit hits, ✓ alert on sustained limit hits, ✓ provide upgrade paths, and ✓ document limits clearly.

Conclusion#

Choose the right rate limiting algorithm based on your needs. Token bucket allows bursts, sliding window is most accurate, and fixed window is simplest. Use Redis for distributed systems and always return informative headers to clients.

Share this article

Help spread the word about Bootspring