Community-Scripts.org : installer n’importe quel service sur Proxmox en une commande
La cinquième fois que j'ai tapé les mêmes commandes pour créer un LXC Debian, installer…
Le rate limiting et le throttling sont des mécanismes essentiels pour protéger vos APIs contre les abus, garantir une distribution équitable des ressources et maintenir la qualité de service. En 2025, avec 48% des APIs subissant des attaques DDoS ou de scraping, implémenter ces protections n’est plus optionnel. Ce guide complet couvre les stratégies de rate limiting, leur implémentation pratique et les considérations de performance.
| Aspect | Rate Limiting | Throttling |
|---|---|---|
| Objectif | Limiter le nombre de requêtes | Réduire la vitesse de traitement |
| Action | Rejette les requêtes excédentaires (429) | Ralentit le traitement |
| Utilisation | Protection contre les abus | Gestion de la charge |
| Réponse | Erreur HTTP 429 | Délai dans la réponse |
1. Fixed Window (Fenêtre fixe)
Fenêtre de 1 minute: 100 requêtes max
00:00 → 00:59: ████████████ (100 requêtes)
01:00 → 01:59: ████ (reset à 0)
2. Sliding Window (Fenêtre glissante)
Fenêtre mobile de 1 minute
00:30: compte les requêtes de 23:30 à 00:30
00:45: compte les requêtes de 23:45 à 00:45
3. Token Bucket (Seau à jetons)
Capacité: 100 jetons
Remplissage: 10 jetons/seconde
Requête = consomme 1 jeton
Si jetons = 0 → rejet
4. Leaky Bucket (Seau percé)
File d'attente de requêtes
Traitement à débit constant
Si file pleine → rejet
// Installation: npm install express-rate-limit
const express = require('express');
const rateLimit = require('express-rate-limit');

const app = express();

// Global rate limiter: 100 requests per 15-minute window.
const globalLimiter = rateLimit({
  windowMs: 15 * 60 * 1000, // 15 minutes
  max: 100, // 100 requests per window
  message: {
    error: 'Too Many Requests',
    message: 'You have exceeded the 100 requests in 15 minutes limit!',
    retryAfter: '15 minutes'
  },
  standardHeaders: true, // emit the standard RateLimit-* headers
  legacyHeaders: false, // disable the legacy X-RateLimit-* headers
  // Custom JSON response when the limit is exceeded.
  handler: (req, res) => {
    res.status(429).json({
      error: 'Too Many Requests',
      message: 'Rate limit exceeded',
      retryAfter: req.rateLimit.resetTime
    });
  },
  // Do not rate-limit admin requests.
  skip: (req) => {
    return req.user && req.user.role === 'admin';
  }
});

app.use('/api/', globalLimiter);
// Strict limiter for sensitive endpoints: 5 failed attempts per 15 minutes.
const strictLimiter = rateLimit({
  windowMs: 15 * 60 * 1000, // 15 minutes
  max: 5,
  skipSuccessfulRequests: true, // only count failed requests
  message: 'Too many failed attempts, please try again later'
});

app.use('/auth/login', strictLimiter);
app.use('/auth/register', strictLimiter);
// Per-user rate limiter factory (default: 100 requests per hour).
const createUserLimiter = (maxRequests = 100) => {
  return rateLimit({
    windowMs: 60 * 60 * 1000, // 1 hour
    max: maxRequests,
    // Key on the authenticated user id, falling back to the client IP.
    keyGenerator: (req) => {
      return req.user ? req.user.id.toString() : req.ip;
    },
    handler: (req, res) => {
      res.status(429).json({
        error: 'Too Many Requests',
        message: `User rate limit exceeded (${maxRequests} requests/hour)`
      });
    }
  });
};

app.use('/api/v1/posts', createUserLimiter(50));
// Installation: npm install rate-limit-redis ioredis
const rateLimit = require('express-rate-limit');
const RedisStore = require('rate-limit-redis');
const Redis = require('ioredis');

// Redis connection, configured via environment variables.
const redis = new Redis({
  host: process.env.REDIS_HOST || 'localhost',
  port: process.env.REDIS_PORT || 6379,
  password: process.env.REDIS_PASSWORD,
  db: process.env.REDIS_DB || 0,
  enableOfflineQueue: false, // fail fast instead of queueing commands while disconnected
  maxRetriesPerRequest: 3
});

redis.on('error', (err) => {
  console.error('Redis error:', err);
});

// Rate limiter backed by Redis so counters are shared across processes.
const redisLimiter = rateLimit({
  store: new RedisStore({
    client: redis,
    prefix: 'rl:', // prefix for the Redis keys
  }),
  windowMs: 15 * 60 * 1000,
  max: 100,
  standardHeaders: true,
  legacyHeaders: false
});

app.use('/api/', redisLimiter);
// middleware/advancedRateLimit.js
class AdvancedRateLimiter {
constructor(redis) {
this.redis = redis;
}
/
Sliding Window Rate Limiter
/
slidingWindow(options = {}) {
const {
windowMs = 60000,
max = 60,
keyPrefix = 'sw:'
} = options;
return async (req, res, next) => {
try {
const key = ${keyPrefix}${this.getKey(req)};
const now = Date.now();
const windowStart = now - windowMs;
// Pipeline Redis pour performance
const pipeline = this.redis.pipeline();
// Supprimer les entrées expirées
pipeline.zremrangebyscore(key, 0, windowStart);
// Ajouter la requête actuelle
pipeline.zadd(key, now, ${now}-${Math.random()});
// Compter les requêtes dans la fenêtre
pipeline.zcard(key);
// Définir l'expiration de la clé
pipeline.expire(key, Math.ceil(windowMs / 1000));
const results = await pipeline.exec();
// Le résultat de zcard est à l'index 2
const requestCount = results[2][1];
// Headers de rate limit
res.setHeader('X-RateLimit-Limit', max);
res.setHeader('X-RateLimit-Remaining', Math.max(0, max - requestCount));
res.setHeader('X-RateLimit-Reset', new Date(now + windowMs).toISOString());
if (requestCount > max) {
return res.status(429).json({
error: 'Too Many Requests',
message: 'Rate limit exceeded',
retryAfter: Math.ceil(windowMs / 1000)
});
}
next();
} catch (error) {
console.error('Rate limit error:', error);
// En cas d'erreur Redis, laisser passer (fail-open)
next();
}
};
}
/
Token Bucket Rate Limiter
/
tokenBucket(options = {}) {
const {
capacity = 100,
refillRate = 10, // tokens par seconde
cost = 1,
keyPrefix = 'tb:'
} = options;
return async (req, res, next) => {
try {
const key = ${keyPrefix}${this.getKey(req)};
const now = Date.now() / 1000; // Secondes
// Récupérer l'état actuel
const data = await this.redis.hgetall(key);
let tokens = parseFloat(data.tokens) || capacity;
let lastRefill = parseFloat(data.lastRefill) || now;
// Calculer le refill
const timePassed = now - lastRefill;
const tokensToAdd = timePassed refillRate;
tokens = Math.min(capacity, tokens + tokensToAdd);
// Headers
res.setHeader('X-RateLimit-Limit', capacity);
res.setHeader('X-RateLimit-Remaining', Math.floor(tokens));
if (tokens < cost) {
const waitTime = (cost - tokens) / refillRate;
res.setHeader('Retry-After', Math.ceil(waitTime));
return res.status(429).json({
error: 'Too Many Requests',
message: 'Token bucket depleted',
retryAfter: Math.ceil(waitTime)
});
}
// Consommer les tokens
tokens -= cost;
// Sauvegarder le nouvel état
await this.redis.hset(key, {
tokens: tokens.toString(),
lastRefill: now.toString()
});
await this.redis.expire(key, 3600); // Expiration 1h
next();
} catch (error) {
console.error('Token bucket error:', error);
next();
}
};
}
/
Leaky Bucket Rate Limiter
/
leakyBucket(options = {}) {
const {
capacity = 100,
leakRate = 10, // requêtes par seconde
keyPrefix = 'lb:'
} = options;
return async (req, res, next) => {
try {
const key = ${keyPrefix}${this.getKey(req)};
const now = Date.now() / 1000;
const data = await this.redis.hgetall(key);
let level = parseFloat(data.level) || 0;
let lastLeak = parseFloat(data.lastLeak) || now;
// Calculer la fuite
const timePassed = now - lastLeak;
const leaked = timePassed leakRate;
level = Math.max(0, level - leaked);
// Vérifier la capacité
if (level >= capacity) {
res.setHeader('Retry-After', Math.ceil((level - capacity + 1) / leakRate));
return res.status(429).json({
error: 'Too Many Requests',
message: 'Leaky bucket full',
retryAfter: Math.ceil((level - capacity + 1) / leakRate)
});
}
// Ajouter la requête
level += 1;
// Sauvegarder
await this.redis.hset(key, {
level: level.toString(),
lastLeak: now.toString()
});
await this.redis.expire(key, 3600);
res.setHeader('X-RateLimit-Remaining', Math.floor(capacity - level));
next();
} catch (error) {
console.error('Leaky bucket error:', error);
next();
}
};
}
/
Générer la clé de rate limiting
/
getKey(req) {
// Par utilisateur si authentifié
if (req.user) {
return user:${req.user.id};
}
// Par API key
if (req.apiKey) {
return apikey:${req.apiKey.id};
}
// Par IP
return ip:${req.ip};
}
}
module.exports = AdvancedRateLimiter;
// Usage: attach the advanced limiters to specific routes.
const limiter = new AdvancedRateLimiter(redis);
// Sliding window: at most 30 searches per rolling minute.
app.use('/api/v1/search', limiter.slidingWindow({
windowMs: 60000,
max: 30
}));
// Token bucket for uploads.
app.post('/api/v1/upload', limiter.tokenBucket({
capacity: 10,
refillRate: 1,
cost: 5 // each upload consumes 5 tokens
}));
// Leaky bucket for notifications: constant-rate draining.
app.post('/api/v1/notifications', limiter.leakyBucket({
capacity: 100,
leakRate: 10
}));
// models/UserTier.js
// Subscription tiers and their rate-limit quotas.
// NOTE(review): identifiers such as USERTIERS / requestsperhour look like
// underscore-stripped names (USER_TIERS / requests_per_hour); kept as-is
// because other snippets in this article reference them under these names.
const USERTIERS = {
free: {
name: 'Free',
limits: {
requestsperhour: 100, // hourly quota
requestsperday: 1000, // daily quota
burst: 20 // max requests per second
}
},
basic: {
name: 'Basic',
limits: {
requestsperhour: 1000,
requestsperday: 10000,
burst: 50
}
},
premium: {
name: 'Premium',
limits: {
requestsperhour: 10000,
requestsperday: 100000,
burst: 200
}
},
enterprise: {
name: 'Enterprise',
limits: {
requestsperhour: Infinity, // unlimited hourly/daily, burst still capped
requestsperday: Infinity,
burst: 1000
}
}
};
// middleware/tieredRateLimit.js
const rateLimit = require('express-rate-limit');
const RedisStore = require('rate-limit-redis');

// Build a middleware applying burst, hourly and daily limits according
// to the caller's subscription tier.
const createTieredLimiter = (redis) => {
  return async (req, res, next) => {
    // Resolve the caller's tier: authenticated user, then API key, then free.
    let tier = 'free';
    if (req.user) {
      tier = req.user.subscriptiontier || 'free';
    } else if (req.apiKey) {
      tier = req.apiKey.tier || 'free';
    }

    const limits = USERTIERS[tier].limits;

    // NOTE(review): the three limiters below are re-created on every request;
    // consider caching one set per tier to avoid per-request allocation.

    // Hourly limiter.
    const hourlyLimiter = rateLimit({
      store: new RedisStore({
        client: redis,
        prefix: `rl:hourly:${tier}:`
      }),
      windowMs: 60 * 60 * 1000,
      max: limits.requestsperhour,
      keyGenerator: (req) => req.user ? req.user.id.toString() : req.ip,
      handler: (req, res) => {
        res.status(429).json({
          error: 'Too Many Requests',
          message: `Hourly rate limit exceeded for ${tier} tier`,
          limit: limits.requestsperhour,
          tier: tier
        });
      }
    });

    // Daily limiter.
    const dailyLimiter = rateLimit({
      store: new RedisStore({
        client: redis,
        prefix: `rl:daily:${tier}:`
      }),
      windowMs: 24 * 60 * 60 * 1000,
      max: limits.requestsperday,
      keyGenerator: (req) => req.user ? req.user.id.toString() : req.ip
    });

    // Burst limiter (per-second).
    const burstLimiter = rateLimit({
      store: new RedisStore({
        client: redis,
        prefix: `rl:burst:${tier}:`
      }),
      windowMs: 1000, // 1 second
      max: limits.burst,
      keyGenerator: (req) => req.user ? req.user.id.toString() : req.ip
    });

    // Expose the resolved tier in the response headers.
    res.setHeader('X-RateLimit-Tier', tier);

    // Chain the limiters: burst first, then hourly, then daily.
    burstLimiter(req, res, (err) => {
      if (err) return next(err);
      hourlyLimiter(req, res, (err) => {
        if (err) return next(err);
        dailyLimiter(req, res, next);
      });
    });
  };
};

module.exports = createTieredLimiter;
// Utilisation
app.use('/api/', createTieredLimiter(redis));
# Installation: pip install fastapi slowapi redis
from fastapi import FastAPI, Request, HTTPException
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.util import get_remote_address
from slowapi.errors import RateLimitExceeded
from slowapi.middleware import SlowAPIMiddleware
import redis
from datetime import datetime, timedelta
from typing import Optional
import time

app = FastAPI()

# Redis connection; decode_responses=True so values come back as str.
redisclient = redis.Redis(
    host='localhost',
    port=6379,
    db=0,
    decode_responses=True
)

# Basic in-memory limiter keyed on the client IP.
limiter = Limiter(key_func=get_remote_address)
app.state.limiter = limiter
app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
app.add_middleware(SlowAPIMiddleware)
# Global rate limit applied via decorator.
@app.get("/")
@limiter.limit("100/minute")
async def index(request: Request):
    return {"message": "Hello World"}


# Per-endpoint limit.
@app.get("/search")
@limiter.limit("10/minute")
async def search(request: Request, q: str):
    return {"query": q}


# Multiple stacked limits: both must hold.
@app.post("/upload")
@limiter.limit("5/minute")
@limiter.limit("100/hour")
async def upload(request: Request):
    return {"message": "File uploaded"}
# Advanced rate limiter backed by Redis.
class RedisRateLimiter:
    def __init__(self, redisclient):
        self.redis = redisclient

    def slidingwindow(
        self,
        key: str,
        maxrequests: int,
        windowseconds: int
    ) -> tuple[bool, dict]:
        """
        Sliding window rate limiter using a Redis sorted set.

        Returns: (allowed, info) where info carries limit/remaining/reset.
        """
        now = time.time()
        windowstart = now - windowseconds

        pipe = self.redis.pipeline()
        # Drop entries older than the window.
        pipe.zremrangebyscore(key, 0, windowstart)
        # Record the current request.
        pipe.zadd(key, {f"{now}": now})
        # Count requests inside the window.
        pipe.zcard(key)
        # Expire idle keys so they do not accumulate.
        pipe.expire(key, windowseconds)
        results = pipe.execute()

        # zcard's result sits at index 2 of the pipeline results.
        requestcount = results[2]

        return requestcount <= maxrequests, {
            'limit': maxrequests,
            'remaining': max(0, maxrequests - requestcount),
            'reset': int(now + windowseconds)
        }

    def tokenbucket(
        self,
        key: str,
        capacity: int,
        refillrate: float,
        cost: int = 1
    ) -> tuple[bool, dict]:
        """
        Token bucket rate limiter: refills `refillrate` tokens/second up to
        `capacity`; each call consumes `cost` tokens when allowed.

        Returns: (allowed, info)
        """
        now = time.time()

        # Current bucket state (empty dict on first use).
        data = self.redis.hgetall(key)
        tokens = float(data.get('tokens', capacity))
        lastrefill = float(data.get('lastrefill', now))

        # Refill proportionally to the elapsed time.
        timepassed = now - lastrefill
        tokenstoadd = timepassed * refillrate
        tokens = min(capacity, tokens + tokenstoadd)

        allowed = tokens >= cost
        if allowed:
            tokens -= cost

        # Persist the new state.
        self.redis.hset(key, mapping={
            'tokens': str(tokens),
            'lastrefill': str(now)
        })
        self.redis.expire(key, 3600)

        return allowed, {
            'limit': capacity,
            'remaining': int(tokens),
            'retryafter': int((cost - tokens) / refillrate) if not allowed else 0
        }
# FastAPI dependency enforcing a sliding-window limit (100 req/min).
async def checkratelimit(request: Request):
    rl = RedisRateLimiter(redisclient)

    # Key on the authenticated user id, falling back to the client IP.
    userid = getattr(request.state, 'userid', None)
    key = f"rl:{userid or request.client.host}"

    allowed, info = rl.slidingwindow(key, maxrequests=100, windowseconds=60)

    # Stash the limit info so the header middleware can expose it.
    request.state.ratelimitinfo = info

    if not allowed:
        raise HTTPException(
            status_code=429,
            detail={
                "error": "Too Many Requests",
                "limit": info['limit'],
                "remaining": info['remaining'],
                "reset": info['reset']
            }
        )


# Middleware adding the X-RateLimit-* headers to every response.
@app.middleware("http")
async def addratelimitheaders(request: Request, callnext):
    response = await callnext(request)
    if hasattr(request.state, 'ratelimitinfo'):
        info = request.state.ratelimitinfo
        response.headers['X-RateLimit-Limit'] = str(info['limit'])
        response.headers['X-RateLimit-Remaining'] = str(info['remaining'])
        response.headers['X-RateLimit-Reset'] = str(info['reset'])
    return response


# Route protected by the custom rate-limit dependency.
from fastapi import Depends

@app.get("/api/data", dependencies=[Depends(checkratelimit)])
async def getdata(request: Request):
    return {"data": "Protected endpoint"}
# Per-tier hourly quotas.
USERTIERS = {
    'free': {'requestsperhour': 100},
    'basic': {'requestsperhour': 1000},
    'premium': {'requestsperhour': 10000}
}


# Dependency applying the hourly limit matching the caller's tier.
async def tieredratelimit(request: Request):
    usertier = getattr(request.state, 'usertier', 'free')
    limit = USERTIERS[usertier]['requestsperhour']

    rl = RedisRateLimiter(redisclient)
    userid = getattr(request.state, 'userid', request.client.host)
    key = f"rl:hourly:{usertier}:{userid}"

    allowed, info = rl.slidingwindow(key, maxrequests=limit, windowseconds=3600)

    if not allowed:
        raise HTTPException(
            status_code=429,
            detail={
                "error": "Rate limit exceeded",
                "tier": usertier,
                "limit": limit
            }
        )


@app.get("/api/premium", dependencies=[Depends(tieredratelimit)])
async def premiumendpoint():
    return {"message": "Premium data"}
// utils/requestQueue.js
const Queue = require('bull');

/**
 * Thin wrapper around a Bull queue used to defer heavy API requests.
 */
class RequestQueue {
  constructor(redis) {
    this.queue = new Queue('api-requests', {
      redis: {
        host: redis.options.host,
        port: redis.options.port
      }
    });

    // Worker: process up to 10 jobs concurrently.
    this.queue.process(10, (job) => this.processRequest(job.data));
  }

  /**
   * Enqueue a request with retry/backoff policy; returns the Bull job id.
   */
  async addRequest(requestData, priority = 1) {
    const jobOptions = {
      priority,
      attempts: 3,
      backoff: {
        type: 'exponential',
        delay: 2000
      },
      removeOnComplete: true,
      removeOnFail: false
    };
    const job = await this.queue.add(requestData, jobOptions);
    return job.id;
  }

  /**
   * Handle one queued request (simulated here with a 1s delay).
   */
  async processRequest(data) {
    console.log('Processing request:', data);
    await new Promise((resolve) => setTimeout(resolve, 1000));
    return { success: true, data };
  }

  /**
   * Look up a job's state, progress and result; returns
   * { status: 'notfound' } when the job no longer exists.
   */
  async getJobStatus(jobId) {
    const job = await this.queue.getJob(jobId);
    if (!job) {
      return { status: 'notfound' };
    }
    const state = await job.getState();
    return {
      status: state,
      progress: job.progress(),
      result: job.returnvalue
    };
  }
}
module.exports = RequestQueue;
// Usage
const requestQueue = new RequestQueue(redis);

// Accept the work, queue it, and reply 202 with a polling URL.
app.post('/api/heavy-operation', async (req, res) => {
  const jobId = await requestQueue.addRequest({
    userId: req.user.id,
    operation: req.body.operation,
    data: req.body.data
  });

  res.status(202).json({
    message: 'Request queued',
    jobid: jobId,
    statusurl: `/api/jobs/${jobId}`
  });
});

// Poll a queued job's status.
app.get('/api/jobs/:jobId', async (req, res) => {
  const status = await requestQueue.getJobStatus(req.params.jobId);
  res.json(status);
});
// middleware/adaptiveThrottle.js
/**
 * Throttles (or sheds) incoming requests based on server load metrics
 * shared through Redis.
 */
class AdaptiveThrottle {
  constructor(redis) {
    this.redis = redis;
    this.metricsKey = 'metrics:server';
  }

  /** Read the current load metrics (cpu %, memory %, active requests). */
  async getServerLoad() {
    const metrics = await this.redis.hgetall(this.metricsKey);
    return {
      cpu: parseFloat(metrics.cpu || 0),
      memory: parseFloat(metrics.memory || 0),
      activeRequests: parseInt(metrics.activeRequests || 0)
    };
  }

  /** Publish fresh metrics; they expire after 60s if not refreshed. */
  async updateMetrics(metrics) {
    await this.redis.hset(this.metricsKey, metrics);
    await this.redis.expire(this.metricsKey, 60);
  }

  /** Map load to an artificial delay: the higher the load, the longer the delay. */
  calculateDelay(load) {
    const pressure = Math.max(load.cpu, load.memory);
    if (pressure > 80) return 2000; // 2 seconds
    if (pressure > 60) return 1000; // 1 second
    if (pressure > 40) return 500;  // 500 ms
    return 0;
  }

  /** Express middleware applying the adaptive delay / load shedding. */
  middleware() {
    return async (req, res, next) => {
      const load = await this.getServerLoad();
      const delay = this.calculateDelay(load);

      if (delay > 0) {
        res.setHeader('Retry-After', Math.ceil(delay / 1000));

        // Option 1: shed load outright above 90% cpu or memory.
        if (load.cpu > 90 || load.memory > 90) {
          return res.status(503).json({
            error: 'Service Unavailable',
            message: 'Server is under heavy load',
            retryAfter: Math.ceil(delay / 1000)
          });
        }

        // Option 2: throttle by holding the request for `delay` ms.
        await new Promise((resolve) => setTimeout(resolve, delay));
      }

      // Track in-flight requests in the shared metrics...
      await this.redis.hincrby(this.metricsKey, 'activeRequests', 1);
      // ...and decrement when the response has been sent.
      res.on('finish', async () => {
        await this.redis.hincrby(this.metricsKey, 'activeRequests', -1);
      });

      next();
    };
  }
}
module.exports = AdaptiveThrottle;
// Usage
const throttle = new AdaptiveThrottle(redis);
app.use('/api/', throttle.middleware());

// Background worker publishing cpu/memory metrics.
const os = require('os');

setInterval(async () => {
  // 1-minute load average normalized by core count, as a percentage.
  const cpuUsage = (os.loadavg()[0] / os.cpus().length) * 100;
  const totalMem = os.totalmem();
  const freeMem = os.freemem();
  const memUsage = ((totalMem - freeMem) / totalMem) * 100;

  await throttle.updateMetrics({
    cpu: cpuUsage.toFixed(2),
    memory: memUsage.toFixed(2)
  });
}, 5000); // every 5 seconds
// For multi-server architectures:
// use Redis to share rate-limit state between servers.
const Redis = require('ioredis');
const Redlock = require('redlock');
const redis = new Redis({
host: 'redis-cluster',
port: 6379
});
// Distributed lock manager on top of the Redis connection.
const redlock = new Redlock([redis], {
driftFactor: 0.01, // clock-drift compensation factor
retryCount: 3, // lock acquisition attempts
retryDelay: 200 // ms between attempts
});
/**
 * Fixed-window counter rate limiter guarded by a Redlock distributed lock,
 * so several app servers agree on the same counts. Fails open on errors.
 */
class DistributedRateLimiter {
  constructor(redis, redlock) {
    this.redis = redis;
    this.redlock = redlock;
  }

  /**
   * Check (and consume) one request against `limit` per `window` seconds.
   * Returns { allowed, current, limit, remaining }, or { allowed: true }
   * when Redis/Redlock are unavailable (fail-open).
   */
  async checkLimit(key, limit, window) {
    const lockKey = `lock:${key}`;

    try {
      // Serialize concurrent checks for the same key across servers.
      const lock = await this.redlock.acquire([lockKey], 1000);
      try {
        const count = await this.redis.incr(key);
        // The first hit of the window starts the expiry clock.
        if (count === 1) {
          await this.redis.expire(key, window);
        }
        const allowed = count <= limit;
        return {
          allowed,
          current: count,
          limit,
          remaining: Math.max(0, limit - count)
        };
      } finally {
        await lock.release();
      }
    } catch (error) {
      console.error('Distributed rate limit error:', error);
      // Fail-open on errors.
      return { allowed: true };
    }
  }
}
// Protects downstream services from cascading failures.
class CircuitBreaker {
  /**
   * @param {object} options
   * @param {number} [options.failureThreshold=5] - failures before opening
   * @param {number} [options.resetTimeout=60000] - ms before a half-open probe
   */
  constructor(options = {}) {
    this.failureThreshold = options.failureThreshold || 5;
    this.resetTimeout = options.resetTimeout || 60000;
    this.state = 'CLOSED'; // CLOSED, OPEN, HALFOPEN
    this.failures = 0;
    this.nextAttempt = Date.now();
  }

  /**
   * Run `fn` through the breaker. Throws immediately while OPEN;
   * a success in HALFOPEN closes the circuit again.
   */
  async execute(fn) {
    if (this.state === 'OPEN') {
      const tooEarly = Date.now() < this.nextAttempt;
      if (tooEarly) {
        throw new Error('Circuit breaker is OPEN');
      }
      this.state = 'HALFOPEN';
    }

    let result;
    try {
      result = await fn();
    } catch (error) {
      this.failures++;
      if (this.failures >= this.failureThreshold) {
        this.state = 'OPEN';
        this.nextAttempt = Date.now() + this.resetTimeout;
      }
      throw error;
    }

    if (this.state === 'HALFOPEN') {
      this.state = 'CLOSED';
      this.failures = 0;
    }
    return result;
  }

  /** Express middleware that replies 503 while the circuit is open. */
  middleware() {
    return async (req, res, next) => {
      if (this.state !== 'OPEN') {
        return next();
      }
      return res.status(503).json({
        error: 'Service Unavailable',
        message: 'Circuit breaker is open',
        retryAfter: Math.ceil((this.nextAttempt - Date.now()) / 1000)
      });
    };
  }
}
// Usage: open the circuit after 5 consecutive failures, probe again after 60s.
const breaker = new CircuitBreaker({
failureThreshold: 5,
resetTimeout: 60000
});
app.use('/api/external', breaker.middleware());
app.get('/api/external/data', async (req, res) => {
try {
// NOTE(review): fetch() resolves to a Response object — this likely needs
// `await data.json()` before res.json(); confirm the intended payload.
const data = await breaker.execute(async () => {
return await fetch('https://external-api.com/data');
});
res.json(data);
} catch (error) {
res.status(503).json({
error: 'Service Unavailable',
message: error.message
});
}
});
// utils/rateLimitAnalytics.js
/**
 * Records and reports rate-limit violations in Redis.
 */
class RateLimitAnalytics {
  constructor(redis) {
    this.redis = redis;
  }

  /**
   * Record one violation: a per-day per-endpoint counter plus a capped
   * per-key detail list.
   */
  async recordViolation(key, endpoint, limit) {
    const date = new Date().toISOString().split('T')[0];
    const analyticsKey = `analytics:violations:${date}`;

    await this.redis.hincrby(analyticsKey, endpoint, 1);
    await this.redis.expire(analyticsKey, 7 * 24 * 3600); // keep 7 days

    // Violation details, most recent first.
    await this.redis.lpush(`violations:${key}`, JSON.stringify({
      endpoint,
      limit,
      timestamp: new Date().toISOString()
    }));
    await this.redis.ltrim(`violations:${key}`, 0, 99); // keep the last 100
  }

  /**
   * Per-endpoint violation counts for each of the last `days` days.
   */
  async getViolationStats(days = 7) {
    const stats = {};
    for (let i = 0; i < days; i++) {
      const date = new Date();
      date.setDate(date.getDate() - i);
      const dateStr = date.toISOString().split('T')[0];
      const key = `analytics:violations:${dateStr}`;
      const data = await this.redis.hgetall(key);
      stats[dateStr] = data;
    }
    return stats;
  }

  /**
   * Top offending keys from the global sorted set, with violation counts.
   */
  async getTopViolators(limit = 10) {
    const key = 'analytics:topviolators';
    const violators = await this.redis.zrevrange(key, 0, limit - 1, 'WITHSCORES');

    // ZREVRANGE WITHSCORES returns a flat [member, score, ...] array.
    const result = [];
    for (let i = 0; i < violators.length; i += 2) {
      result.push({
        key: violators[i],
        violations: parseInt(violators[i + 1], 10)
      });
    }
    return result;
  }
}
// Middleware with analytics
const analytics = new RateLimitAnalytics(redis);

// Wrap express-rate-limit so every 429 is recorded in analytics.
const rateLimiterWithAnalytics = (options) => {
  const limiter = rateLimit({
    ...options,
    handler: async (req, res) => {
      // Fall back to the client IP when no keyGenerator was supplied
      // (the original crashed on undefined options.keyGenerator).
      const key = options.keyGenerator ? options.keyGenerator(req) : req.ip;
      await analytics.recordViolation(key, req.path, options.max);

      // Global per-key violation counter.
      await redis.zincrby('analytics:topviolators', 1, key);

      res.status(429).json({
        error: 'Too Many Requests',
        message: 'Rate limit exceeded'
      });
    }
  });
  return limiter;
};
// Statistics endpoint.
// NOTE(review): path suggests admin-only — confirm it sits behind auth middleware.
app.get('/admin/rate-limit/stats', async (req, res) => {
const [violations, topViolators] = await Promise.all([
analytics.getViolationStats(7),
analytics.getTopViolators(20)
]);
res.json({
violationsbyday: violations,
topviolators: topViolators
});
});
// tests/rateLimit.test.js
const request = require('supertest');
const app = require('../app');
const redis = require('../config/redis');

describe('Rate Limiting', () => {
  beforeEach(async () => {
    // Clean Redis between tests so counters start from zero.
    await redis.flushdb();
  });

  describe('Global rate limit', () => {
    it('devrait permettre les requêtes sous la limite', async () => {
      for (let i = 0; i < 10; i++) {
        const res = await request(app).get('/api/data');
        expect(res.statusCode).toBe(200);
      }
    });

    it('devrait bloquer après la limite', async () => {
      // Send 100 requests (the limit).
      for (let i = 0; i < 100; i++) {
        await request(app).get('/api/data');
      }
      // The 101st must be rejected.
      const res = await request(app).get('/api/data');
      expect(res.statusCode).toBe(429);
      expect(res.body.error).toBe('Too Many Requests');
    });

    it('devrait inclure les headers de rate limit', async () => {
      const res = await request(app).get('/api/data');
      expect(res.headers['x-ratelimit-limit']).toBeDefined();
      expect(res.headers['x-ratelimit-remaining']).toBeDefined();
      expect(res.headers['x-ratelimit-reset']).toBeDefined();
    });

    it('devrait reset après la fenêtre', async () => {
      // Fill the limit.
      for (let i = 0; i < 100; i++) {
        await request(app).get('/api/data');
      }
      // Wait for the window to expire.
      await new Promise(resolve => setTimeout(resolve, 60000));
      // Requests should pass again.
      const res = await request(app).get('/api/data');
      expect(res.statusCode).toBe(200);
    }, 70000);
  });

  describe('Tiered rate limiting', () => {
    it('devrait appliquer différentes limites par tier', async () => {
      // NOTE(review): generateToken is assumed to be a test helper defined
      // elsewhere — confirm it is imported in the real test setup.
      const freeUser = { id: 1, tier: 'free' };
      const premiumUser = { id: 2, tier: 'premium' };

      // Free user: 100 requests allowed.
      for (let i = 0; i < 100; i++) {
        const res = await request(app)
          .get('/api/data')
          .set('Authorization', `Bearer ${generateToken(freeUser)}`);
        expect(res.statusCode).toBe(200);
      }

      // The 101st is blocked.
      const freeBlocked = await request(app)
        .get('/api/data')
        .set('Authorization', `Bearer ${generateToken(freeUser)}`);
      expect(freeBlocked.statusCode).toBe(429);

      // A premium user can go further.
      for (let i = 0; i < 200; i++) {
        const res = await request(app)
          .get('/api/data')
          .set('Authorization', `Bearer ${generateToken(premiumUser)}`);
        expect(res.statusCode).toBe(200);
      }
    });
  });
});
Le rate limiting et le throttling sont essentiels pour protéger vos APIs contre les abus, répartir équitablement les ressources et préserver la qualité de service sous charge.
Points clés : choisissez l'algorithme (fenêtre glissante, token bucket, leaky bucket) selon votre profil de trafic ; centralisez l'état dans Redis pour les architectures distribuées ; exposez les headers de rate limit (RateLimit-*, Retry-After) ; adaptez les limites au tier de l'utilisateur ; et surveillez les violations pour ajuster vos seuils.
Mots-clés: rate limiting, throttling, API protection, Express.js, FastAPI, Redis, token bucket, sliding window, distributed rate limiting, circuit breaker, backpressure, API security
Cet article est vivant — corrections, contre-arguments et retours de production sont les bienvenus. Trois canaux, choisissez celui qui vous convient.