fixed up rate limiting: swap in a multi-window, cost-aware limiter (rate-limiter-new), register rules at runtime via addRateLimitRules, and enforce them during job processing with BullMQ manual rate limiting

Boki 2025-07-06 18:53:02 -04:00
parent a616c92656
commit a7146a3f57
15 changed files with 912 additions and 186 deletions

View file

@@ -20,7 +20,7 @@ export { DeadLetterQueueHandler } from './dlq-handler';
export { QueueMetricsCollector } from './queue-metrics';
// Rate limiting
export { QueueRateLimiter } from './rate-limiter';
export { QueueRateLimiter } from './rate-limiter-new';
// Types
export type {

View file

@@ -3,8 +3,9 @@ import type { CacheProvider } from '@stock-bot/cache';
import { createCache } from '@stock-bot/cache';
import type { HandlerRegistry } from '@stock-bot/handler-registry';
import { getLogger } from '@stock-bot/logger';
import Redis from 'ioredis';
import { Queue, type QueueWorkerConfig } from './queue';
import { QueueRateLimiter } from './rate-limiter';
import { QueueRateLimiter } from './rate-limiter-new';
import { getFullQueueName, parseQueueName } from './service-utils';
import type {
GlobalStats,
@@ -37,6 +38,7 @@ export class QueueManager {
private caches = new Map<string, CacheProvider>();
private rateLimiter?: QueueRateLimiter;
private redisConnection: ReturnType<typeof getRedisConnection>;
private ioredisConnection?: Redis;
private isShuttingDown = false;
private shutdownPromise: Promise<void> | null = null;
private config: QueueManagerConfig;
@@ -65,16 +67,19 @@ export class QueueManager {
this.handlerRegistry = handlerRegistry;
this.logger = logger || getLogger('QueueManager');
this.redisConnection = getRedisConnection(config.redis);
// Create ioredis connection for rate limiter
this.ioredisConnection = new Redis({
host: config.redis.host,
port: config.redis.port,
password: config.redis.password,
db: config.redis.db || 0,
maxRetriesPerRequest: 3,
enableReadyCheck: true,
});
// Initialize rate limiter if rules are provided
if (config.rateLimitRules && config.rateLimitRules.length > 0) {
this.rateLimiter = new QueueRateLimiter(this.redisConnection, this.logger);
config.rateLimitRules.forEach(rule => {
if (this.rateLimiter) {
this.rateLimiter.addRule(rule);
}
});
}
// Rate limiter will be initialized when rules are added dynamically
// No longer initialized from config
// Auto-discover routes if enabled and registry provided
if (config.serviceName && config.autoDiscoverHandlers !== false && handlerRegistry) {
@@ -153,6 +158,7 @@ export class QueueManager {
startWorker: workers > 0,
handlerRegistry: options.handlerRegistry || this.handlerRegistry,
serviceName: this.config.serviceName,
queueManager: this,
};
const queue = new Queue(
@@ -169,16 +175,7 @@ export class QueueManager {
// Automatically initialize batch cache for the queue
this.initializeBatchCacheSync(queueName);
// Add queue-specific rate limit rules
if (this.rateLimiter && mergedOptions.rateLimitRules) {
mergedOptions.rateLimitRules.forEach(rule => {
// Ensure queue name is set for queue-specific rules
const ruleWithQueue = { ...rule, queueName };
if (this.rateLimiter) {
this.rateLimiter.addRule(ruleWithQueue);
}
});
}
// Rate limit rules are now added via addRateLimitRules after handlers are initialized
this.logger.info('Queue created with batch cache', {
queueName,
@@ -303,23 +300,44 @@ export class QueueManager {
/**
* Add a rate limit rule
*/
addRateLimitRule(rule: RateLimitRule): void {
async addRateLimitRule(rule: RateLimitRule): Promise<void> {
if (!this.rateLimiter) {
this.rateLimiter = new QueueRateLimiter(this.redisConnection, this.logger);
if (!this.ioredisConnection) {
throw new Error('IoRedis connection not initialized');
}
this.rateLimiter = new QueueRateLimiter(this.ioredisConnection, this.logger);
}
this.rateLimiter.addRule(rule);
await this.rateLimiter.addRule(rule);
}
/**
* Check rate limits for a job
* Add multiple rate limit rules at once
*/
async addRateLimitRules(rules: RateLimitRule[]): Promise<void> {
if (!this.rateLimiter) {
if (!this.ioredisConnection) {
throw new Error('IoRedis connection not initialized');
}
this.rateLimiter = new QueueRateLimiter(this.ioredisConnection, this.logger);
}
await Promise.all(rules.map(rule => this.rateLimiter!.addRule(rule)));
this.logger.info('Added rate limit rules to QueueManager', {
ruleCount: rules.length,
rules,
});
}
/**
* Check rate limits for a job - overloaded to support both interfaces
*/
async checkRateLimit(
queueName: string,
handler: string,
operation: string
handlerOrQueueName: string,
operationOrHandler?: string,
operationIfThreeParams?: string
): Promise<{
allowed: boolean;
retryAfter?: number;
retryDelayMs?: number;
remainingPoints?: number;
appliedRule?: RateLimitRule;
}> {
@@ -327,7 +345,27 @@ export class QueueManager {
return { allowed: true };
}
return await this.rateLimiter.checkLimit(queueName, handler, operation);
// Support both 2-parameter and 3-parameter calls
let handler: string;
let operation: string | undefined;
if (operationIfThreeParams !== undefined) {
// 3-parameter call: checkRateLimit(queueName, handler, operation)
handler = operationOrHandler!;
operation = operationIfThreeParams;
} else {
// 2-parameter call: checkRateLimit(handler, operation)
handler = handlerOrQueueName;
operation = operationOrHandler;
}
// The new rate limiter only needs handler and operation
const result = await this.rateLimiter.checkLimit(handler, operation);
return {
allowed: result.allowed,
retryDelayMs: result.retryDelayMs,
remainingPoints: result.remainingPoints,
};
}
/**
@@ -342,7 +380,12 @@ export class QueueManager {
};
}
return await this.rateLimiter.getStatus(queueName, handler, operation);
// The new rate limiter only needs handler and operation
const status = await this.rateLimiter.getStatus(handler, operation);
return {
queueName,
...status
};
}
/**
@@ -467,6 +510,16 @@ export class QueueManager {
});
await Promise.all(cacheShutdownPromises);
// Close ioredis connection
if (this.ioredisConnection) {
try {
await this.ioredisConnection.quit();
this.logger.debug('IoRedis connection closed');
} catch (error) {
this.logger.warn('Error closing ioredis connection', { error: (error as Error).message });
}
}
// Clear collections
this.queues.clear();
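With this change, rules no longer come in through QueueManagerConfig; a service registers them at runtime once its handlers are known, and job processing asks the manager whether a job may run. Below is a minimal sketch of both calls, assuming an already constructed QueueManager instance named `manager` (the handler names, points, and costs are illustrative, not taken from the repository):

// Illustrative rules: a handler-level pool plus a costlier operation drawing from it.
await manager.addRateLimitRules([
  { level: 'handler', handler: 'alpaca', config: { limits: [{ points: 200, duration: 60 }] } },
  { level: 'operation', handler: 'alpaca', operation: 'getBars', config: { cost: 5 } },
]);

// Both call shapes are accepted after this commit:
const byHandler = await manager.checkRateLimit('alpaca', 'getBars');           // (handler, operation)
const byQueue = await manager.checkRateLimit('quotes', 'alpaca', 'getBars');   // (queueName, handler, operation)

if (!byHandler.allowed) {
  console.log(`retry in ${byHandler.retryDelayMs ?? 0}ms, ${byHandler.remainingPoints ?? 0} points left`);
}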

View file

@@ -20,6 +20,7 @@ export interface QueueWorkerConfig {
startWorker?: boolean;
handlerRegistry?: HandlerRegistry;
serviceName?: string;
queueManager?: any; // Reference to queue manager for rate limiting
}
/**
@@ -35,6 +36,7 @@ export class Queue {
private readonly logger: Logger;
private readonly handlerRegistry?: HandlerRegistry;
private serviceName?: string;
private queueManager?: any;
constructor(
queueName: string,
@@ -48,6 +50,7 @@ export class Queue {
this.logger = logger || console;
this.handlerRegistry = config.handlerRegistry;
this.serviceName = config.serviceName;
this.queueManager = config.queueManager;
this.logger.debug('Queue constructor called', {
queueName,
@@ -332,13 +335,18 @@ export class Queue {
const connection = getRedisConnection(this.redisConfig);
for (let i = 0; i < workerCount; i++) {
const worker = new Worker(this.queueName, this.processJob.bind(this), {
const worker: Worker = new Worker(this.queueName, async (job: Job) => this.processJob(job, worker), {
connection,
concurrency,
maxStalledCount: 3,
stalledInterval: 30000,
// Add a name to identify the worker
name: `${this.serviceName || 'unknown'}_worker_${i}`,
// Enable manual rate limiting
limiter: {
max: 100,
duration: 1,
},
});
this.logger.info(`Starting worker ${i + 1}/${workerCount} for queue`, {
@@ -348,7 +356,7 @@ export class Queue {
});
// Setup worker event handlers
worker.on('completed', job => {
worker.on('completed', (job: Job) => {
this.logger.trace('Job completed', {
queueName: this.queueName,
jobId: job.id,
@@ -357,17 +365,27 @@ export class Queue {
});
});
worker.on('failed', (job, err) => {
this.logger.error('Job failed', {
queueName: this.queueName,
jobId: job?.id,
handler: job?.data?.handler,
operation: job?.data?.operation,
error: err.message,
});
worker.on('failed', (job: Job | undefined, err: Error) => {
// Only log as error if it's not a rate limit error
if (err.name !== 'RateLimitError') {
this.logger.error('Job failed', {
queueName: this.queueName,
jobId: job?.id,
handler: job?.data?.handler,
operation: job?.data?.operation,
error: err.message,
});
} else {
this.logger.debug('Job rate limited', {
queueName: this.queueName,
jobId: job?.id,
handler: job?.data?.handler,
operation: job?.data?.operation,
});
}
});
worker.on('error', error => {
worker.on('error', (error: Error) => {
this.logger.error('Worker error', {
queueName: this.queueName,
workerId: i,
@@ -386,9 +404,16 @@ export class Queue {
}
/**
* Process a job using the handler registry
* Process a job using the handler registry with rate limiting
*/
private async processJob(job: Job): Promise<unknown> {
private async processJob(job: Job, worker: Worker): Promise<unknown> {
this.logger.debug('Processing job', {
jobId: job.id,
jobName: job.name,
queueName: this.queueName,
hasQueueManager: !!this.queueManager,
jobData: job.data
});
const { handler, operation, payload }: JobData = job.data;
this.logger.trace('Processing job', {
@@ -404,6 +429,42 @@ export class Queue {
throw new Error('Handler registry not configured for worker processing');
}
// Check rate limits if available
if (this.queueManager && this.queueManager.checkRateLimit) {
this.logger.debug('Checking rate limits', {
handler,
operation,
queueManager: this.queueManager.constructor.name
});
const rateLimitCheck = await this.queueManager.checkRateLimit(
handler,
operation
);
this.logger.debug('Rate limit check result', {
allowed: rateLimitCheck.allowed,
retryDelayMs: rateLimitCheck.retryDelayMs,
handler,
operation
});
if (!rateLimitCheck.allowed) {
this.logger.trace('Rate limit exceeded, throwing Worker.RateLimitError', {
id: job.id,
handler,
operation,
retryDelayMs: rateLimitCheck.retryDelayMs
});
// Use BullMQ's manual rate limiting
// First inform the worker about the rate limit delay
await worker.rateLimit(rateLimitCheck.retryDelayMs || 1000);
// Then throw the RateLimitError
throw Worker.RateLimitError();
}
}
this.logger.debug('Looking up handler in registry', {
handler,
operation,
@@ -419,6 +480,7 @@ export class Queue {
const result = await jobHandler(payload);
this.logger.info(`[Queue] Job ${job.id} - ${handler}:${operation} completed successfully`);
this.logger.trace('Job completed successfully', {
id: job.id,
handler,
@@ -428,6 +490,11 @@ export class Queue {
return result;
} catch (error) {
// Re-throw RateLimitError without logging as error
if (error === Worker.RateLimitError() || (error as any).name === 'RateLimitError') {
throw error;
}
this.logger.error('Job processing failed', {
id: job.id,
handler,
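The processor now follows BullMQ's manual rate-limiting protocol: when a check fails it first tells the worker how long to hold off, then throws the marker error so the job is moved back to waiting instead of being failed. A condensed sketch of that flow, assuming a `queueManager` instance in scope (queue name, host, and port are placeholders):

import { Worker, type Job } from 'bullmq';

const worker: Worker = new Worker(
  'example-queue',
  async (job: Job) => {
    const { handler, operation, payload } = job.data;
    const check = await queueManager.checkRateLimit(handler, operation);
    if (!check.allowed) {
      // Pause the worker for the suggested delay (1000 ms fallback, as in the diff)...
      await worker.rateLimit(check.retryDelayMs ?? 1000);
      // ...then throw the special error so BullMQ re-queues the job rather than failing it.
      throw Worker.RateLimitError();
    }
    // Normal processing would dispatch into the handler registry here.
    return payload;
  },
  { connection: { host: 'localhost', port: 6379 }, limiter: { max: 100, duration: 1 } }
);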

View file

@@ -0,0 +1,380 @@
import { RateLimiterRedis, type IRateLimiterRedisOptions } from 'rate-limiter-flexible';
import type Redis from 'ioredis';
import type { RateLimitWindow, RateLimitConfig, RateLimitRule } from './types';
// Logger interface for type safety
interface Logger {
info(message: string, meta?: Record<string, unknown>): void;
error(message: string, meta?: Record<string, unknown>): void;
warn(message: string, meta?: Record<string, unknown>): void;
debug(message: string, meta?: Record<string, unknown>): void;
trace(message: string, meta?: Record<string, unknown>): void;
}
interface RateLimiterEntry {
windows: Array<{
limiter: RateLimiterRedis;
window: RateLimitWindow;
}>;
cost: number;
}
export interface RateLimitResult {
allowed: boolean;
retryDelayMs?: number;
remainingPoints?: number;
cost?: number;
}
/**
* Enhanced rate limiter that supports:
* - Multiple time windows per rule
* - Variable costs per operation
* - Hierarchical rules (operation > handler > queue > global)
*/
export class QueueRateLimiter {
private limiters = new Map<string, RateLimiterEntry>();
private readonly logger: Logger;
constructor(
private redisClient: Redis,
logger?: Logger
) {
this.logger = logger || console;
this.logger.debug('QueueRateLimiter initialized', {
redisClientType: typeof this.redisClient,
redisClientKeys: this.redisClient ? Object.keys(this.redisClient).slice(0, 10) : [],
hasClient: !!this.redisClient,
isIoRedis: this.redisClient && this.redisClient.constructor.name === 'Redis'
});
}
/**
* Add a rate limit rule
*/
async addRule(rule: RateLimitRule): Promise<void> {
const key = this.getRuleKey(rule);
// Create array of limiters for each window
const windows: RateLimiterEntry['windows'] = [];
const limits = rule.config.limits || [];
this.logger.debug('Adding rate limit rule', {
key,
rule,
windowCount: limits.length,
cost: rule.config.cost
});
for (const window of limits) {
const limiterOptions: IRateLimiterRedisOptions = {
storeClient: this.redisClient as any,
keyPrefix: `rate_limit:${key}:${window.duration}s`,
points: window.points,
duration: window.duration,
blockDuration: window.blockDuration || 0,
};
this.logger.debug('Creating RateLimiterRedis', {
keyPrefix: limiterOptions.keyPrefix,
points: limiterOptions.points,
duration: limiterOptions.duration,
hasRedisClient: !!this.redisClient,
redisClientType: typeof this.redisClient
});
try {
const limiter = new RateLimiterRedis(limiterOptions);
windows.push({ limiter, window });
} catch (error) {
this.logger.error('Failed to create RateLimiterRedis', { error, limiterOptions });
throw error;
}
}
// Default cost: operation-level rules default to 1; handler/queue/global rules default to 0 (they define the pool rather than consume from it)
const defaultCost = rule.level === 'operation' ? 1 : 0;
this.limiters.set(key, {
windows,
cost: rule.config.cost !== undefined ? rule.config.cost : defaultCost
});
this.logger.info('Rate limit rule added', {
key,
level: rule.level,
handler: rule.handler,
operation: rule.operation,
windows: windows.length,
cost: rule.config.cost !== undefined ? rule.config.cost : defaultCost
});
}
/**
* Check if an operation is allowed based on rate limits
*/
async checkLimit(handler: string, operation?: string): Promise<RateLimitResult> {
// Build keys to check from most specific to least specific
const keysToCheck: string[] = [];
if (operation) {
keysToCheck.push(this.getRuleKey({ level: 'operation', handler, operation, config: {} } as RateLimitRule));
}
keysToCheck.push(this.getRuleKey({ level: 'handler', handler, config: {} } as RateLimitRule));
keysToCheck.push(this.getRuleKey({ level: 'queue', config: {} } as RateLimitRule));
keysToCheck.push(this.getRuleKey({ level: 'global', config: {} } as RateLimitRule));
this.logger.debug('Checking rate limits', {
handler,
operation,
keysToCheck,
availableKeys: Array.from(this.limiters.keys())
});
// First, find the cost from the most specific rule
let cost = 1; // Default cost
for (const key of keysToCheck) {
const entry = this.limiters.get(key);
if (entry && entry.cost !== undefined) {
cost = entry.cost;
this.logger.info(`[RateLimiter] Using cost ${cost} from rule: ${key}`);
this.logger.debug('Using cost from rule', {
key,
cost,
handler,
operation
});
break;
}
}
// Then find the rate limit windows from the first matching rule that has windows
let windowEntry: RateLimiterEntry | undefined;
let appliedKey: string | undefined;
for (const key of keysToCheck) {
const entry = this.limiters.get(key);
if (entry && entry.windows.length > 0) {
windowEntry = entry;
appliedKey = key;
break;
}
}
if (!windowEntry) {
this.logger.debug('No rate limit rules found', {
handler,
operation
});
return { allowed: true };
}
this.logger.info(`[RateLimiter] Applying rate limit rule: ${appliedKey}`, {
appliedKey,
windowCount: windowEntry.windows.length,
cost,
handler,
operation
});
// Check all windows with the determined cost
let maxRetryDelayMs = 0;
this.logger.debug('Checking rate limit windows', {
handler,
operation,
cost,
windowCount: windowEntry.windows.length
});
// Special handling for 0-cost operations - they should always pass
if (cost === 0) {
this.logger.info(`[RateLimiter] Zero-cost operation ${handler}:${operation || 'N/A'} - allowing without consuming points`);
return {
allowed: true,
cost: 0
};
}
for (const { limiter, window } of windowEntry.windows) {
// For handler-level rate limits, all operations share the same pool
// Use the applied key to determine the consumer key
const consumerKey = appliedKey?.startsWith('handler:') ? handler :
(operation ? `${handler}:${operation}` : handler);
try {
// First check current state without consuming
const currentState = await limiter.get(consumerKey);
const now = Date.now();
this.logger.info(`[RateLimiter] Current state before consume:`, {
handler,
operation,
consumerKey,
cost,
window: `${window.points}pts/${window.duration}s`,
consumedPoints: currentState?.consumedPoints || 0,
remainingPoints: currentState?.remainingPoints ?? window.points,
msBeforeNext: currentState?.msBeforeNext || 0,
currentTime: new Date(now).toISOString(),
resetTime: currentState?.msBeforeNext ? new Date(now + currentState.msBeforeNext).toISOString() : 'N/A',
hasState: !!currentState,
stateDetails: currentState ? JSON.stringify(currentState) : 'no state'
});
// Try to consume points
this.logger.info(`[RateLimiter] Attempting to consume ${cost} points for ${consumerKey}`);
const result = await limiter.consume(consumerKey, cost);
this.logger.info(`[RateLimiter] Successfully consumed ${cost} points, ${result.remainingPoints} remaining`);
this.logger.debug('Consumed points successfully', {
handler,
operation,
consumerKey,
window: `${window.points}pts/${window.duration}s`,
cost,
remainingPoints: result.remainingPoints,
msBeforeNext: result.msBeforeNext
});
this.logger.trace('Rate limit window passed', {
handler,
operation,
window: `${window.points}pts/${window.duration}s`,
cost
});
} catch (rejRes: any) {
// Rate limit exceeded for this window
const retryDelayMs = rejRes.msBeforeNext || window.blockDuration || 1000;
maxRetryDelayMs = Math.max(maxRetryDelayMs, retryDelayMs);
this.logger.info(`[RateLimiter] RATE LIMIT HIT: ${consumerKey} - ${cost} points rejected, retry in ${retryDelayMs}ms`, {
consumedPoints: rejRes.consumedPoints || 0,
remainingPoints: rejRes.remainingPoints || 0,
totalPoints: rejRes.totalPoints || window.points,
isFirstInDuration: rejRes.isFirstInDuration || false,
msBeforeNext: rejRes.msBeforeNext || 0,
retryDelayMs
});
this.logger.trace('Rate limit exceeded', {
handler,
operation,
consumerKey,
window: `${window.points}pts/${window.duration}s`,
cost,
remainingPoints: rejRes.remainingPoints || 0,
retryDelayMs: maxRetryDelayMs
});
return {
allowed: false,
retryDelayMs: maxRetryDelayMs,
remainingPoints: rejRes.remainingPoints || 0,
cost
};
}
}
// All windows passed
return {
allowed: true,
cost
};
}
/**
* Get rule key for storing rate limiter
*/
private getRuleKey(rule: RateLimitRule): string {
switch (rule.level) {
case 'global':
return 'global';
case 'queue':
return `queue:${rule.queueName || 'default'}`;
case 'handler':
return `handler:${rule.handler}`;
case 'operation':
return `operation:${rule.handler}:${rule.operation}`;
default:
return rule.level;
}
}
/**
* Get current rate limit status
*/
async getStatus(handler: string, operation?: string): Promise<any> {
// Build keys to check from most specific to least specific
const keysToCheck: string[] = [];
if (operation) {
keysToCheck.push(this.getRuleKey({ level: 'operation', handler, operation, config: {} } as RateLimitRule));
}
keysToCheck.push(this.getRuleKey({ level: 'handler', handler, config: {} } as RateLimitRule));
keysToCheck.push(this.getRuleKey({ level: 'queue', config: {} } as RateLimitRule));
keysToCheck.push(this.getRuleKey({ level: 'global', config: {} } as RateLimitRule));
// Find the first matching rule
let entry: RateLimiterEntry | undefined;
let appliedKey: string | undefined;
for (const key of keysToCheck) {
entry = this.limiters.get(key);
if (entry) {
appliedKey = key;
break;
}
}
if (!entry) {
return {
handler,
operation,
hasRule: false
};
}
// Get status for all windows
const consumerKey = operation ? `${handler}:${operation}` : handler;
const windows = await Promise.all(
entry.windows.map(async ({ limiter, window }) => {
const result = await limiter.get(consumerKey);
return {
points: window.points,
duration: window.duration,
remaining: result?.remainingPoints ?? window.points,
resetIn: result?.msBeforeNext ?? 0,
};
})
);
return {
handler,
operation,
hasRule: true,
appliedKey,
cost: entry.cost,
windows
};
}
/**
* Reset rate limits for a specific handler/operation
*/
async reset(handler: string, operation?: string): Promise<void> {
const keysToCheck: string[] = [];
if (operation) {
keysToCheck.push(this.getRuleKey({ level: 'operation', handler, operation, config: {} } as RateLimitRule));
}
keysToCheck.push(this.getRuleKey({ level: 'handler', handler, config: {} } as RateLimitRule));
for (const key of keysToCheck) {
const entry = this.limiters.get(key);
if (entry) {
await Promise.all(
entry.windows.map(({ limiter }) => limiter.delete(handler))
);
}
}
this.logger.info('Rate limits reset', { handler, operation });
}
}
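Taken together, the new limiter is driven as in the sketch below: handler rules define the shared pool (possibly across several windows), operation rules mostly just attach a cost, and checkLimit resolves the most specific match. The Redis options and the concrete points, durations, and names are illustrative:

import Redis from 'ioredis';
import { QueueRateLimiter } from './rate-limiter-new';

const redis = new Redis({ host: 'localhost', port: 6379 });
const limiter = new QueueRateLimiter(redis);

// Handler-level pool with two windows: 10 points/second and 200 points/minute.
await limiter.addRule({
  level: 'handler',
  handler: 'alpaca',
  config: { limits: [{ points: 10, duration: 1 }, { points: 200, duration: 60 }] },
});

// Operation-level rule with no windows of its own; it only raises the cost per call.
await limiter.addRule({
  level: 'operation',
  handler: 'alpaca',
  operation: 'getBars',
  config: { cost: 5 },
});

const result = await limiter.checkLimit('alpaca', 'getBars');
if (!result.allowed) {
  console.log(`rate limited, retry in ${result.retryDelayMs}ms`);
}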

View file

@@ -1,5 +1,5 @@
import { RateLimiterRedis, RateLimiterRes } from 'rate-limiter-flexible';
import type { RateLimitConfig as BaseRateLimitConfig, RateLimitRule } from './types';
import type { RateLimitWindow, RateLimitConfig, RateLimitRule } from './types';
// Logger interface for type safety
interface Logger {
@@ -9,13 +9,16 @@ interface Logger {
debug(message: string, meta?: Record<string, unknown>): void;
}
// Extend the base config to add rate-limiter specific fields
export interface RateLimitConfig extends BaseRateLimitConfig {
keyPrefix?: string;
interface RateLimiterEntry {
windows: Array<{
limiter: RateLimiterRedis;
window: RateLimitWindow;
}>;
cost: number;
}
export class QueueRateLimiter {
private limiters = new Map<string, RateLimiterRedis>();
private limiters = new Map<string, RateLimiterEntry>();
private rules: RateLimitRule[] = [];
private readonly logger: Logger;
@@ -33,23 +36,33 @@ export class QueueRateLimiter {
this.rules.push(rule);
const key = this.getRuleKey(rule.level, rule.queueName, rule.handler, rule.operation);
const limiter = new RateLimiterRedis({
storeClient: this.redisClient,
keyPrefix: `rl:${key}`,
points: rule.config.points,
duration: rule.config.duration,
blockDuration: rule.config.blockDuration || 0,
// Extract limits and cost from config
const limits = rule.config.limits || [];
const cost = rule.config.cost || 1;
// Create rate limiters for each window
const windows = limits.map((window, index) => {
const limiter = new RateLimiterRedis({
storeClient: this.redisClient,
keyPrefix: `rl:${key}:${index}`,
points: window.points,
duration: window.duration,
blockDuration: window.blockDuration || 0,
});
return { limiter, window };
});
this.limiters.set(key, limiter);
this.limiters.set(key, { windows, cost });
this.logger.info('Rate limit rule added', {
level: rule.level,
queueName: rule.queueName,
handler: rule.handler,
operation: rule.operation,
points: rule.config.points,
duration: rule.config.duration,
windows: limits.length,
cost,
});
}
@@ -57,6 +70,7 @@ export class QueueRateLimiter {
* Check if a job can be processed based on rate limits
* Uses hierarchical precedence: operation > handler > queue > global
* The most specific matching rule takes precedence
* Returns the longest wait time if multiple windows are hit
*/
async checkLimit(
queueName: string,
@@ -67,6 +81,7 @@ export class QueueRateLimiter {
retryAfter?: number;
remainingPoints?: number;
appliedRule?: RateLimitRule;
cost?: number;
}> {
const applicableRule = this.getMostSpecificRule(queueName, handler, operation);
@@ -80,28 +95,74 @@ export class QueueRateLimiter {
applicableRule.handler,
applicableRule.operation
);
const limiter = this.limiters.get(key);
const limiterEntry = this.limiters.get(key);
if (!limiter) {
if (!limiterEntry || limiterEntry.windows.length === 0) {
this.logger.warn('Rate limiter not found for rule', { key, rule: applicableRule });
return { allowed: true };
}
try {
const result = await this.consumePoint(
limiter,
this.getConsumerKey(queueName, handler, operation)
);
const consumerKey = this.getConsumerKey(queueName, handler, operation);
const cost = limiterEntry.cost;
// Check all windows and collect results
const windowResults = await Promise.all(
limiterEntry.windows.map(async ({ limiter, window }) => {
try {
// Try to consume points for this window
const result = await limiter.consume(consumerKey, cost);
return {
allowed: true,
remainingPoints: result.remainingPoints,
retryAfter: 0,
};
} catch (rejRes) {
if (rejRes instanceof RateLimiterRes) {
return {
allowed: false,
remainingPoints: rejRes.remainingPoints,
retryAfter: rejRes.msBeforeNext,
};
}
throw rejRes;
}
})
);
// Find if any window rejected the request
const rejectedWindow = windowResults.find(r => !r.allowed);
if (rejectedWindow) {
// Find the longest wait time among all rejected windows
const maxRetryAfter = Math.max(
...windowResults.filter(r => !r.allowed).map(r => r.retryAfter || 0)
);
this.logger.warn('Rate limit exceeded', {
handler,
operation,
cost,
retryAfter: maxRetryAfter,
});
return {
...result,
allowed: false,
retryAfter: maxRetryAfter,
remainingPoints: rejectedWindow.remainingPoints,
appliedRule: applicableRule,
cost,
};
} catch (error) {
this.logger.error('Rate limit check failed', { queueName, handler, operation, error });
// On error, allow the request to proceed
return { allowed: true };
}
// All windows allowed - return the minimum remaining points
const minRemainingPoints = Math.min(...windowResults.map(r => r.remainingPoints || 0));
return {
allowed: true,
remainingPoints: minRemainingPoints,
appliedRule: applicableRule,
cost,
};
}
/**
@@ -144,35 +205,6 @@ export class QueueRateLimiter {
return rule;
}
/**
* Consume a point from the rate limiter
*/
private async consumePoint(
limiter: RateLimiterRedis,
key: string
): Promise<{ allowed: boolean; retryAfter?: number; remainingPoints?: number }> {
try {
const result = await limiter.consume(key);
return {
allowed: true,
remainingPoints: result.remainingPoints,
};
} catch (rejRes) {
if (rejRes instanceof RateLimiterRes) {
this.logger.warn('Rate limit exceeded', {
key,
retryAfter: rejRes.msBeforeNext,
});
return {
allowed: false,
retryAfter: rejRes.msBeforeNext,
remainingPoints: rejRes.remainingPoints,
};
}
throw rejRes;
}
}
/**
* Get rule key for storing rate limiter
@@ -216,13 +248,13 @@ export class QueueRateLimiter {
handler: string;
operation: string;
appliedRule?: RateLimitRule;
limit?: {
level: string;
cost?: number;
windows?: Array<{
points: number;
duration: number;
remaining: number;
resetIn: number;
};
}>;
}> {
const applicableRule = this.getMostSpecificRule(queueName, handler, operation);
@@ -240,9 +272,9 @@ export class QueueRateLimiter {
applicableRule.handler,
applicableRule.operation
);
const limiter = this.limiters.get(key);
const limiterEntry = this.limiters.get(key);
if (!limiter) {
if (!limiterEntry || limiterEntry.windows.length === 0) {
return {
queueName,
handler,
@@ -253,22 +285,27 @@ export class QueueRateLimiter {
try {
const consumerKey = this.getConsumerKey(queueName, handler, operation);
const result = await limiter.get(consumerKey);
const limit = {
level: applicableRule.level,
points: limiter.points,
duration: limiter.duration,
remaining: result?.remainingPoints ?? limiter.points,
resetIn: result?.msBeforeNext ?? 0,
};
// Get status for all windows
const windows = await Promise.all(
limiterEntry.windows.map(async ({ limiter, window }) => {
const result = await limiter.get(consumerKey);
return {
points: window.points,
duration: window.duration,
remaining: result?.remainingPoints ?? window.points,
resetIn: result?.msBeforeNext ?? 0,
};
})
);
return {
queueName,
handler,
operation,
appliedRule: applicableRule,
limit,
cost: limiterEntry.cost,
windows,
};
} catch (error) {
this.logger.error('Failed to get rate limit status', {
@ -282,6 +319,7 @@ export class QueueRateLimiter {
handler,
operation,
appliedRule: applicableRule,
cost: limiterEntry.cost,
};
}
}
@@ -297,9 +335,12 @@ export class QueueRateLimiter {
if (rule) {
const key = this.getRuleKey(rule.level, rule.queueName, rule.handler, rule.operation);
const limiter = this.limiters.get(key);
if (limiter) {
await limiter.delete(consumerKey);
const limiterEntry = this.limiters.get(key);
if (limiterEntry) {
// Reset all windows for this consumer
await Promise.all(
limiterEntry.windows.map(({ limiter }) => limiter.delete(consumerKey))
);
}
}
} else {
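The legacy limiter keeps its queue-aware, three-argument interface, but each rule can now carry several windows and a cost, and a rejection reports the longest wait among the windows that were hit. A rough usage sketch with illustrative values, assuming an already constructed legacy QueueRateLimiter named `legacyLimiter`:

// Queue-level rule with a single window (values are examples only).
legacyLimiter.addRule({
  level: 'queue',
  queueName: 'quotes',
  config: { limits: [{ points: 100, duration: 60, blockDuration: 30 }], cost: 1 },
});

const check = await legacyLimiter.checkLimit('quotes', 'alpaca', 'getQuote');
if (!check.allowed) {
  // retryAfter is the largest msBeforeNext across the rejected windows.
  console.log(`blocked for ~${check.retryAfter}ms by a ${check.appliedRule?.level}-level rule`);
}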

View file

@@ -61,8 +61,6 @@ export interface QueueOptions {
concurrency?: number;
enableMetrics?: boolean;
enableDLQ?: boolean;
enableRateLimit?: boolean;
rateLimitRules?: RateLimitRule[]; // Queue-specific rate limit rules
handlerRegistry?: any; // HandlerRegistry from @stock-bot/handler-registry
}
@@ -70,8 +68,6 @@ export interface QueueManagerConfig {
redis: RedisConfig;
defaultQueueOptions?: QueueOptions;
enableScheduledJobs?: boolean;
globalRateLimit?: RateLimitConfig;
rateLimitRules?: RateLimitRule[]; // Global rate limit rules
serviceName?: string; // For service discovery and namespacing
autoDiscoverHandlers?: boolean; // Auto-discover queue routes from handler registry
}
@@ -108,12 +104,17 @@ export interface HandlerInitializer {
}
// Rate limiting types
export interface RateLimitConfig {
export interface RateLimitWindow {
points: number;
duration: number;
blockDuration?: number;
}
export interface RateLimitConfig {
limits?: RateLimitWindow[];
cost?: number;
}
export interface RateLimitRule {
level: 'global' | 'queue' | 'handler' | 'operation';
queueName?: string; // For queue-level limits
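For reference, a complete rule under the reshaped types looks like the example below: `limits` lists one or more RateLimitWindow entries and `cost` weights each call against the matching pool (the handler, operation, and numbers are examples only; the handler/operation/config fields follow their usage elsewhere in this commit):

const rule: RateLimitRule = {
  level: 'operation',
  handler: 'newsFetcher',      // example handler name
  operation: 'fetchHeadlines', // example operation name
  config: {
    limits: [
      { points: 30, duration: 60 },                       // 30 points per minute
      { points: 500, duration: 3600, blockDuration: 60 }, // 500 per hour, block for 60s when exceeded
    ],
    cost: 2, // each call consumes 2 points
  },
};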