restructured libs to be more aligned with core components

This commit is contained in:
Boki 2025-06-23 19:51:48 -04:00
parent 947b1d748d
commit 0d1be9e3cb
50 changed files with 73 additions and 67 deletions

View file

@ -1,191 +0,0 @@
# @stock-bot/event-bus
Lightweight event bus for inter-service communication in the Stock Bot platform.
## Overview
This library provides a simple pub/sub event system using Redis, designed for real-time event distribution between microservices. It focuses on simplicity and reliability for event-driven communication.
## Features
- Simple pub/sub pattern using Redis
- Automatic reconnection and resubscription
- Local event emission (works even without Redis)
- TypeScript support with predefined trading event types
- Lightweight with minimal dependencies
## Installation
```bash
bun add @stock-bot/event-bus
```
## Usage
### Basic Setup
```typescript
import { createEventBus, TradingEventType } from '@stock-bot/event-bus';
const eventBus = createEventBus({
serviceName: 'data-ingestion',
redisConfig: {
host: 'localhost',
port: 6379,
},
enableLogging: true,
});
// Wait for connection
await eventBus.waitForConnection();
```
### Publishing Events
```typescript
// Publish a price update
await eventBus.publish(TradingEventType.PRICE_UPDATE, {
symbol: 'AAPL',
price: 150.25,
volume: 1000000,
timestamp: Date.now(),
});
// Publish with metadata
await eventBus.publish(TradingEventType.ORDER_FILLED,
{
orderId: '12345',
symbol: 'TSLA',
side: 'buy',
quantity: 100,
price: 250.50,
},
{ source: 'ib-gateway', region: 'us' }
);
```
### Subscribing to Events
```typescript
// Subscribe to price updates
await eventBus.subscribe(TradingEventType.PRICE_UPDATE, async (message) => {
console.log(`Price update for ${message.data.symbol}: $${message.data.price}`);
});
// Subscribe to order events
await eventBus.subscribe(TradingEventType.ORDER_FILLED, async (message) => {
const { orderId, symbol, quantity, price } = message.data;
console.log(`Order ${orderId} filled: ${quantity} ${symbol} @ $${price}`);
});
```
### Event Types
The library includes predefined event types for common trading operations:
```typescript
enum TradingEventType {
// Market data events
PRICE_UPDATE = 'market.price.update',
ORDERBOOK_UPDATE = 'market.orderbook.update',
TRADE_EXECUTED = 'market.trade.executed',
// Order events
ORDER_CREATED = 'order.created',
ORDER_FILLED = 'order.filled',
ORDER_CANCELLED = 'order.cancelled',
ORDER_REJECTED = 'order.rejected',
// Position events
POSITION_OPENED = 'position.opened',
POSITION_CLOSED = 'position.closed',
POSITION_UPDATED = 'position.updated',
// Strategy events
STRATEGY_SIGNAL = 'strategy.signal',
STRATEGY_STARTED = 'strategy.started',
STRATEGY_STOPPED = 'strategy.stopped',
// Risk events
RISK_LIMIT_BREACH = 'risk.limit.breach',
RISK_WARNING = 'risk.warning',
// System events
SERVICE_STARTED = 'system.service.started',
SERVICE_STOPPED = 'system.service.stopped',
SERVICE_ERROR = 'system.service.error',
}
```
### Typed Events
Use TypeScript generics for type-safe event handling:
```typescript
import type { PriceUpdateEvent, OrderEvent } from '@stock-bot/event-bus';
// Type-safe subscription
await eventBus.subscribe<PriceUpdateEvent>(
TradingEventType.PRICE_UPDATE,
async (message) => {
// message.data is typed as PriceUpdateEvent
const { symbol, price, volume } = message.data;
}
);
```
### Cleanup
```typescript
// Unsubscribe from specific event
await eventBus.unsubscribe(TradingEventType.PRICE_UPDATE);
// Close all connections
await eventBus.close();
```
## Architecture Notes
This library is designed for lightweight, real-time event distribution. For reliable job processing, retries, and persistence, use the `@stock-bot/queue` library with BullMQ instead.
### When to Use Event Bus
- Real-time notifications (price updates, trade executions)
- Service coordination (strategy signals, risk alerts)
- System monitoring (service status, errors)
### When to Use Queue
- Data processing jobs
- Batch operations
- Tasks requiring persistence and retries
- Scheduled operations
## Error Handling
The event bus handles connection failures gracefully:
```typescript
try {
await eventBus.publish(TradingEventType.PRICE_UPDATE, data);
} catch (error) {
// Event will still be emitted locally
console.error('Failed to publish to Redis:', error);
}
```
## Development
```bash
# Install dependencies
bun install
# Build
bun run build
# Run tests
bun test
# Clean build artifacts
bun run clean
```

View file

@ -1,39 +0,0 @@
{
"name": "@stock-bot/event-bus",
"version": "1.0.0",
"description": "Event bus library for inter-service communication",
"main": "dist/index.js",
"types": "dist/index.d.ts",
"type": "module",
"scripts": {
"build": "tsc",
"test": "bun test",
"clean": "rimraf dist"
},
"dependencies": {
"@stock-bot/logger": "*",
"ioredis": "^5.3.2",
"eventemitter3": "^5.0.1"
},
"devDependencies": {
"@types/node": "^20.11.0",
"typescript": "^5.3.0",
"bun-types": "^1.2.15"
},
"exports": {
".": {
"import": "./dist/index.js",
"require": "./dist/index.js",
"types": "./dist/index.d.ts"
},
"./types": {
"import": "./dist/types.js",
"require": "./dist/types.js",
"types": "./dist/types.d.ts"
}
},
"files": [
"dist",
"README.md"
]
}

View file

@ -1,239 +0,0 @@
import { EventEmitter } from 'eventemitter3';
import Redis from 'ioredis';
import { getLogger } from '@stock-bot/logger';
import type { EventBusConfig, EventBusMessage, EventHandler, EventSubscription } from './types';
/**
 * Lightweight event bus for inter-service communication.
 *
 * Uses Redis pub/sub (channel name `events:<type>`) for real-time event
 * distribution between services. Every event is also emitted locally through
 * EventEmitter, so in-process delivery keeps working even when Redis is down.
 */
export class EventBus extends EventEmitter {
  private publisher: Redis;
  private subscriber: Redis;
  private readonly serviceName: string;
  private readonly logger: ReturnType<typeof getLogger>;
  /** One record per subscribed event type; used to resubscribe after a reconnect. */
  private subscriptions: Map<string, EventSubscription> = new Map();
  private isConnected: boolean = false;

  constructor(config: EventBusConfig) {
    super();
    this.serviceName = config.serviceName;
    this.logger = getLogger(`event-bus:${this.serviceName}`);
    // NOTE(review): config.enableLogging is accepted but never read here —
    // confirm whether it should gate the info/debug logging in this class.
    const redisOptions = {
      host: config.redisConfig.host,
      port: config.redisConfig.port,
      password: config.redisConfig.password,
      db: config.redisConfig.db || 0,
      lazyConnect: false,
      enableOfflineQueue: true,
      // Always attempt to reconnect after a Redis error.
      reconnectOnError: (err: Error) => {
        this.logger.error('Redis connection error:', err);
        return true;
      },
    };
    // Two connections: a Redis connection in subscriber mode cannot issue
    // regular commands such as PUBLISH.
    this.publisher = new Redis(redisOptions);
    this.subscriber = new Redis(redisOptions);
    this.setupRedisHandlers();
  }

  /** Wire connect/error/message handlers on both Redis connections. */
  private setupRedisHandlers(): void {
    // Publisher handlers
    this.publisher.on('connect', () => {
      this.logger.info('Publisher connected to Redis');
      this.isConnected = true;
    });
    this.publisher.on('error', error => {
      this.logger.error('Publisher Redis error:', error);
    });
    // Subscriber handlers
    this.subscriber.on('connect', () => {
      this.logger.info('Subscriber connected to Redis');
      // Redis channel subscriptions do not survive a reconnect; restore them.
      this.resubscribeAll();
    });
    this.subscriber.on('error', error => {
      this.logger.error('Subscriber Redis error:', error);
    });
    // Handle incoming messages
    this.subscriber.on('message', this.handleMessage.bind(this));
  }

  /**
   * Deliver a raw Redis pub/sub message to local listeners.
   * Messages published by this same service are skipped, because publish()
   * already emitted them locally.
   */
  private handleMessage(channel: string, message: string): void {
    try {
      const eventMessage: EventBusMessage = JSON.parse(message);
      if (eventMessage.source === this.serviceName) {
        return;
      }
      // Channel is `events:<type>`; strip the prefix to recover the type.
      const eventType = channel.replace('events:', '');
      // Emit exactly once. Handlers registered via subscribe() are attached
      // as listeners, so this single emit reaches them.
      // (BUGFIX: previously the stored subscription handler was ALSO invoked
      // directly here, so subscribed handlers ran twice per remote message.)
      this.emit(eventType, eventMessage);
      this.logger.debug(`Received event: ${eventType} from ${eventMessage.source}`);
    } catch (error) {
      this.logger.error('Failed to handle message:', { error, channel, message });
    }
  }

  /**
   * Publish an event.
   *
   * The event is emitted locally first, then published to Redis when
   * connected. Throws only if the Redis publish itself fails.
   */
  async publish<T = any>(type: string, data: T, metadata?: Record<string, any>): Promise<void> {
    const message: EventBusMessage<T> = {
      id: this.generateId(),
      type,
      source: this.serviceName,
      timestamp: Date.now(),
      data,
      metadata,
    };
    // Local delivery always happens, even with Redis down.
    this.emit(type, message);
    if (this.isConnected) {
      try {
        const channel = `events:${type}`;
        await this.publisher.publish(channel, JSON.stringify(message));
        this.logger.debug(`Published event: ${type}`, { messageId: message.id });
      } catch (error) {
        this.logger.error(`Failed to publish event: ${type}`, { error, messageId: message.id });
        throw error;
      }
    } else {
      this.logger.warn(`Not connected to Redis, event ${type} only emitted locally`);
    }
  }

  /**
   * Subscribe to an event.
   *
   * The handler fires once per event, whether the event was published locally
   * or received over Redis. Errors thrown (or rejected) by the handler are
   * logged instead of becoming unhandled rejections.
   */
  async subscribe<T = any>(eventType: string, handler: EventHandler<T>): Promise<void> {
    // Wrap the handler so sync throws and async rejections are both logged.
    const wrapped: EventHandler<T> = message => {
      try {
        Promise.resolve(handler(message)).catch(error => {
          this.logger.error(`Handler error for event ${eventType}:`, error);
        });
      } catch (error) {
        this.logger.error(`Handler error for event ${eventType}:`, error);
      }
    };
    // Store the wrapper so resubscribeAll() and unsubscribe() can find it.
    this.subscriptions.set(eventType, { channel: `events:${eventType}`, handler: wrapped as EventHandler });
    // Single local listener: both publish() and handleMessage() deliver via
    // emit(), so the handler runs exactly once per event.
    this.on(eventType, wrapped);
    try {
      const channel = `events:${eventType}`;
      await this.subscriber.subscribe(channel);
      this.logger.debug(`Subscribed to event: ${eventType}`);
    } catch (error) {
      this.logger.error(`Failed to subscribe to event: ${eventType}`, error);
      throw error;
    }
  }

  /**
   * Unsubscribe from an event.
   * With a handler argument only that registration is detached; otherwise all
   * listeners for the event type are removed. The Redis channel subscription
   * is dropped in both cases.
   */
  async unsubscribe(eventType: string, handler?: EventHandler): Promise<void> {
    if (handler) {
      // subscribe() registers a wrapper, so remove the stored wrapper as well
      // as the raw handler (covers listeners added directly via on()).
      const subscription = this.subscriptions.get(eventType);
      if (subscription) {
        this.off(eventType, subscription.handler);
      }
      this.off(eventType, handler);
    } else {
      this.removeAllListeners(eventType);
    }
    this.subscriptions.delete(eventType);
    try {
      const channel = `events:${eventType}`;
      await this.subscriber.unsubscribe(channel);
      this.logger.debug(`Unsubscribed from event: ${eventType}`);
    } catch (error) {
      this.logger.error(`Failed to unsubscribe from event: ${eventType}`, error);
    }
  }

  /** Re-subscribe to every known channel; called after a Redis reconnect. */
  private async resubscribeAll(): Promise<void> {
    for (const [eventType, subscription] of this.subscriptions.entries()) {
      try {
        await this.subscriber.subscribe(subscription.channel);
        this.logger.debug(`Resubscribed to event: ${eventType}`);
      } catch (error) {
        this.logger.error(`Failed to resubscribe to event: ${eventType}`, error);
      }
    }
  }

  /**
   * Poll until the publisher connection is up.
   * @throws Error when the connection is not established within `timeout` ms.
   */
  async waitForConnection(timeout: number = 5000): Promise<void> {
    const startTime = Date.now();
    while (!this.isConnected && Date.now() - startTime < timeout) {
      await new Promise(resolve => setTimeout(resolve, 100));
    }
    if (!this.isConnected) {
      throw new Error(`Failed to connect to Redis within ${timeout}ms`);
    }
  }

  /** Drop all subscriptions and listeners and close both Redis connections. */
  async close(): Promise<void> {
    this.isConnected = false;
    this.subscriptions.clear();
    this.removeAllListeners();
    await Promise.all([this.publisher.quit(), this.subscriber.quit()]);
    this.logger.info('Event bus closed');
  }

  /** Generate a unique message id: `<service>-<timestamp>-<random base36>`. */
  private generateId(): string {
    // slice() replaces the deprecated String.prototype.substr().
    return `${this.serviceName}-${Date.now()}-${Math.random().toString(36).slice(2, 11)}`;
  }

  /** Whether the publisher connection is currently established. */
  get connected(): boolean {
    return this.isConnected;
  }

  /** Name of the service that owns this bus instance. */
  get service(): string {
    return this.serviceName;
  }
}

View file

@ -1,13 +0,0 @@
import { EventBus } from './event-bus';
import type { EventBusConfig } from './types';
/**
 * Factory helper: build an EventBus from the given configuration.
 */
export function createEventBus(config: EventBusConfig): EventBus {
  const bus = new EventBus(config);
  return bus;
}
// Re-export everything
export { EventBus } from './event-bus';
export * from './types';

View file

@ -1,111 +0,0 @@
/**
 * Envelope wrapped around every event published on the bus.
 * `data` carries the event-specific payload of type T.
 */
export interface EventBusMessage<T = any> {
/** Unique message id, generated by the publishing service. */
id: string;
/** Event type name, e.g. one of TradingEventType. */
type: string;
/** serviceName of the publisher; used to skip a service's own messages. */
source: string;
/** Publish time in epoch milliseconds (Date.now() at publish). */
timestamp: number;
data: T;
/** Optional free-form context supplied at publish time. */
metadata?: Record<string, unknown>;
}
/** Event callback; may be synchronous or return a promise. */
export interface EventHandler<T = any> {
(message: EventBusMessage<T>): Promise<void> | void;
}
/** Constructor options for an EventBus instance. */
export interface EventBusConfig {
/** Name identifying this service on the bus. */
serviceName: string;
redisConfig: {
host: string;
port: number;
password?: string;
/** Redis database index; the bus defaults it to 0 when omitted. */
db?: number;
};
/** NOTE(review): currently unused by the EventBus implementation — confirm intent. */
enableLogging?: boolean;
}
/** Internal bookkeeping for one subscription: Redis channel plus handler. */
export interface EventSubscription {
channel: string;
handler: EventHandler;
}
// Trading-specific event types
/** Well-known event names, namespaced `<domain>.<subject>.<action>`. */
export enum TradingEventType {
// Market data events
PRICE_UPDATE = 'market.price.update',
ORDERBOOK_UPDATE = 'market.orderbook.update',
TRADE_EXECUTED = 'market.trade.executed',
// Order events
ORDER_CREATED = 'order.created',
ORDER_FILLED = 'order.filled',
ORDER_CANCELLED = 'order.cancelled',
ORDER_REJECTED = 'order.rejected',
// Position events
POSITION_OPENED = 'position.opened',
POSITION_CLOSED = 'position.closed',
POSITION_UPDATED = 'position.updated',
// Strategy events
STRATEGY_SIGNAL = 'strategy.signal',
STRATEGY_STARTED = 'strategy.started',
STRATEGY_STOPPED = 'strategy.stopped',
// Risk events
RISK_LIMIT_BREACH = 'risk.limit.breach',
RISK_WARNING = 'risk.warning',
// System events
SERVICE_STARTED = 'system.service.started',
SERVICE_STOPPED = 'system.service.stopped',
SERVICE_ERROR = 'system.service.error',
}
// Event data types
/** Payload shape for TradingEventType.PRICE_UPDATE. */
export interface PriceUpdateEvent {
symbol: string;
price: number;
volume: number;
timestamp: number;
}
/** Payload shape for the order.* event family. */
export interface OrderEvent {
orderId: string;
symbol: string;
side: 'buy' | 'sell';
quantity: number;
/** Optional — e.g. not meaningful for market orders. */
price?: number;
type: 'market' | 'limit' | 'stop' | 'stop_limit';
status: string;
portfolioId: string;
strategyId?: string;
}
/** Payload shape for the position.* event family. */
export interface PositionEvent {
positionId: string;
symbol: string;
quantity: number;
averageCost: number;
currentPrice: number;
unrealizedPnl: number;
realizedPnl: number;
portfolioId: string;
}
/** Payload shape for TradingEventType.STRATEGY_SIGNAL. */
export interface StrategySignalEvent {
strategyId: string;
signal: 'buy' | 'sell' | 'hold';
symbol: string;
/** Signal confidence; range not enforced here — presumably 0..1, confirm. */
confidence: number;
/** Indicator name -> latest value snapshot backing the signal. */
indicators: Record<string, number>;
timestamp: number;
}
/** Payload shape for risk.* events. */
export interface RiskEvent {
type: 'position_size' | 'daily_loss' | 'max_drawdown' | 'concentration';
severity: 'warning' | 'critical';
currentValue: number;
limit: number;
portfolioId?: string;
strategyId?: string;
message: string;
}

View file

@ -1,10 +0,0 @@
{
"extends": "../../../tsconfig.json",
"compilerOptions": {
"outDir": "./dist",
"rootDir": "./src",
"composite": true
},
"include": ["src/**/*"],
"references": [{ "path": "../../core/logger" }]
}

View file

@ -1,306 +0,0 @@
# @stock-bot/queue
A reusable queue library with batch processing capabilities for the stock-bot project.
## Features
- **Queue Management**: Built on BullMQ with Redis backing
- **Batch Processing**: Efficient processing of large datasets
- **Provider Registry**: Pluggable job handler system
- **Cache Integration**: Uses @stock-bot/cache for payload storage
- **TypeScript Support**: Full type safety and IntelliSense
- **Configurable**: Flexible configuration for different environments
## Installation
```bash
npm install @stock-bot/queue
```
## Quick Start
### Basic Queue Setup
```typescript
import { QueueManager, handlerRegistry } from '@stock-bot/queue';
// Initialize queue manager (typically done via dependency injection)
const queueManager = new QueueManager({
redis: {
host: 'localhost',
port: 6379,
},
});
// Get or create a queue
const queue = queueManager.getQueue('my-service-queue', {
workers: 5,
concurrency: 20,
});
// Register handlers
handlerRegistry.register('market-data', {
'fetch-price': async (payload) => {
// Handle price fetching
return { price: 100, symbol: payload.symbol };
},
'update-data': async (payload) => {
// Handle data updates
return { success: true };
},
});
// Queue is ready to use - no initialization needed
```
### Batch Processing
```typescript
import { processItems, initializeBatchCache } from '@stock-bot/queue';
// Initialize cache first
await initializeBatchCache();
// Process items in batches
const result = await processItems(
['AAPL', 'GOOGL', 'MSFT'],
(symbol, index) => ({ symbol, timestamp: Date.now() }),
queueManager,
{
totalDelayMs: 60000, // 1 minute total
useBatching: true,
batchSize: 100,
priority: 1,
provider: 'market-data',
operation: 'fetch-price',
}
);
console.log(result);
// {
// jobsCreated: 1,
// mode: 'batch',
// totalItems: 3,
// batchesCreated: 1,
// duration: 150
// }
```
### Generic Processing
```typescript
import { processItems } from '@stock-bot/queue';
const result = await processItems(
['AAPL', 'GOOGL', 'MSFT'],
(symbol, index) => ({
symbol,
index,
timestamp: Date.now(),
}),
queueManager,
{
operation: 'live-data',
provider: 'yahoo',
totalDelayMs: 300000, // 5 minutes
useBatching: false,
priority: 1,
}
);
```
## API Reference
### QueueManager
The main queue management class.
#### Constructor
```typescript
new QueueManager(config?: QueueConfig)
```
#### Methods
- `initialize()`: Initialize the queue and workers
- `registerProvider(name, config)`: Register a job provider
- `add(name, data, options)`: Add a single job
- `addBulk(jobs)`: Add multiple jobs in bulk
- `getStats()`: Get queue statistics
- `pause()`: Pause job processing
- `resume()`: Resume job processing
- `clean(grace, limit)`: Clean completed/failed jobs
- `shutdown()`: Shutdown the queue manager
### Batch Processing Functions
#### processItems()
Process items either directly or in batches.
```typescript
processItems<T>(
items: T[],
processor: (item: T, index: number) => any,
queue: QueueManager,
options: ProcessOptions
): Promise<BatchResult>
```
#### processBatchJob()
Process a batch job (used internally by workers).
```typescript
processBatchJob(
jobData: BatchJobData,
queue: QueueManager
): Promise<any>
```
### Provider Registry
Manage job handlers for different providers.
```typescript
// Register provider
handlerRegistry.register('provider-name', {
'operation-1': async (payload) => { /* handle */ },
'operation-2': async (payload) => { /* handle */ },
});
// Check provider exists
if (handlerRegistry.hasProvider('provider-name')) {
// Provider is registered
}
// Get handler
const handler = handlerRegistry.getHandler('provider-name', 'operation-1');
```
## Configuration
### QueueConfig
```typescript
interface QueueConfig {
workers?: number; // Number of worker processes
concurrency?: number; // Jobs per worker
redis?: {
host?: string;
port?: number;
password?: string;
db?: number;
};
queueName?: string; // Name for the queue
defaultJobOptions?: {
removeOnComplete?: number;
removeOnFail?: number;
attempts?: number;
backoff?: {
type: string;
delay: number;
};
};
}
```
### ProcessOptions
```typescript
interface ProcessOptions {
totalDelayMs: number; // Total time to spread jobs over
batchSize?: number; // Items per batch (batch mode)
priority?: number; // Job priority
useBatching?: boolean; // Use batch vs direct mode
retries?: number; // Number of retry attempts
ttl?: number; // Cache TTL for batch payloads
removeOnComplete?: number; // Keep N completed jobs
removeOnFail?: number; // Keep N failed jobs
provider?: string; // Provider name for job routing
operation?: string; // Operation name for job routing
}
```
## Migration from Existing Queue
If you're migrating from an existing queue implementation:
1. **Replace imports**:
```typescript
// Before
import { QueueService } from '../services/queue.service';
import { processItems } from '../utils/batch-helpers';
// After
import { QueueManager, processItems } from '@stock-bot/queue';
```
2. **Update initialization**:
```typescript
// Before
const queueService = new QueueService();
await queueService.initialize();
// After
const queueManager = new QueueManager({
redis: { host: 'localhost', port: 6379 }
});
// No initialization needed
```
3. **Update provider registration**:
```typescript
// Before
providerRegistry.register('provider', config);
// After
handlerRegistry.register('provider', config);
```
## Examples
See the `/examples` directory for complete implementation examples:
- `basic-usage.ts` - Basic queue setup and job processing
- `batch-processing.ts` - Advanced batch processing scenarios
- `provider-setup.ts` - Provider registration patterns
- `migration-example.ts` - Migration from existing queue service
## Best Practices
1. **Initialize cache before batch operations**:
```typescript
await initializeBatchCache();
```
2. **Use appropriate batch sizes**:
- Small items: 500-1000 per batch
- Large items: 50-100 per batch
3. **Set reasonable delays**:
- Spread jobs over time to avoid overwhelming services
- Consider rate limits of external APIs
4. **Clean up periodically**:
```typescript
const queue = queueManager.getQueue('my-queue');
await queue.clean(24 * 60 * 60 * 1000); // Clean jobs older than 24h
```
5. **Monitor queue stats**:
```typescript
const queue = queueManager.getQueue('my-queue');
const stats = await queue.getStats();
console.log('Queue status:', stats);
```
## Environment Variables
- `WORKER_COUNT`: Number of worker processes (default: 5)
- `WORKER_CONCURRENCY`: Jobs per worker (default: 20)
- `DRAGONFLY_HOST`: Redis/Dragonfly host (default: localhost)
- `DRAGONFLY_PORT`: Redis/Dragonfly port (default: 6379)
- `DRAGONFLY_PASSWORD`: Redis/Dragonfly password
- `DRAGONFLY_DB`: Redis/Dragonfly database number (default: 0)

View file

@ -1,29 +0,0 @@
{
"name": "@stock-bot/queue",
"version": "1.0.0",
"description": "Reusable queue library with batch processing capabilities",
"main": "dist/index.js",
"types": "dist/index.d.ts",
"scripts": {
"build": "tsc",
"dev": "tsc --watch",
"clean": "rm -rf dist"
},
"dependencies": {
"bullmq": "^5.0.0",
"ioredis": "^5.3.0",
"rate-limiter-flexible": "^3.0.0",
"@stock-bot/cache": "*",
"@stock-bot/logger": "*",
"@stock-bot/types": "*",
"@stock-bot/handlers": "*"
},
"devDependencies": {
"typescript": "^5.3.0",
"@types/node": "^20.0.0",
"testcontainers": "^10.0.0"
},
"publishConfig": {
"access": "restricted"
}
}

View file

@ -1,345 +0,0 @@
import { QueueManager } from './queue-manager';
import type { BatchJobData, BatchResult, JobData, ProcessOptions } from './types';
/**
 * Entry point for batch processing: fans `items` out onto the queue either as
 * one job per item (direct mode) or as grouped batch jobs (batch mode). Each
 * item is used as the job payload verbatim — no wrapping or transformation.
 */
export async function processItems<T>(
  items: T[],
  queueName: string,
  options: ProcessOptions,
  queueManager: QueueManager
): Promise<BatchResult> {
  const targetQueue = queueManager.getQueue(queueName);
  const mode = options.useBatching ? 'batch' : 'direct';
  const log = targetQueue.createChildLogger('batch-processor', {
    queueName,
    totalItems: items.length,
    mode,
  });
  const startedAt = Date.now();

  // Nothing to do — report an empty direct-mode result without logging.
  if (items.length === 0) {
    return {
      jobsCreated: 0,
      mode: 'direct',
      totalItems: 0,
      duration: 0,
    };
  }

  log.info('Starting batch processing', {
    totalItems: items.length,
    mode,
    batchSize: options.batchSize,
    totalDelayHours: options.totalDelayHours,
  });

  try {
    let result: Omit<BatchResult, 'duration'>;
    if (options.useBatching) {
      result = await processBatched(items, queueName, options, queueManager);
    } else {
      result = await processDirect(items, queueName, options, queueManager);
    }
    const elapsed = Date.now() - startedAt;
    log.info('Batch processing completed', {
      ...result,
      duration: `${(elapsed / 1000).toFixed(1)}s`,
    });
    return { ...result, duration: elapsed };
  } catch (error) {
    log.error('Batch processing failed', { error });
    throw error;
  }
}
/**
 * Direct mode: every item becomes its own `process-item` job, with start
 * delays spread evenly across the configured time window.
 */
async function processDirect<T>(
  items: T[],
  queueName: string,
  options: ProcessOptions,
  queueManager: QueueManager
): Promise<Omit<BatchResult, 'duration'>> {
  const queue = queueManager.getQueue(queueName);
  const log = queue.createChildLogger('batch-direct', {
    queueName,
    totalItems: items.length,
  });

  // Spread the jobs evenly over the total delay window (hours -> ms).
  const windowMs = options.totalDelayHours * 60 * 60 * 1000;
  const itemSpacingMs = windowMs / items.length;

  log.info('Creating direct jobs', {
    totalItems: items.length,
    delayPerItem: `${(itemSpacingMs / 1000).toFixed(1)}s`,
  });

  const jobs = items.map((item, index) => {
    const data = {
      handler: options.handler || 'generic',
      operation: options.operation || 'process-item',
      payload: item, // the raw item is the payload — no wrapper object
      priority: options.priority || undefined,
    };
    const opts = {
      delay: index * itemSpacingMs,
      priority: options.priority || undefined,
      attempts: options.retries || 3,
      removeOnComplete: options.removeOnComplete || 10,
      removeOnFail: options.removeOnFail || 5,
    };
    return { name: 'process-item', data, opts };
  });

  const created = await addJobsInChunks(queueName, jobs, queueManager);
  return {
    totalItems: items.length,
    jobsCreated: created.length,
    mode: 'direct',
  };
}
/**
 * Batch mode: split items into fixed-size batches, persist each batch's items
 * to the cache via storeItems(), and enqueue one `process-batch` job per
 * batch, with batch start times spread evenly across the total delay window.
 */
async function processBatched<T>(
items: T[],
queueName: string,
options: ProcessOptions,
queueManager: QueueManager
): Promise<Omit<BatchResult, 'duration'>> {
const queue = queueManager.getQueue(queueName);
const logger = queue.createChildLogger('batch-batched', {
queueName,
totalItems: items.length,
});
// Default batch size of 100 items when not configured.
const batchSize = options.batchSize || 100;
const batches = createBatches(items, batchSize);
const totalDelayMs = options.totalDelayHours * 60 * 60 * 1000; // Convert hours to milliseconds
// Each batch gets an equal slice of the total delay window.
const delayPerBatch = totalDelayMs / batches.length;
logger.info('Creating batch jobs', {
totalItems: items.length,
batchSize,
totalBatches: batches.length,
delayPerBatch: `${(delayPerBatch / 1000 / 60).toFixed(2)} minutes`,
});
const batchJobs = await Promise.all(
batches.map(async (batch, batchIndex) => {
// Just store the items directly - no processing needed
const payloadKey = await storeItems(batch, queueName, options, queueManager);
return {
name: 'process-batch',
data: {
handler: options.handler || 'generic',
// NOTE(review): presumably routed by the worker to processBatchJob()
// via the handler registry — confirm against the worker's mapping.
operation: 'process-batch-items',
payload: {
// Cache key under which this batch's items were stored.
payloadKey,
batchIndex,
totalBatches: batches.length,
itemCount: batch.length,
totalDelayHours: options.totalDelayHours,
} as BatchJobData,
priority: options.priority || undefined,
},
opts: {
// Stagger batch jobs evenly across the window.
delay: batchIndex * delayPerBatch,
priority: options.priority || undefined,
attempts: options.retries || 3,
removeOnComplete: options.removeOnComplete || 10,
removeOnFail: options.removeOnFail || 5,
},
};
})
);
const createdJobs = await addJobsInChunks(queueName, batchJobs, queueManager);
return {
totalItems: items.length,
jobsCreated: createdJobs.length,
batchesCreated: batches.length,
mode: 'batch',
};
}
/**
 * Worker-side handler for a `process-batch` job: loads the batch's items from
 * the cache, fans them out as individual `process-item` jobs spaced evenly
 * within this batch's share of the delay window, then deletes the cached
 * payload.
 *
 * @throws Error when the cached payload is missing, expired, or malformed.
 */
export async function processBatchJob(jobData: BatchJobData, queueName: string, queueManager: QueueManager): Promise<unknown> {
const queue = queueManager.getQueue(queueName);
const logger = queue.createChildLogger('batch-job', {
queueName,
batchIndex: jobData.batchIndex,
payloadKey: jobData.payloadKey,
});
const { payloadKey, batchIndex, totalBatches, itemCount, totalDelayHours } = jobData;
logger.debug('Processing batch job', {
batchIndex,
totalBatches,
itemCount,
totalDelayHours,
});
try {
// Items and job options were cached by storeItems(); a null result means
// the key is missing or its TTL expired.
const payload = await loadPayload(payloadKey, queueName, queueManager);
if (!payload || !payload.items || !payload.options) {
logger.error('Invalid payload data', { payloadKey, payload });
throw new Error(`Invalid payload data for key: ${payloadKey}`);
}
const { items, options } = payload;
// Calculate the time window for this batch
const totalDelayMs = totalDelayHours * 60 * 60 * 1000; // Convert hours to ms
const delayPerBatch = totalDelayMs / totalBatches; // Time allocated for each batch
const delayPerItem = delayPerBatch / items.length; // Distribute items evenly within batch window
logger.debug('Calculating job delays', {
batchIndex,
delayPerBatch: `${(delayPerBatch / 1000 / 60).toFixed(2)} minutes`,
delayPerItem: `${(delayPerItem / 1000).toFixed(2)} seconds`,
itemsInBatch: items.length,
});
// Create jobs directly from items - each item becomes payload: item
const jobs = items.map((item: unknown, index: number) => ({
name: 'process-item',
data: {
handler: options.handler || 'generic',
operation: options.operation || 'generic',
payload: item, // Just the item directly!
priority: options.priority || undefined,
},
opts: {
delay: index * delayPerItem, // Distribute evenly within batch window
priority: options.priority || undefined,
attempts: options.retries || 3,
},
}));
const createdJobs = await addJobsInChunks(queueName, jobs, queueManager);
// Cleanup payload after successful processing
await cleanupPayload(payloadKey, queueName, queueManager);
return {
batchIndex,
itemsProcessed: items.length,
jobsCreated: createdJobs.length,
};
} catch (error) {
logger.error('Batch job processing failed', { batchIndex, error });
throw error;
}
}
// Helper functions
/** Partition `items` into consecutive slices of at most `batchSize` elements. */
function createBatches<T>(items: T[], batchSize: number): T[][] {
  const result: T[][] = [];
  let start = 0;
  while (start < items.length) {
    result.push(items.slice(start, start + batchSize));
    start += batchSize;
  }
  return result;
}
/**
 * Persist a batch of items (plus the job options the batch job will need) to
 * the queue's cache so processBatchJob() can load them later.
 *
 * @returns the generated cache key under which the payload was stored.
 */
async function storeItems<T>(
  items: T[],
  queueName: string,
  options: ProcessOptions,
  queueManager: QueueManager
): Promise<string> {
  const cache = queueManager.getCache(queueName);
  // Unique-enough key: timestamp plus a 9-char random base36 suffix.
  // (slice replaces the deprecated String.prototype.substr.)
  const payloadKey = `payload:${Date.now()}:${Math.random().toString(36).slice(2, 11)}`;
  const payload = {
    items, // stored verbatim — no per-item processing
    options: {
      delayPerItem: 1000,
      priority: options.priority || undefined,
      retries: options.retries || 3,
      handler: options.handler || 'generic',
      operation: options.operation || 'generic',
    },
    createdAt: new Date().toISOString(),
  };
  const ttlSeconds = options.ttl || 86400; // default: 24 hours
  await cache.set(payloadKey, payload, ttlSeconds);
  return payloadKey;
}
/** Shape of the payload written by storeItems() and read back here. */
type StoredBatchPayload<T> = {
  items: T[];
  options: {
    delayPerItem: number;
    priority?: number;
    retries: number;
    handler: string;
    operation: string;
  };
};

/**
 * Load a previously stored batch payload from the queue's cache.
 *
 * @returns the payload, or null when the key is missing or expired.
 */
async function loadPayload<T>(
  key: string,
  queueName: string,
  queueManager: QueueManager
): Promise<StoredBatchPayload<T> | null> {
  const cache = queueManager.getCache(queueName);
  // NOTE(review): the cached value is trusted as-is — cache.get returns an
  // untyped value, so this cast is unchecked at runtime.
  return (await cache.get(key)) as StoredBatchPayload<T> | null;
}
/** Delete a stored batch payload once its jobs have been enqueued. */
async function cleanupPayload(key: string, queueName: string, queueManager: QueueManager): Promise<void> {
  await queueManager.getCache(queueName).del(key);
}
/**
 * Enqueue jobs in fixed-size chunks via addBulk, pausing briefly between
 * chunks so Redis is not flooded. A failed chunk is logged and skipped; the
 * returned array contains only the jobs that were actually created.
 */
async function addJobsInChunks(
  queueName: string,
  jobs: Array<{ name: string; data: JobData; opts?: Record<string, unknown> }>,
  queueManager: QueueManager,
  chunkSize = 100
): Promise<unknown[]> {
  const queue = queueManager.getQueue(queueName);
  const log = queue.createChildLogger('batch-chunk', {
    queueName,
    totalJobs: jobs.length,
  });

  const created: unknown[] = [];
  let offset = 0;
  while (offset < jobs.length) {
    const chunk = jobs.slice(offset, offset + chunkSize);
    try {
      const chunkJobs = await queue.addBulk(chunk);
      created.push(...chunkJobs);
      // Small delay between chunks to avoid overwhelming Redis.
      if (offset + chunkSize < jobs.length) {
        await new Promise(resolve => setTimeout(resolve, 100));
      }
    } catch (error) {
      log.error('Failed to add job chunk', {
        startIndex: offset,
        chunkSize: chunk.length,
        error,
      });
    }
    offset += chunkSize;
  }
  return created;
}

View file

@ -1,257 +0,0 @@
import { Queue, type Job } from 'bullmq';
import type { DLQConfig, RedisConfig } from './types';
import { getRedisConnection } from './utils';
/**
 * Minimal structured-logger contract used by this module. Satisfied both by
 * an injected project logger and by the global console (the fallback used in
 * the DeadLetterQueueHandler constructor).
 */
interface Logger {
info(message: string, meta?: Record<string, unknown>): void;
error(message: string, meta?: Record<string, unknown>): void;
warn(message: string, meta?: Record<string, unknown>): void;
debug(message: string, meta?: Record<string, unknown>): void;
}
export class DeadLetterQueueHandler {
// Dead letter queue that receives jobs which exhausted their retries.
private dlq: Queue;
// DLQConfig with every field defaulted by the constructor.
private config: Required<DLQConfig>;
// Failure tally keyed by `${job.name}:${job.id}`; entries are deleted when
// the job is moved to the DLQ.
private failureCount = new Map<string, number>();
private readonly logger: Logger;
/**
 * @param mainQueue queue whose failed jobs this handler manages
 * @param connection Redis connection settings used to create the DLQ
 * @param config retry/alert/cleanup tuning; missing fields get defaults
 * @param logger optional structured logger; falls back to the global console
 */
constructor(
private mainQueue: Queue,
connection: RedisConfig,
config: DLQConfig = {},
logger?: Logger
) {
this.logger = logger || console;
this.config = {
maxRetries: config.maxRetries ?? 3,
retryDelay: config.retryDelay ?? 60000, // 1 minute
alertThreshold: config.alertThreshold ?? 100,
cleanupAge: config.cleanupAge ?? 168, // 7 days
};
// Create DLQ with same name but -dlq suffix
const dlqName = `${mainQueue.name}-dlq`;
this.dlq = new Queue(dlqName, { connection: getRedisConnection(connection) });
}
/**
 * Record a failure for `job` and, once the job has exhausted its configured
 * attempts (job.opts.attempts, falling back to config.maxRetries), move it to
 * the dead letter queue.
 *
 * NOTE(review): failureCount entries are only deleted when a job moves to the
 * DLQ; a job that eventually succeeds leaves a stale entry behind — confirm
 * whether the map should also be pruned on success.
 */
async handleFailedJob(job: Job, error: Error): Promise<void> {
const jobKey = `${job.name}:${job.id}`;
const currentFailures = (this.failureCount.get(jobKey) || 0) + 1;
this.failureCount.set(jobKey, currentFailures);
this.logger.warn('Job failed', {
jobId: job.id,
jobName: job.name,
attempt: job.attemptsMade,
maxAttempts: job.opts.attempts,
error: error.message,
failureCount: currentFailures,
});
// Check if job should be moved to DLQ
if (job.attemptsMade >= (job.opts.attempts || this.config.maxRetries)) {
await this.moveToDeadLetterQueue(job, error);
this.failureCount.delete(jobKey);
}
}
/**
* Move job to dead letter queue
*/
private async moveToDeadLetterQueue(job: Job, error: Error): Promise<void> {
try {
const dlqData = {
originalJob: {
id: job.id,
name: job.name,
data: job.data,
opts: job.opts,
attemptsMade: job.attemptsMade,
failedReason: job.failedReason,
processedOn: job.processedOn,
timestamp: job.timestamp,
},
error: {
message: error.message,
stack: error.stack,
name: error.name,
},
movedToDLQAt: new Date().toISOString(),
};
await this.dlq.add('failed-job', dlqData, {
removeOnComplete: 100,
removeOnFail: 50,
});
this.logger.error('Job moved to DLQ', {
jobId: job.id,
jobName: job.name,
error: error.message,
});
// Check if we need to alert
await this.checkAlertThreshold();
} catch (dlqError) {
this.logger.error('Failed to move job to DLQ', {
jobId: job.id,
error: dlqError,
});
}
}
/**
* Retry jobs from DLQ
*/
async retryDLQJobs(limit = 10): Promise<number> {
const jobs = await this.dlq.getCompleted(0, limit);
let retriedCount = 0;
for (const dlqJob of jobs) {
try {
const { originalJob } = dlqJob.data;
// Re-add to main queue with delay
await this.mainQueue.add(originalJob.name, originalJob.data, {
...originalJob.opts,
delay: this.config.retryDelay,
attempts: this.config.maxRetries,
});
// Remove from DLQ
await dlqJob.remove();
retriedCount++;
this.logger.info('Job retried from DLQ', {
originalJobId: originalJob.id,
jobName: originalJob.name,
});
} catch (error) {
this.logger.error('Failed to retry DLQ job', {
dlqJobId: dlqJob.id,
error,
});
}
}
return retriedCount;
}
/**
* Get DLQ statistics
*/
async getStats(): Promise<{
total: number;
recent: number;
byJobName: Record<string, number>;
oldestJob: Date | null;
}> {
const [completed, failed, waiting] = await Promise.all([
this.dlq.getCompleted(),
this.dlq.getFailed(),
this.dlq.getWaiting(),
]);
const allJobs = [...completed, ...failed, ...waiting];
const byJobName: Record<string, number> = {};
let oldestTimestamp: number | null = null;
for (const job of allJobs) {
const jobName = job.data.originalJob?.name || 'unknown';
byJobName[jobName] = (byJobName[jobName] || 0) + 1;
if (!oldestTimestamp || job.timestamp < oldestTimestamp) {
oldestTimestamp = job.timestamp;
}
}
// Count recent jobs (last 24 hours)
const oneDayAgo = Date.now() - 24 * 60 * 60 * 1000;
const recent = allJobs.filter(job => job.timestamp > oneDayAgo).length;
return {
total: allJobs.length,
recent,
byJobName,
oldestJob: oldestTimestamp ? new Date(oldestTimestamp) : null,
};
}
/**
* Clean up old DLQ entries
*/
async cleanup(): Promise<number> {
const ageInMs = this.config.cleanupAge * 60 * 60 * 1000;
const cutoffTime = Date.now() - ageInMs;
const jobs = await this.dlq.getCompleted();
let removedCount = 0;
for (const job of jobs) {
if (job.timestamp < cutoffTime) {
await job.remove();
removedCount++;
}
}
this.logger.info('DLQ cleanup completed', {
removedCount,
cleanupAge: `${this.config.cleanupAge} hours`,
});
return removedCount;
}
/**
* Check if alert threshold is exceeded
*/
private async checkAlertThreshold(): Promise<void> {
const stats = await this.getStats();
if (stats.total >= this.config.alertThreshold) {
this.logger.error('DLQ alert threshold exceeded', {
threshold: this.config.alertThreshold,
currentCount: stats.total,
byJobName: stats.byJobName,
});
// In a real implementation, this would trigger alerts
}
}
/**
* Get failed jobs for inspection
*/
async inspectFailedJobs(limit = 10): Promise<
Array<{
id: string;
name: string;
data: unknown;
error: unknown;
failedAt: string;
attempts: number;
}>
> {
const jobs = await this.dlq.getCompleted(0, limit);
return jobs.map(job => ({
id: job.data.originalJob.id,
name: job.data.originalJob.name,
data: job.data.originalJob.data,
error: job.data.error,
failedAt: job.data.movedToDLQAt,
attempts: job.data.originalJob.attemptsMade,
}));
}
/**
* Shutdown DLQ handler
*/
async shutdown(): Promise<void> {
await this.dlq.close();
this.failureCount.clear();
}
}

View file

@ -1,75 +0,0 @@
// Public API surface of this package: re-exports the core queue primitives,
// batch/DLQ/metrics/rate-limiting helpers, and their types.
// Core exports
export { Queue } from './queue';
export { QueueManager } from './queue-manager';
export { SmartQueueManager } from './smart-queue-manager';
export { ServiceCache, createServiceCache } from './service-cache';
export {
  SERVICE_REGISTRY,
  getServiceConfig,
  findServiceForHandler,
  getFullQueueName,
  parseQueueName
} from './service-registry';
// Re-export handler registry and utilities from handlers package
// so consumers need only one import root.
export { handlerRegistry, createJobHandler } from '@stock-bot/handlers';
// Batch processing
export { processBatchJob, processItems } from './batch-processor';
// DLQ (dead-letter queue) handling
export { DeadLetterQueueHandler } from './dlq-handler';
// Metrics
export { QueueMetricsCollector } from './queue-metrics';
// Rate limiting
export { QueueRateLimiter } from './rate-limiter';
// Types (type-only exports; erased at compile time)
export type {
  // Core types
  JobData,
  JobOptions,
  QueueOptions,
  GlobalStats,
  // Batch processing types
  BatchResult,
  ProcessOptions,
  BatchJobData,
  // Handler types
  JobHandler,
  TypedJobHandler,
  HandlerConfig,
  HandlerConfigWithSchedule,
  HandlerInitializer,
  QueueStats,
  QueueWorkerConfig,
  // Configuration types
  RedisConfig,
  QueueConfig,
  QueueManagerConfig,
  // Rate limiting types
  RateLimitConfig,
  RateLimitRule,
  // DLQ types
  DLQConfig,
  DLQJobInfo,
  // Scheduled job types
  ScheduledJob,
  ScheduleConfig,
  // Smart Queue types
  SmartQueueConfig,
  QueueRoute,
} from './types';
// Re-export service registry types
export type { ServiceConfig } from './service-registry';

View file

@ -1,444 +0,0 @@
import { createCache } from '@stock-bot/cache';
import type { CacheProvider } from '@stock-bot/cache';
import { Queue, type QueueWorkerConfig } from './queue';
import { QueueRateLimiter } from './rate-limiter';
import type {
GlobalStats,
QueueManagerConfig,
QueueOptions,
QueueStats,
RateLimitRule,
} from './types';
import { getRedisConnection } from './utils';
// Logger interface for type safety.
// Structural contract for an injectable logger. `trace` is required here
// (unlike the dlq-handler variant) and `child` is optional because plain
// `console` — the fallback — does not provide it.
interface Logger {
  info(message: string, meta?: Record<string, unknown>): void;
  error(message: string, meta?: Record<string, unknown>): void;
  warn(message: string, meta?: Record<string, unknown>): void;
  debug(message: string, meta?: Record<string, unknown>): void;
  trace(message: string, meta?: Record<string, unknown>): void;
  child?(name: string, context?: Record<string, unknown>): Logger;
}
/**
* QueueManager provides unified queue and cache management
* Main entry point for all queue operations with getQueue() method
*/
export class QueueManager {
  /** Queues by name; created lazily by getQueue(). */
  private queues = new Map<string, Queue>();
  /** Per-queue batch caches, keyed by queue name (prefix `batch:<name>:`). */
  private caches = new Map<string, CacheProvider>();
  /** Lazily created when rate-limit rules are configured or added. */
  private rateLimiter?: QueueRateLimiter;
  private redisConnection: ReturnType<typeof getRedisConnection>;
  // Shutdown coordination: flag + shared promise so concurrent shutdown()
  // calls await the same in-flight teardown.
  private isShuttingDown = false;
  private shutdownPromise: Promise<void> | null = null;
  private config: QueueManagerConfig;
  private readonly logger: Logger;

  /**
   * @param config Redis connection, default queue options, and optional
   *   rate-limit rules / delayed-worker-start flag.
   * @param logger Structural logger; falls back to `console`.
   */
  constructor(config: QueueManagerConfig, logger?: Logger) {
    this.config = config;
    this.logger = logger || console;
    this.redisConnection = getRedisConnection(config.redis);
    // Initialize rate limiter if rules are provided
    if (config.rateLimitRules && config.rateLimitRules.length > 0) {
      this.rateLimiter = new QueueRateLimiter(this.redisConnection, this.logger);
      config.rateLimitRules.forEach(rule => {
        if (this.rateLimiter) {
          this.rateLimiter.addRule(rule);
        }
      });
    }
    this.logger.info('QueueManager initialized', {
      redis: `${config.redis.host}:${config.redis.port}`,
    });
  }

  /**
   * Get or create a queue - unified method that handles both scenarios.
   * This is the main method for accessing queues.
   *
   * Creation merges `config.defaultQueueOptions` under the caller's
   * `options`, wires a batch cache, and registers any queue-scoped
   * rate-limit rules.
   *
   * @param queueName Logical queue name.
   * @param options Per-queue overrides of the manager defaults.
   */
  getQueue(queueName: string, options: QueueOptions = {}): Queue {
    // Return existing queue if it exists
    if (this.queues.has(queueName)) {
      const existingQueue = this.queues.get(queueName);
      if (existingQueue) {
        return existingQueue;
      }
    }
    // Create new queue with merged options
    const mergedOptions = {
      ...this.config.defaultQueueOptions,
      ...options,
    };
    // Prepare queue configuration
    // NOTE(review): the `?? this.config.defaultQueueOptions?...` fallbacks are
    // redundant — mergedOptions already spreads the defaults — but harmless.
    const workers = mergedOptions.workers ?? this.config.defaultQueueOptions?.workers ?? 1;
    const concurrency = mergedOptions.concurrency ?? this.config.defaultQueueOptions?.concurrency ?? 1;
    const queueConfig: QueueWorkerConfig = {
      workers,
      concurrency,
      // Workers start immediately unless delayed start is requested; see
      // startAllWorkers() for the delayed path.
      startWorker: workers > 0 && !this.config.delayWorkerStart,
    };
    const queue = new Queue(
      queueName,
      this.config.redis,
      mergedOptions.defaultJobOptions || {},
      queueConfig,
      this.logger
    );
    // Store the queue
    this.queues.set(queueName, queue);
    // Automatically initialize batch cache for the queue
    this.initializeBatchCacheSync(queueName);
    // Add queue-specific rate limit rules
    if (this.rateLimiter && mergedOptions.rateLimitRules) {
      mergedOptions.rateLimitRules.forEach(rule => {
        // Ensure queue name is set for queue-specific rules
        const ruleWithQueue = { ...rule, queueName };
        if (this.rateLimiter) {
          this.rateLimiter.addRule(ruleWithQueue);
        }
      });
    }
    this.logger.info('Queue created with batch cache', {
      queueName,
      workers: workers,
      concurrency: concurrency,
    });
    return queue;
  }

  /**
   * Check if a queue exists
   */
  hasQueue(queueName: string): boolean {
    return this.queues.has(queueName);
  }

  /**
   * Get all queue names
   */
  getQueueNames(): string[] {
    return Array.from(this.queues.keys());
  }

  /**
   * Get or create a cache for a queue.
   *
   * Caches are namespaced per queue (`batch:<queueName>:`) with a 24h
   * default TTL.
   */
  getCache(queueName: string): CacheProvider {
    if (!this.caches.has(queueName)) {
      const cacheProvider = createCache({
        redisConfig: this.config.redis,
        keyPrefix: `batch:${queueName}:`,
        ttl: 86400, // 24 hours default
        enableMetrics: true,
        logger: this.logger,
      });
      this.caches.set(queueName, cacheProvider);
      this.logger.trace('Cache created for queue', { queueName });
    }
    const cache = this.caches.get(queueName);
    if (!cache) {
      // Unreachable in practice: the cache is created just above.
      throw new Error(`Expected cache for queue ${queueName} to exist`);
    }
    return cache;
  }

  /**
   * Initialize cache for a queue (ensures it's ready).
   * Waits up to 10s for the cache backend to become ready.
   */
  async initializeCache(queueName: string): Promise<void> {
    const cache = this.getCache(queueName);
    await cache.waitForReady(10000);
    this.logger.info('Cache initialized for queue', { queueName });
  }

  /**
   * Initialize batch cache synchronously (for automatic initialization)
   * The cache will be ready for use, but we don't wait for Redis connection
   */
  private initializeBatchCacheSync(queueName: string): void {
    // Just create the cache - it will connect automatically when first used
    this.getCache(queueName);
    this.logger.trace('Batch cache initialized synchronously for queue', { queueName });
  }

  /**
   * Get statistics for all queues.
   *
   * totalJobs sums every state (waiting/active/completed/failed/delayed)
   * across all queues; uptime is the Node process uptime in seconds.
   */
  async getGlobalStats(): Promise<GlobalStats> {
    const queueStats: Record<string, QueueStats> = {};
    let totalJobs = 0;
    let totalWorkers = 0;
    for (const [queueName, queue] of this.queues) {
      const stats = await queue.getStats();
      queueStats[queueName] = stats;
      totalJobs += stats.waiting + stats.active + stats.completed + stats.failed + stats.delayed;
      totalWorkers += stats.workers || 0;
    }
    return {
      queues: queueStats,
      totalJobs,
      totalWorkers,
      uptime: process.uptime(),
    };
  }

  /**
   * Get statistics for a specific queue.
   * @returns undefined when the queue does not exist.
   */
  async getQueueStats(queueName: string): Promise<QueueStats | undefined> {
    const queue = this.queues.get(queueName);
    if (!queue) {
      return undefined;
    }
    return await queue.getStats();
  }

  /**
   * Add a rate limit rule.
   * Lazily creates the rate limiter on first use.
   */
  addRateLimitRule(rule: RateLimitRule): void {
    if (!this.rateLimiter) {
      this.rateLimiter = new QueueRateLimiter(this.redisConnection, this.logger);
    }
    this.rateLimiter.addRule(rule);
  }

  /**
   * Check rate limits for a job.
   * Always allows when no rate limiter is configured.
   */
  async checkRateLimit(
    queueName: string,
    handler: string,
    operation: string
  ): Promise<{
    allowed: boolean;
    retryAfter?: number;
    remainingPoints?: number;
    appliedRule?: RateLimitRule;
  }> {
    if (!this.rateLimiter) {
      return { allowed: true };
    }
    return await this.rateLimiter.checkLimit(queueName, handler, operation);
  }

  /**
   * Get rate limit status.
   * Without a limiter, echoes back the identifiers with no limit info.
   */
  async getRateLimitStatus(queueName: string, handler: string, operation: string) {
    if (!this.rateLimiter) {
      return {
        queueName,
        handler,
        operation,
      };
    }
    return await this.rateLimiter.getStatus(queueName, handler, operation);
  }

  /**
   * Pause all queues
   */
  async pauseAll(): Promise<void> {
    const pausePromises = Array.from(this.queues.values()).map(queue => queue.pause());
    await Promise.all(pausePromises);
    this.logger.info('All queues paused');
  }

  /**
   * Resume all queues
   */
  async resumeAll(): Promise<void> {
    const resumePromises = Array.from(this.queues.values()).map(queue => queue.resume());
    await Promise.all(resumePromises);
    this.logger.info('All queues resumed');
  }

  /**
   * Pause a specific queue
   * @returns false when the queue does not exist.
   */
  async pauseQueue(queueName: string): Promise<boolean> {
    const queue = this.queues.get(queueName);
    if (!queue) {
      return false;
    }
    await queue.pause();
    return true;
  }

  /**
   * Resume a specific queue
   * @returns false when the queue does not exist.
   */
  async resumeQueue(queueName: string): Promise<boolean> {
    const queue = this.queues.get(queueName);
    if (!queue) {
      return false;
    }
    await queue.resume();
    return true;
  }

  /**
   * Drain all queues
   * @param delayed also remove delayed jobs when true.
   */
  async drainAll(delayed = false): Promise<void> {
    const drainPromises = Array.from(this.queues.values()).map(queue => queue.drain(delayed));
    await Promise.all(drainPromises);
    this.logger.info('All queues drained', { delayed });
  }

  /**
   * Clean all queues
   */
  async cleanAll(
    grace: number = 0,
    limit: number = 100,
    type: 'completed' | 'failed' = 'completed'
  ): Promise<void> {
    const cleanPromises = Array.from(this.queues.values()).map(queue =>
      queue.clean(grace, limit, type)
    );
    await Promise.all(cleanPromises);
    this.logger.info('All queues cleaned', { type, grace, limit });
  }

  /**
   * Shutdown all queues and workers (thread-safe).
   * Concurrent callers share the same in-flight shutdown promise.
   */
  async shutdown(): Promise<void> {
    // If already shutting down, return the existing promise
    if (this.shutdownPromise) {
      return this.shutdownPromise;
    }
    if (this.isShuttingDown) {
      return;
    }
    this.isShuttingDown = true;
    this.logger.info('Shutting down QueueManager...');
    // Create shutdown promise
    this.shutdownPromise = this.performShutdown();
    return this.shutdownPromise;
  }

  /**
   * Perform the actual shutdown.
   *
   * Closes queues (which own their workers), then caches, then clears the
   * internal registries. Individual close/clear failures are logged and
   * tolerated so one bad queue can't block the rest.
   */
  private async performShutdown(): Promise<void> {
    try {
      // Close all queues (this now includes workers since they're managed by Queue class)
      const queueShutdownPromises = Array.from(this.queues.values()).map(async queue => {
        try {
          // Add timeout to queue.close() to prevent hanging
          await queue.close();
          // const timeoutPromise = new Promise<never>((_, reject) =>
          //   setTimeout(() => reject(new Error('Queue close timeout')), 100)
          // );
          // await Promise.race([closePromise, timeoutPromise]);
        } catch (error) {
          this.logger.warn('Error closing queue', { error: (error as Error).message });
        }
      });
      await Promise.all(queueShutdownPromises);
      // Close all caches
      // NOTE(review): this *clears* each cache (deleting its data) but never
      // disconnects it — confirm clear-on-shutdown is intended and that the
      // cache connections are closed elsewhere.
      const cacheShutdownPromises = Array.from(this.caches.values()).map(async cache => {
        try {
          // Clear cache before shutdown
          await cache.clear();
        } catch (error) {
          this.logger.warn('Error clearing cache', { error: (error as Error).message });
        }
      });
      await Promise.all(cacheShutdownPromises);
      // Clear collections
      this.queues.clear();
      this.caches.clear();
      this.logger.info('QueueManager shutdown complete');
    } catch (error) {
      this.logger.error('Error during shutdown', { error: (error as Error).message });
      throw error;
    } finally {
      // Reset shutdown state
      this.shutdownPromise = null;
      this.isShuttingDown = false;
    }
  }

  /**
   * Start workers for all queues (used when delayWorkerStart is enabled).
   *
   * NOTE(review): uses the manager-level default worker/concurrency values
   * for every queue, not any per-queue options passed to getQueue() —
   * confirm that is acceptable for delayed starts.
   */
  startAllWorkers(): void {
    if (!this.config.delayWorkerStart) {
      this.logger.info(
        'startAllWorkers() called but workers already started automatically (delayWorkerStart is false)'
      );
      return;
    }
    let workersStarted = 0;
    for (const queue of this.queues.values()) {
      const workerCount = this.config.defaultQueueOptions?.workers || 1;
      const concurrency = this.config.defaultQueueOptions?.concurrency || 1;
      if (workerCount > 0) {
        queue.startWorkersManually(workerCount, concurrency);
        workersStarted++;
      }
    }
    this.logger.info('All workers started', {
      totalQueues: this.queues.size,
      queuesWithWorkers: workersStarted,
      delayWorkerStart: this.config.delayWorkerStart,
    });
  }

  /**
   * Wait for all queues to be ready
   */
  async waitUntilReady(): Promise<void> {
    const readyPromises = Array.from(this.queues.values()).map(queue => queue.waitUntilReady());
    await Promise.all(readyPromises);
  }

  /**
   * Get Redis configuration (for backward compatibility)
   */
  getRedisConfig() {
    return this.config.redis;
  }

  /**
   * Get the current configuration (shallow copy; nested objects are shared).
   */
  getConfig(): Readonly<QueueManagerConfig> {
    return { ...this.config };
  }
}

View file

@ -1,318 +0,0 @@
import { Queue, QueueEvents } from 'bullmq';
// import { getLogger } from '@stock-bot/logger';
// const logger = getLogger('queue-metrics');
/**
 * Snapshot of a queue's health and performance, as produced by
 * QueueMetricsCollector.collect(). Times are in milliseconds.
 */
export interface QueueMetrics {
  // Job counts
  waiting: number;
  active: number;
  completed: number;
  failed: number;
  delayed: number;
  // When the queue is paused this mirrors `waiting`; otherwise 0.
  paused?: number;
  // Performance metrics (ms), computed over an in-memory rolling sample
  processingTime: {
    avg: number;
    min: number;
    max: number;
    p95: number;
    p99: number;
  };
  // Throughput over the last 60 seconds
  throughput: {
    completedPerMinute: number;
    failedPerMinute: number;
    totalPerMinute: number;
  };
  // Job age: enqueue time of the oldest waiting job, if any
  oldestWaitingJob: Date | null;
  // Health: false when any heuristic in checkHealth() fires
  isHealthy: boolean;
  healthIssues: string[];
}
/**
 * Collects queue metrics from BullMQ events plus on-demand Redis counts.
 *
 * Fixes over the previous revision:
 *  - the two separate 'completed' listeners are merged into one;
 *  - 'failed' jobs now remove their jobStartTimes entry (previously only
 *    completed jobs did, so failed jobs leaked map entries forever);
 *  - checkHealth() no longer computes 0/0 -> NaN for the failure rate.
 */
export class QueueMetricsCollector {
  /** Rolling sample of job processing durations (ms), newest last. */
  private processingTimes: number[] = [];
  /** Completion timestamps within the last metricsInterval. */
  private completedTimestamps: number[] = [];
  /** Failure timestamps within the last metricsInterval. */
  private failedTimestamps: number[] = [];
  /** jobId -> Date.now() recorded when the job became active. */
  private jobStartTimes = new Map<string, number>();
  private readonly maxSamples = 1000;
  private readonly metricsInterval = 60000; // 1 minute

  constructor(
    private queue: Queue,
    private queueEvents: QueueEvents
  ) {
    this.setupEventListeners();
  }

  /**
   * Setup event listeners for metrics collection.
   * One handler per event: 'completed' records both the throughput
   * timestamp and the processing duration.
   */
  private setupEventListeners(): void {
    this.queueEvents.on('completed', ({ jobId }) => {
      // Record completion for throughput accounting
      this.completedTimestamps.push(Date.now());
      this.cleanupOldTimestamps();
      // Record processing duration if we saw this job become active
      const startTime = this.jobStartTimes.get(jobId);
      if (startTime) {
        const processingTime = Date.now() - startTime;
        this.recordProcessingTime(processingTime);
        this.jobStartTimes.delete(jobId);
      }
    });
    this.queueEvents.on('failed', ({ jobId }) => {
      // Record failure
      this.failedTimestamps.push(Date.now());
      this.cleanupOldTimestamps();
      // Drop the start-time entry so failed jobs don't leak map entries
      this.jobStartTimes.delete(jobId);
    });
    // Track processing times
    this.queueEvents.on('active', ({ jobId }) => {
      this.jobStartTimes.set(jobId, Date.now());
    });
  }

  /**
   * Record a processing time sample, keeping at most maxSamples entries.
   */
  private recordProcessingTime(time: number): void {
    this.processingTimes.push(time);
    // Keep only recent samples
    if (this.processingTimes.length > this.maxSamples) {
      this.processingTimes = this.processingTimes.slice(-this.maxSamples);
    }
  }

  /**
   * Drop throughput timestamps older than metricsInterval.
   */
  private cleanupOldTimestamps(): void {
    const cutoff = Date.now() - this.metricsInterval;
    this.completedTimestamps = this.completedTimestamps.filter(ts => ts > cutoff);
    this.failedTimestamps = this.failedTimestamps.filter(ts => ts > cutoff);
  }

  /**
   * Collect current metrics: Redis-backed counts plus locally tracked
   * processing times, throughput, and health heuristics.
   */
  async collect(): Promise<QueueMetrics> {
    // Get job counts
    const [waiting, active, completed, failed, delayed] = await Promise.all([
      this.queue.getWaitingCount(),
      this.queue.getActiveCount(),
      this.queue.getCompletedCount(),
      this.queue.getFailedCount(),
      this.queue.getDelayedCount(),
    ]);
    // BullMQ doesn't have getPausedCount, check if queue is paused
    const paused = (await this.queue.isPaused()) ? waiting : 0;
    // Calculate processing time metrics
    const processingTime = this.calculateProcessingTimeMetrics();
    // Calculate throughput
    const throughput = this.calculateThroughput();
    // Get oldest waiting job
    const oldestWaitingJob = await this.getOldestWaitingJob();
    // Check health
    const { isHealthy, healthIssues } = this.checkHealth({
      waiting,
      active,
      failed,
      processingTime,
    });
    return {
      waiting,
      active,
      completed,
      failed,
      delayed,
      paused,
      processingTime,
      throughput,
      oldestWaitingJob,
      isHealthy,
      healthIssues,
    };
  }

  /**
   * Calculate avg/min/max/p95/p99 over the current processing-time sample.
   */
  private calculateProcessingTimeMetrics(): QueueMetrics['processingTime'] {
    if (this.processingTimes.length === 0) {
      return { avg: 0, min: 0, max: 0, p95: 0, p99: 0 };
    }
    const sorted = [...this.processingTimes].sort((a, b) => a - b);
    const sum = sorted.reduce((acc, val) => acc + val, 0);
    return {
      // length > 0 is guaranteed by the early return above
      avg: Math.round(sum / sorted.length),
      min: sorted[0] || 0,
      max: sorted[sorted.length - 1] || 0,
      p95: sorted[Math.floor(sorted.length * 0.95)] || 0,
      p99: sorted[Math.floor(sorted.length * 0.99)] || 0,
    };
  }

  /**
   * Jobs completed/failed in the trailing 60 seconds.
   */
  private calculateThroughput(): QueueMetrics['throughput'] {
    const now = Date.now();
    const oneMinuteAgo = now - 60000;
    const completedPerMinute = this.completedTimestamps.filter(ts => ts > oneMinuteAgo).length;
    const failedPerMinute = this.failedTimestamps.filter(ts => ts > oneMinuteAgo).length;
    return {
      completedPerMinute,
      failedPerMinute,
      totalPerMinute: completedPerMinute + failedPerMinute,
    };
  }

  /**
   * Enqueue time of the oldest waiting job, or null when none are waiting.
   */
  private async getOldestWaitingJob(): Promise<Date | null> {
    const waitingJobs = await this.queue.getWaiting(0, 1);
    if (waitingJobs.length > 0) {
      return new Date(waitingJobs[0].timestamp);
    }
    return null;
  }

  /**
   * Heuristic health check over counts and processing times.
   */
  private checkHealth(metrics: {
    waiting: number;
    active: number;
    failed: number;
    processingTime: QueueMetrics['processingTime'];
  }): { isHealthy: boolean; healthIssues: string[] } {
    const issues: string[] = [];
    // Check for high failure rate.
    // Guard the denominator: the previous code computed 0/0 -> NaN when there
    // were no failures and no recent completions.
    // NOTE(review): this mixes the queue's all-time failed count with a
    // one-minute window of completions, which inflates the rate — confirm
    // intended before tightening the threshold.
    const totalOutcomes = metrics.failed + this.completedTimestamps.length;
    const failureRate = totalOutcomes > 0 ? metrics.failed / totalOutcomes : 0;
    if (failureRate > 0.1) {
      issues.push(`High failure rate: ${(failureRate * 100).toFixed(1)}%`);
    }
    // Check for queue backlog
    if (metrics.waiting > 1000) {
      issues.push(`Large queue backlog: ${metrics.waiting} jobs waiting`);
    }
    // Check for slow processing
    if (metrics.processingTime.avg > 30000) {
      // 30 seconds
      issues.push(
        `Slow average processing time: ${(metrics.processingTime.avg / 1000).toFixed(1)}s`
      );
    }
    // Check for stalled active jobs
    if (metrics.active > 100) {
      issues.push(`High number of active jobs: ${metrics.active}`);
    }
    return {
      isHealthy: issues.length === 0,
      healthIssues: issues,
    };
  }

  /**
   * Get a human-readable metrics report.
   */
  async getReport(): Promise<string> {
    const metrics = await this.collect();
    return `
Queue Metrics Report
===================
Status: ${metrics.isHealthy ? '✅ Healthy' : '⚠️ Issues Detected'}
Job Counts:
- Waiting: ${metrics.waiting}
- Active: ${metrics.active}
- Completed: ${metrics.completed}
- Failed: ${metrics.failed}
- Delayed: ${metrics.delayed}
- Paused: ${metrics.paused}
Performance:
- Avg Processing Time: ${(metrics.processingTime.avg / 1000).toFixed(2)}s
- Min/Max: ${(metrics.processingTime.min / 1000).toFixed(2)}s / ${(metrics.processingTime.max / 1000).toFixed(2)}s
- P95/P99: ${(metrics.processingTime.p95 / 1000).toFixed(2)}s / ${(metrics.processingTime.p99 / 1000).toFixed(2)}s
Throughput:
- Completed/min: ${metrics.throughput.completedPerMinute}
- Failed/min: ${metrics.throughput.failedPerMinute}
- Total/min: ${metrics.throughput.totalPerMinute}
${metrics.oldestWaitingJob ? `Oldest Waiting Job: ${metrics.oldestWaitingJob.toISOString()}` : 'No waiting jobs'}
${metrics.healthIssues.length > 0 ? `\nHealth Issues:\n${metrics.healthIssues.map(issue => `- ${issue}`).join('\n')}` : ''}
`.trim();
  }

  /**
   * Export metrics in Prometheus exposition format.
   */
  async getPrometheusMetrics(): Promise<string> {
    const metrics = await this.collect();
    const queueName = this.queue.name;
    return `
# HELP queue_jobs_total Total number of jobs by status
# TYPE queue_jobs_total gauge
queue_jobs_total{queue="${queueName}",status="waiting"} ${metrics.waiting}
queue_jobs_total{queue="${queueName}",status="active"} ${metrics.active}
queue_jobs_total{queue="${queueName}",status="completed"} ${metrics.completed}
queue_jobs_total{queue="${queueName}",status="failed"} ${metrics.failed}
queue_jobs_total{queue="${queueName}",status="delayed"} ${metrics.delayed}
queue_jobs_total{queue="${queueName}",status="paused"} ${metrics.paused}
# HELP queue_processing_time_seconds Job processing time in seconds
# TYPE queue_processing_time_seconds summary
queue_processing_time_seconds{queue="${queueName}",quantile="0.5"} ${(metrics.processingTime.avg / 1000).toFixed(3)}
queue_processing_time_seconds{queue="${queueName}",quantile="0.95"} ${(metrics.processingTime.p95 / 1000).toFixed(3)}
queue_processing_time_seconds{queue="${queueName}",quantile="0.99"} ${(metrics.processingTime.p99 / 1000).toFixed(3)}
queue_processing_time_seconds_sum{queue="${queueName}"} ${((metrics.processingTime.avg * this.processingTimes.length) / 1000).toFixed(3)}
queue_processing_time_seconds_count{queue="${queueName}"} ${this.processingTimes.length}
# HELP queue_throughput_per_minute Jobs processed per minute
# TYPE queue_throughput_per_minute gauge
queue_throughput_per_minute{queue="${queueName}",status="completed"} ${metrics.throughput.completedPerMinute}
queue_throughput_per_minute{queue="${queueName}",status="failed"} ${metrics.throughput.failedPerMinute}
queue_throughput_per_minute{queue="${queueName}",status="total"} ${metrics.throughput.totalPerMinute}
# HELP queue_health Queue health status
# TYPE queue_health gauge
queue_health{queue="${queueName}"} ${metrics.isHealthy ? 1 : 0}
`.trim();
  }
}

View file

@ -1,394 +0,0 @@
import { Queue as BullQueue, QueueEvents, Worker, type Job } from 'bullmq';
import { handlerRegistry } from '@stock-bot/handlers';
import type { JobData, JobOptions, ExtendedJobOptions, QueueStats, RedisConfig } from './types';
import { getRedisConnection } from './utils';
// Logger interface for type safety.
// Structural contract for an injectable logger; `child` is optional
// because the `console` fallback does not implement it (see
// createChildLogger for the fallback behavior).
interface Logger {
  info(message: string, meta?: Record<string, unknown>): void;
  error(message: string, meta?: Record<string, unknown>): void;
  warn(message: string, meta?: Record<string, unknown>): void;
  debug(message: string, meta?: Record<string, unknown>): void;
  trace(message: string, meta?: Record<string, unknown>): void;
  child?(name: string, context?: Record<string, unknown>): Logger;
}
/** Worker settings supplied when constructing a Queue. */
export interface QueueWorkerConfig {
  /** Number of Worker instances to create; 0/undefined means queue-only mode. */
  workers?: number;
  /** Jobs each worker may process concurrently (defaults to 1). */
  concurrency?: number;
  /** Set false to create workers later via startWorkersManually(). */
  startWorker?: boolean;
}
/**
* Consolidated Queue class that handles both job operations and optional worker management
* Can be used as a simple job queue or with workers for automatic processing
*/
export class Queue {
private bullQueue: BullQueue;
private workers: Worker[] = [];
private queueEvents?: QueueEvents;
private queueName: string;
private redisConfig: RedisConfig;
private readonly logger: Logger;
/**
 * @param queueName Logical queue name (wrapped in `{...}` below).
 * @param redisConfig Redis connection settings.
 * @param defaultJobOptions Job defaults merged over the built-in ones.
 * @param config Worker count/concurrency and whether to start immediately.
 * @param logger Structural logger; falls back to `console`.
 */
constructor(
  queueName: string,
  redisConfig: RedisConfig,
  defaultJobOptions: JobOptions = {},
  config: QueueWorkerConfig = {},
  logger?: Logger
) {
  this.queueName = queueName;
  this.redisConfig = redisConfig;
  this.logger = logger || console;
  const connection = getRedisConnection(redisConfig);
  // Initialize BullMQ queue.
  // NOTE(review): the `{...}` braces look like a Redis Cluster hash tag so
  // all of the queue's keys land in one slot — confirm that is the intent.
  this.bullQueue = new BullQueue(`{${queueName}}`, {
    connection,
    defaultJobOptions: {
      removeOnComplete: 10,
      removeOnFail: 5,
      attempts: 3,
      backoff: {
        type: 'exponential',
        delay: 1000,
      },
      ...defaultJobOptions, // caller-supplied defaults win
    },
  });
  // Initialize queue events if workers will be used
  if (config.workers && config.workers > 0) {
    this.queueEvents = new QueueEvents(`{${queueName}}`, { connection });
  }
  // Start workers if requested and not explicitly disabled
  if (config.workers && config.workers > 0 && config.startWorker !== false) {
    this.startWorkers(config.workers, config.concurrency || 1);
  }
  this.logger.trace('Queue created', {
    queueName,
    workers: config.workers || 0,
    concurrency: config.concurrency || 1,
  });
}
/**
* Get the queue name
*/
getName(): string {
  const { queueName } = this;
  return queueName;
}
/**
* Get the underlying BullMQ queue instance (for monitoring/admin purposes)
*/
getBullQueue(): BullQueue {
  // Expose the raw BullMQ handle for admin/monitoring tooling.
  const { bullQueue } = this;
  return bullQueue;
}
/**
* Add a single job to the queue
*/
async add(name: string, data: JobData, options: JobOptions = {}): Promise<Job> {
  const { queueName } = this;
  this.logger.trace('Adding job', { queueName, jobName: name });
  const job = await this.bullQueue.add(name, data, options);
  return job;
}
/**
* Add multiple jobs to the queue in bulk
*/
async addBulk(jobs: Array<{ name: string; data: JobData; opts?: JobOptions }>): Promise<Job[]> {
  const { queueName } = this;
  this.logger.trace('Adding bulk jobs', {
    queueName,
    jobCount: jobs.length,
  });
  const created = await this.bullQueue.addBulk(jobs);
  return created;
}
/**
* Add a scheduled job with cron-like pattern
*/
async addScheduledJob(
  name: string,
  data: JobData,
  cronPattern: string,
  options: ExtendedJobOptions = {}
): Promise<Job> {
  // The repeat key defaults to `queue:name` so re-registering the same
  // schedule replaces it instead of duplicating it; any caller-supplied
  // repeat fields take precedence over these defaults.
  const repeat = {
    pattern: cronPattern,
    key: `${this.queueName}:${name}`,
    ...options.repeat,
  };
  const scheduledOptions: ExtendedJobOptions = { ...options, repeat };
  this.logger.info('Adding scheduled job', {
    queueName: this.queueName,
    jobName: name,
    cronPattern,
    repeatKey: scheduledOptions.repeat?.key,
    immediately: scheduledOptions.repeat?.immediately,
  });
  return this.bullQueue.add(name, data, scheduledOptions);
}
/**
* Get queue statistics
*/
/**
 * Get queue statistics.
 *
 * Uses BullMQ's count APIs instead of fetching full job lists: the previous
 * version pulled every job in each state just to read `.length`, which
 * transfers O(jobs) of payload from Redis. This also matches how
 * QueueMetricsCollector gathers the same numbers.
 */
async getStats(): Promise<QueueStats> {
  const [waiting, active, completed, failed, delayed] = await Promise.all([
    this.bullQueue.getWaitingCount(),
    this.bullQueue.getActiveCount(),
    this.bullQueue.getCompletedCount(),
    this.bullQueue.getFailedCount(),
    this.bullQueue.getDelayedCount(),
  ]);
  const isPaused = await this.bullQueue.isPaused();
  return {
    waiting,
    active,
    completed,
    failed,
    delayed,
    paused: isPaused,
    workers: this.workers.length,
  };
}
/**
* Get a specific job by ID
*/
async getJob(jobId: string): Promise<Job | undefined> {
  // Thin pass-through; BullMQ resolves undefined for unknown ids.
  return this.bullQueue.getJob(jobId);
}
/**
* Get jobs by state
*/
async getJobs(
  states: Array<'waiting' | 'active' | 'completed' | 'failed' | 'delayed'>,
  start = 0,
  end = 100
): Promise<Job[]> {
  const jobs = await this.bullQueue.getJobs(states, start, end);
  return jobs;
}
/**
* Pause the queue (stops processing new jobs)
*/
async pause(): Promise<void> {
  const { queueName } = this;
  await this.bullQueue.pause();
  this.logger.info('Queue paused', { queueName });
}
/**
* Resume the queue
*/
async resume(): Promise<void> {
  const { queueName } = this;
  await this.bullQueue.resume();
  this.logger.info('Queue resumed', { queueName });
}
/**
* Drain the queue (remove all jobs)
*/
async drain(delayed = false): Promise<void> {
  const { queueName } = this;
  await this.bullQueue.drain(delayed);
  this.logger.info('Queue drained', { queueName, delayed });
}
/**
* Clean completed and failed jobs
*/
async clean(
  grace: number = 0,
  limit: number = 100,
  type: 'completed' | 'failed' = 'completed'
): Promise<void> {
  // Delegate to BullMQ's clean for the requested job state.
  await this.bullQueue.clean(grace, limit, type);
  const { queueName } = this;
  this.logger.debug('Queue cleaned', { queueName, type, grace, limit });
}
/**
* Wait until the queue is ready
*/
async waitUntilReady(): Promise<void> {
  // Block until the underlying BullMQ queue reports ready.
  return void (await this.bullQueue.waitUntilReady());
}
/**
 * Close the queue (cleanup resources)
 */
/**
 * Close the queue and release all resources.
 *
 * Order matters and is fixed here: workers are closed FIRST so no job is
 * mid-flight while the connection goes away, then queue events, then the
 * queue itself. (The previous version closed the queue before its workers,
 * contradicting its own "Close workers first" comment.)
 *
 * @throws rethrows any close error after logging it.
 */
async close(): Promise<void> {
  try {
    // Close workers first
    if (this.workers.length > 0) {
      await Promise.all(
        this.workers.map(async worker => {
          return await worker.close();
        })
      );
      this.workers = [];
      this.logger.debug('Workers closed', { queueName: this.queueName });
    }
    // Then stop listening for queue events
    if (this.queueEvents) {
      await this.queueEvents.close();
      this.logger.debug('Queue events closed', { queueName: this.queueName });
    }
    // Finally close the queue itself
    await this.bullQueue.close();
    this.logger.info('Queue closed', { queueName: this.queueName });
  } catch (error) {
    this.logger.error('Error closing queue', { queueName: this.queueName, error });
    throw error;
  }
}
/**
* Create a child logger with additional context
* Useful for batch processing and other queue operations
*/
createChildLogger(name: string, context?: Record<string, unknown>) {
  const { logger } = this;
  if (typeof logger.child !== 'function') {
    // console (the default fallback) has no child(); hand back the root logger.
    return logger;
  }
  return logger.child(name, context);
}
/**
* Start workers for this queue
*/
/**
 * Create `workerCount` BullMQ workers bound to this queue, each running
 * processJob with the given per-worker concurrency, and wire up
 * completion/failure/error logging.
 */
private startWorkers(workerCount: number, concurrency: number): void {
  const connection = getRedisConnection(this.redisConfig);
  for (let i = 0; i < workerCount; i++) {
    // Worker name uses the same `{queueName}` form as the queue itself.
    const worker = new Worker(`{${this.queueName}}`, this.processJob.bind(this), {
      connection,
      concurrency,
      maxStalledCount: 3,
      stalledInterval: 30000, // ms between stalled-job checks
    });
    // Setup worker event handlers
    worker.on('completed', job => {
      this.logger.trace('Job completed', {
        queueName: this.queueName,
        jobId: job.id,
        handler: job.data?.handler,
        operation: job.data?.operation,
      });
    });
    worker.on('failed', (job, err) => {
      // job may be undefined (e.g. when the failure is not tied to a job)
      this.logger.error('Job failed', {
        queueName: this.queueName,
        jobId: job?.id,
        handler: job?.data?.handler,
        operation: job?.data?.operation,
        error: err.message,
      });
    });
    worker.on('error', error => {
      this.logger.error('Worker error', {
        queueName: this.queueName,
        workerId: i,
        error: error.message,
      });
    });
    this.workers.push(worker);
  }
  this.logger.info('Workers started', {
    queueName: this.queueName,
    workerCount,
    concurrency,
  });
}
/**
* Process a job using the handler registry
*/
private async processJob(job: Job): Promise<unknown> {
const { handler, operation, payload }: JobData = job.data;
this.logger.trace('Processing job', {
id: job.id,
handler,
operation,
queueName: this.queueName,
});
try {
// Look up handler in registry
const jobHandler = handlerRegistry.getOperation(handler, operation);
if (!jobHandler) {
throw new Error(`No handler found for ${handler}:${operation}`);
}
const result = await jobHandler(payload);
this.logger.trace('Job completed successfully', {
id: job.id,
handler,
operation,
queueName: this.queueName,
});
return result;
} catch (error) {
this.logger.error('Job processing failed', {
id: job.id,
handler,
operation,
queueName: this.queueName,
error: error instanceof Error ? error.message : String(error),
});
throw error;
}
}
/**
* Start workers manually (for delayed initialization)
*/
startWorkersManually(workerCount: number, concurrency: number = 1): void {
if (this.workers.length > 0) {
this.logger.warn('Workers already started for queue', { queueName: this.queueName });
return;
}
// Initialize queue events if not already done
if (!this.queueEvents) {
const connection = getRedisConnection(this.redisConfig);
this.queueEvents = new QueueEvents(`{${this.queueName}}`, { connection });
}
this.startWorkers(workerCount, concurrency);
}
  /**
   * Get the number of workers currently attached to this queue.
   *
   * @returns count of active Worker instances (0 until workers are started)
   */
  getWorkerCount(): number {
    return this.workers.length;
  }
}

View file

@ -1,338 +0,0 @@
import { RateLimiterRedis, RateLimiterRes } from 'rate-limiter-flexible';
import type { RateLimitConfig as BaseRateLimitConfig, RateLimitRule } from './types';
// Logger interface for type safety.
// Matches the subset of console/structured-logger methods this module uses,
// so a plain `console` satisfies it as a fallback.
interface Logger {
  info(message: string, meta?: Record<string, unknown>): void;
  error(message: string, meta?: Record<string, unknown>): void;
  warn(message: string, meta?: Record<string, unknown>): void;
  debug(message: string, meta?: Record<string, unknown>): void;
}
// Extend the base config to add rate-limiter specific fields
export interface RateLimitConfig extends BaseRateLimitConfig {
  // Optional Redis key prefix override.
  // NOTE(review): not consumed anywhere in this file (addRule builds its own
  // `rl:${key}` prefix) — confirm whether callers rely on it.
  keyPrefix?: string;
}
export class QueueRateLimiter {
  /** Limiter instances, keyed by rule key (see getRuleKey). */
  private limiters = new Map<string, RateLimiterRedis>();
  /** All registered rules; specificity is resolved at check time. */
  private rules: RateLimitRule[] = [];
  private readonly logger: Logger;
  /**
   * @param redisClient - Redis connection used as the backing store for all limiters
   * @param logger - optional structured logger; defaults to console
   */
  constructor(
    private redisClient: ReturnType<typeof import('./utils').getRedisConnection>,
    logger?: Logger
  ) {
    this.logger = logger || console;
  }
  /**
   * Add a rate limit rule and create its Redis-backed limiter.
   * Adding a second rule with an identical scope replaces the stored limiter
   * while both rules remain in the rule list.
   */
  addRule(rule: RateLimitRule): void {
    this.rules.push(rule);
    const key = this.getRuleKey(rule.level, rule.queueName, rule.handler, rule.operation);
    const limiter = new RateLimiterRedis({
      storeClient: this.redisClient,
      keyPrefix: `rl:${key}`,
      points: rule.config.points,
      duration: rule.config.duration,
      blockDuration: rule.config.blockDuration || 0,
    });
    this.limiters.set(key, limiter);
    this.logger.info('Rate limit rule added', {
      level: rule.level,
      queueName: rule.queueName,
      handler: rule.handler,
      operation: rule.operation,
      points: rule.config.points,
      duration: rule.config.duration,
    });
  }
  /**
   * Check if a job can be processed based on rate limits.
   * Uses hierarchical precedence: operation > handler > queue > global —
   * the most specific matching rule takes precedence.
   *
   * Fails open: when no rule matches, the limiter is missing, or Redis errors,
   * the job is allowed.
   */
  async checkLimit(
    queueName: string,
    handler: string,
    operation: string
  ): Promise<{
    allowed: boolean;
    retryAfter?: number;
    remainingPoints?: number;
    appliedRule?: RateLimitRule;
  }> {
    const applicableRule = this.getMostSpecificRule(queueName, handler, operation);
    if (!applicableRule) {
      return { allowed: true };
    }
    const key = this.getRuleKey(
      applicableRule.level,
      applicableRule.queueName,
      applicableRule.handler,
      applicableRule.operation
    );
    const limiter = this.limiters.get(key);
    if (!limiter) {
      this.logger.warn('Rate limiter not found for rule', { key, rule: applicableRule });
      return { allowed: true };
    }
    try {
      const result = await this.consumePoint(
        limiter,
        this.getConsumerKey(queueName, handler, operation)
      );
      return {
        ...result,
        appliedRule: applicableRule,
      };
    } catch (error) {
      this.logger.error('Rate limit check failed', { queueName, handler, operation, error });
      // On error, allow the request to proceed
      return { allowed: true };
    }
  }
  /**
   * Get the most specific rule that applies to this job.
   * Precedence: operation > handler > queue > global.
   */
  private getMostSpecificRule(
    queueName: string,
    handler: string,
    operation: string
  ): RateLimitRule | undefined {
    // 1. Check for operation-specific rule (most specific)
    let rule = this.rules.find(
      r =>
        r.level === 'operation' &&
        r.queueName === queueName &&
        r.handler === handler &&
        r.operation === operation
    );
    if (rule) {
      return rule;
    }
    // 2. Check for handler-specific rule
    rule = this.rules.find(
      r => r.level === 'handler' && r.queueName === queueName && r.handler === handler
    );
    if (rule) {
      return rule;
    }
    // 3. Check for queue-specific rule
    rule = this.rules.find(r => r.level === 'queue' && r.queueName === queueName);
    if (rule) {
      return rule;
    }
    // 4. Check for global rule (least specific)
    rule = this.rules.find(r => r.level === 'global');
    return rule;
  }
  /**
   * Consume a point from the rate limiter.
   * A RateLimiterRes rejection means the limit was exceeded; any other
   * rejection (e.g. Redis failure) is rethrown for the caller to handle.
   */
  private async consumePoint(
    limiter: RateLimiterRedis,
    key: string
  ): Promise<{ allowed: boolean; retryAfter?: number; remainingPoints?: number }> {
    try {
      const result = await limiter.consume(key);
      return {
        allowed: true,
        remainingPoints: result.remainingPoints,
      };
    } catch (rejRes) {
      if (rejRes instanceof RateLimiterRes) {
        this.logger.warn('Rate limit exceeded', {
          key,
          retryAfter: rejRes.msBeforeNext,
        });
        return {
          allowed: false,
          retryAfter: rejRes.msBeforeNext,
          remainingPoints: rejRes.remainingPoints,
        };
      }
      throw rejRes;
    }
  }
  /**
   * Get rule key for storing the rate limiter (one limiter per rule scope).
   */
  private getRuleKey(
    level: string,
    queueName?: string,
    handler?: string,
    operation?: string
  ): string {
    switch (level) {
      case 'global':
        return 'global';
      case 'queue':
        return `queue:${queueName}`;
      case 'handler':
        return `handler:${queueName}:${handler}`;
      case 'operation':
        return `operation:${queueName}:${handler}:${operation}`;
      default:
        return level;
    }
  }
  /**
   * Get consumer key for rate limiting (what gets counted).
   */
  private getConsumerKey(queueName: string, handler: string, operation: string): string {
    return `${queueName}:${handler}:${operation}`;
  }
  /**
   * Get current rate limit status for a queue/handler/operation.
   * Read-only: does not consume a point. Fields are progressively omitted when
   * no rule applies, the limiter is missing, or the lookup fails.
   */
  async getStatus(
    queueName: string,
    handler: string,
    operation: string
  ): Promise<{
    queueName: string;
    handler: string;
    operation: string;
    appliedRule?: RateLimitRule;
    limit?: {
      level: string;
      points: number;
      duration: number;
      remaining: number;
      resetIn: number;
    };
  }> {
    const applicableRule = this.getMostSpecificRule(queueName, handler, operation);
    if (!applicableRule) {
      return {
        queueName,
        handler,
        operation,
      };
    }
    const key = this.getRuleKey(
      applicableRule.level,
      applicableRule.queueName,
      applicableRule.handler,
      applicableRule.operation
    );
    const limiter = this.limiters.get(key);
    if (!limiter) {
      return {
        queueName,
        handler,
        operation,
        appliedRule: applicableRule,
      };
    }
    try {
      const consumerKey = this.getConsumerKey(queueName, handler, operation);
      const result = await limiter.get(consumerKey);
      const limit = {
        level: applicableRule.level,
        points: limiter.points,
        duration: limiter.duration,
        // No record yet means nothing consumed: full allowance remains
        remaining: result?.remainingPoints ?? limiter.points,
        resetIn: result?.msBeforeNext ?? 0,
      };
      return {
        queueName,
        handler,
        operation,
        appliedRule: applicableRule,
        limit,
      };
    } catch (error) {
      this.logger.error('Failed to get rate limit status', { queueName, handler, operation, error });
      return {
        queueName,
        handler,
        operation,
        appliedRule: applicableRule,
      };
    }
  }
  /**
   * Reset rate limits for a specific consumer.
   * Only the fully-qualified (queue + handler + operation) reset is implemented;
   * broader scopes log a warning and do nothing.
   */
  async reset(queueName: string, handler?: string, operation?: string): Promise<void> {
    if (handler && operation) {
      // Reset specific operation
      const consumerKey = this.getConsumerKey(queueName, handler, operation);
      const rule = this.getMostSpecificRule(queueName, handler, operation);
      if (rule) {
        const key = this.getRuleKey(rule.level, rule.queueName, rule.handler, rule.operation);
        const limiter = this.limiters.get(key);
        if (limiter) {
          await limiter.delete(consumerKey);
        }
      }
    } else {
      // Reset broader scope - this is more complex with the new hierarchy
      this.logger.warn('Broad reset not implemented yet', { queueName, handler, operation });
    }
    this.logger.info('Rate limits reset', { queueName, handler, operation });
  }
  /**
   * Get all configured rate limit rules (defensive copy).
   */
  getRules(): RateLimitRule[] {
    return [...this.rules];
  }
  /**
   * Remove the first rate limit rule matching the given criteria.
   * Missing arguments act as wildcards in the match.
   *
   * @returns true when a rule was found and removed
   */
  removeRule(level: string, queueName?: string, handler?: string, operation?: string): boolean {
    const ruleIndex = this.rules.findIndex(
      r =>
        r.level === level &&
        (!queueName || r.queueName === queueName) &&
        (!handler || r.handler === handler) &&
        (!operation || r.operation === operation)
    );
    if (ruleIndex < 0) {
      return false;
    }
    const removed = this.rules[ruleIndex];
    this.rules.splice(ruleIndex, 1);
    if (removed) {
      // BUGFIX: derive the limiter key from the rule that actually matched,
      // not from the raw arguments. The arguments may be partial (wildcards),
      // so a key built from them could differ from the key the limiter was
      // stored under, leaking the limiter in the map.
      const key = this.getRuleKey(removed.level, removed.queueName, removed.handler, removed.operation);
      this.limiters.delete(key);
    }
    this.logger.info('Rate limit rule removed', { level, queueName, handler, operation });
    return true;
  }
}

View file

@ -1,169 +0,0 @@
import { createCache, type CacheProvider, type CacheStats } from '@stock-bot/cache';
import type { RedisConfig } from './types';
import { getServiceConfig } from './service-registry';
/**
* Service-aware cache that uses the service's Redis DB
* Automatically prefixes keys with the service's cache namespace
*/
export class ServiceCache implements CacheProvider {
  // Underlying cache, already configured with the service's DB and key prefix
  private cache: CacheProvider;
  // Key namespace for this instance (e.g. 'cache:di' or 'stock-bot:shared')
  private prefix: string;
  /**
   * @param serviceName - name registered in the service registry (unused when isGlobalCache)
   * @param redisConfig - base Redis settings; `db` is overridden per service
   * @param isGlobalCache - when true, use db:0 and the shared prefix
   * @param logger - optional logger forwarded to the underlying cache
   * @throws Error when the service is unknown and this is not the global cache
   */
  constructor(
    serviceName: string,
    redisConfig: RedisConfig,
    isGlobalCache: boolean = false,
    logger?: any
  ) {
    // Get service configuration
    const serviceConfig = getServiceConfig(serviceName);
    if (!serviceConfig && !isGlobalCache) {
      throw new Error(`Unknown service: ${serviceName}`);
    }
    // Determine Redis DB and prefix
    let db: number;
    let prefix: string;
    if (isGlobalCache) {
      // Global cache uses db:0
      db = 0;
      prefix = 'stock-bot:shared';
    } else {
      // Service cache uses service's DB
      // (non-null assertions are safe: the guard above threw when serviceConfig
      // is missing and this is not the global cache)
      db = serviceConfig!.db;
      prefix = serviceConfig!.cachePrefix;
    }
    // Create underlying cache with correct DB
    const cacheConfig = {
      redisConfig: {
        ...redisConfig,
        db,
      },
      keyPrefix: prefix + ':',
      logger,
    };
    this.cache = createCache(cacheConfig);
    this.prefix = prefix;
  }
  // Implement CacheProvider interface — these are straight delegations; the
  // underlying cache applies the key prefix, so callers pass bare keys.
  async get<T = any>(key: string): Promise<T | null> {
    return this.cache.get<T>(key);
  }
  async set<T = any>(
    key: string,
    value: T,
    options?:
      | number
      | {
          ttl?: number;
          preserveTTL?: boolean;
          onlyIfExists?: boolean;
          onlyIfNotExists?: boolean;
          getOldValue?: boolean;
        }
  ): Promise<T | null> {
    return this.cache.set(key, value, options);
  }
  async del(key: string): Promise<void> {
    return this.cache.del(key);
  }
  async exists(key: string): Promise<boolean> {
    return this.cache.exists(key);
  }
  async clear(): Promise<void> {
    return this.cache.clear();
  }
  async keys(pattern: string): Promise<string[]> {
    return this.cache.keys(pattern);
  }
  getStats(): CacheStats {
    return this.cache.getStats();
  }
  async health(): Promise<boolean> {
    return this.cache.health();
  }
  async waitForReady(timeout?: number): Promise<void> {
    return this.cache.waitForReady(timeout);
  }
  isReady(): boolean {
    return this.cache.isReady();
  }
  // Enhanced cache methods (delegate to underlying cache if available).
  // Each falls back to an equivalent set() call when the provider does not
  // implement the optional method.
  async update<T = any>(key: string, value: T): Promise<T | null> {
    if (this.cache.update) {
      return this.cache.update(key, value);
    }
    // Fallback implementation: keep the existing TTL while replacing the value
    return this.cache.set(key, value, { preserveTTL: true });
  }
  async setIfExists<T = any>(key: string, value: T, ttl?: number): Promise<boolean> {
    if (this.cache.setIfExists) {
      return this.cache.setIfExists(key, value, ttl);
    }
    // Fallback implementation: set() returns null when the key did not exist
    const result = await this.cache.set(key, value, { onlyIfExists: true, ttl });
    return result !== null;
  }
  async setIfNotExists<T = any>(key: string, value: T, ttl?: number): Promise<boolean> {
    if (this.cache.setIfNotExists) {
      return this.cache.setIfNotExists(key, value, ttl);
    }
    // Fallback implementation: set() returns null when the key already existed
    const result = await this.cache.set(key, value, { onlyIfNotExists: true, ttl });
    return result !== null;
  }
  async replace<T = any>(key: string, value: T, ttl?: number): Promise<T | null> {
    if (this.cache.replace) {
      return this.cache.replace(key, value, ttl);
    }
    // Fallback implementation
    return this.cache.set(key, value, ttl);
  }
  async updateField<T = any>(key: string, updater: (current: T | null) => T, ttl?: number): Promise<T | null> {
    if (this.cache.updateField) {
      return this.cache.updateField(key, updater, ttl);
    }
    // Fallback implementation: read-modify-write.
    // NOTE(review): this fallback is not atomic — concurrent updaters can race;
    // confirm callers tolerate lost updates when the provider lacks updateField.
    const current = await this.cache.get<T>(key);
    const updated = updater(current);
    return this.cache.set(key, updated, ttl);
  }
  /**
   * Get the actual Redis key with prefix
   * (mirrors the `prefix + ':'` keyPrefix configured in the constructor).
   */
  getKey(key: string): string {
    return `${this.prefix}:${key}`;
  }
}
/**
 * Factory function to create a service cache.
 *
 * @param serviceName - service whose Redis DB and prefix should be used
 * @param redisConfig - base Redis connection settings
 * @param options - `global` selects the shared db:0 cache; `logger` is forwarded
 */
export function createServiceCache(
  serviceName: string,
  redisConfig: RedisConfig,
  options: { global?: boolean; logger?: any } = {}
): ServiceCache {
  const { global: isGlobal, logger } = options;
  return new ServiceCache(serviceName, redisConfig, isGlobal, logger);
}

View file

@ -1,115 +0,0 @@
/**
* Service Registry Configuration
* Maps services to their Redis databases and configurations
*/
export interface ServiceConfig {
  /** Redis database number for this service (used for both queues and cache) */
  db: number;
  /** Prefix for queue keys (e.g., 'bull:di') */
  queuePrefix: string;
  /** Prefix for cache keys (e.g., 'cache:di') */
  cachePrefix: string;
  /** Whether this service only produces jobs (doesn't process them) */
  producerOnly?: boolean;
  /** List of handlers this service owns (auto-discovered if not provided) */
  // NOTE(review): handler-to-service lookups (findServiceForHandler) only see
  // handlers listed here — confirm auto-discovery populates this elsewhere.
  handlers?: string[];
}
/**
 * Central registry of all services and their configurations
 * Each service gets one Redis DB for both queues and cache
 *
 * Database assignments:
 * - db:0 = Global shared cache
 * - db:1 = data-ingestion (queues + cache)
 * - db:2 = data-pipeline (queues + cache)
 * - db:3 = web-api (cache only, producer-only for queues)
 */
// Shared config objects: each canonical service name and its camelCase alias
// reference the same object, so the two entries can never drift apart.
const dataIngestionConfig: ServiceConfig = {
  db: 1,
  queuePrefix: 'bull:di',
  cachePrefix: 'cache:di',
  handlers: ['ceo', 'qm', 'webshare', 'ib', 'proxy'],
};
const dataPipelineConfig: ServiceConfig = {
  db: 2,
  queuePrefix: 'bull:dp',
  cachePrefix: 'cache:dp',
  handlers: ['exchanges', 'symbols'],
};
const webApiConfig: ServiceConfig = {
  db: 3,
  queuePrefix: 'bull:api', // Not used since producer-only
  cachePrefix: 'cache:api',
  producerOnly: true,
};
export const SERVICE_REGISTRY: Record<string, ServiceConfig> = {
  'data-ingestion': dataIngestionConfig,
  'data-pipeline': dataPipelineConfig,
  'web-api': webApiConfig,
  // Aliases for services with different naming conventions
  webApi: webApiConfig,
  dataIngestion: dataIngestionConfig,
  dataPipeline: dataPipelineConfig,
};
/**
 * Get service configuration.
 *
 * @param serviceName - canonical name or camelCase alias
 * @returns the registered config, or undefined for unknown services
 */
export function getServiceConfig(serviceName: string): ServiceConfig | undefined {
  return SERVICE_REGISTRY[serviceName];
}
/**
 * Find which service owns a handler.
 * Returns the first registry entry listing the handler (aliases share the
 * same handler lists, so the canonical entry wins by iteration order).
 */
export function findServiceForHandler(handlerName: string): string | undefined {
  const match = Object.entries(SERVICE_REGISTRY).find(([, config]) =>
    config.handlers?.includes(handlerName)
  );
  return match?.[0];
}
/**
 * Get full queue name - just the handler name, since per-service Redis DB
 * isolation already provides namespace separation.
 * The serviceName parameter is retained for interface stability.
 */
export function getFullQueueName(serviceName: string, handlerName: string): string {
  return handlerName;
}
/**
 * Parse a full queue name into service and handler.
 * Queue names are just handler names now, so the owning service is resolved
 * via the registry; returns null when no service owns the handler.
 */
export function parseQueueName(fullQueueName: string): { service: string; handler: string } | null {
  // The queue name IS the handler name under DB isolation
  const service = findServiceForHandler(fullQueueName);
  if (!service) {
    return null;
  }
  return { service, handler: fullQueueName };
}

View file

@ -1,349 +0,0 @@
import { Queue as BullQueue, type Job } from 'bullmq';
import { handlerRegistry } from '@stock-bot/handlers';
import { getLogger, type Logger } from '@stock-bot/logger';
import { QueueManager } from './queue-manager';
import { Queue } from './queue';
import type {
SmartQueueConfig,
QueueRoute,
JobData,
JobOptions,
RedisConfig
} from './types';
import {
SERVICE_REGISTRY,
getServiceConfig,
findServiceForHandler,
getFullQueueName,
parseQueueName,
type ServiceConfig
} from './service-registry';
import { getRedisConnection } from './utils';
/**
* Smart Queue Manager with automatic service discovery and routing
* Handles cross-service communication seamlessly
*/
export class SmartQueueManager extends QueueManager {
  private serviceName: string;
  private serviceConfig: ServiceConfig;
  /** Known routes, keyed by bare handler name (e.g. 'ceo'). */
  private queueRoutes = new Map<string, QueueRoute>();
  // Cached Redis connection options by DB. NOTE(review): getRedisConnection
  // returns a plain options object, not a live client — BullMQ creates the
  // actual connections from these options.
  private connections = new Map<number, any>();
  private producerQueues = new Map<string, BullQueue>(); // For cross-service sending
  private _logger: Logger;
  /**
   * @param config - smart queue configuration; serviceName must be registered
   * @param logger - optional logger; defaults to a named logger
   * @throws when config.serviceName is not in the service registry
   */
  constructor(config: SmartQueueConfig, logger?: Logger) {
    // Get service config
    const serviceConfig = getServiceConfig(config.serviceName);
    if (!serviceConfig) {
      throw new Error(`Unknown service: ${config.serviceName}`);
    }
    // Update Redis config to use service's DB
    const modifiedConfig = {
      ...config,
      redis: {
        ...config.redis,
        db: serviceConfig.db,
      },
    };
    super(modifiedConfig, logger);
    this.serviceName = config.serviceName;
    this.serviceConfig = serviceConfig;
    this._logger = logger || getLogger('SmartQueueManager');
    // Auto-discover routes if enabled
    if (config.autoDiscoverHandlers !== false) {
      this.discoverQueueRoutes();
    }
    this._logger.info('SmartQueueManager initialized', {
      service: this.serviceName,
      db: serviceConfig.db,
      handlers: serviceConfig.handlers,
      producerOnly: serviceConfig.producerOnly,
    });
  }
  /**
   * Discover all available queue routes from the handler registry, then fill
   * in any handlers only known to the static service registry.
   */
  private discoverQueueRoutes(): void {
    // Discover from handler registry if available
    try {
      const handlers = handlerRegistry.getAllHandlers();
      for (const [handlerName, handlerConfig] of handlers) {
        // Find which service owns this handler
        const ownerService = findServiceForHandler(handlerName);
        if (ownerService) {
          const ownerConfig = getServiceConfig(ownerService)!;
          const fullName = getFullQueueName(ownerService, handlerName);
          this.queueRoutes.set(handlerName, {
            fullName,
            service: ownerService,
            handler: handlerName,
            db: ownerConfig.db,
            operations: Object.keys(handlerConfig.operations || {}),
          });
          this._logger.trace('Discovered queue route', {
            handler: handlerName,
            service: ownerService,
            db: ownerConfig.db,
          });
        }
      }
    } catch (error) {
      this._logger.warn('Handler registry not available, using static configuration', { error });
    }
    // Also add routes from static configuration (registry entries win)
    Object.entries(SERVICE_REGISTRY).forEach(([serviceName, config]) => {
      if (config.handlers) {
        config.handlers.forEach(handlerName => {
          if (!this.queueRoutes.has(handlerName)) {
            const fullName = getFullQueueName(serviceName, handlerName);
            this.queueRoutes.set(handlerName, {
              fullName,
              service: serviceName,
              handler: handlerName,
              db: config.db,
            });
          }
        });
      }
    });
  }
  /**
   * Get or create (and cache) Redis connection options for a specific DB.
   */
  private getConnection(db: number): any {
    if (!this.connections.has(db)) {
      const redisConfig: RedisConfig = {
        ...this.getRedisConfig(),
        db,
      };
      const connection = getRedisConnection(redisConfig);
      this.connections.set(db, connection);
      this._logger.debug('Created Redis connection', { db });
    }
    return this.connections.get(db);
  }
  /**
   * Get a queue for the current service (for processing).
   * Overrides parent to use namespaced queue names.
   */
  override getQueue(queueName: string, options = {}): Queue {
    // For local queues, use the service namespace
    const fullQueueName = getFullQueueName(this.serviceName, queueName);
    return super.getQueue(fullQueueName, options);
  }
  /**
   * Send a job to any queue (local or remote).
   * This is the main method for cross-service communication.
   *
   * @throws when the target queue cannot be resolved to a route
   */
  async send(
    targetQueue: string,
    operation: string,
    payload: unknown,
    options: JobOptions = {}
  ): Promise<Job> {
    // Resolve the target queue
    const route = this.resolveQueueRoute(targetQueue);
    if (!route) {
      throw new Error(`Unknown queue: ${targetQueue}`);
    }
    // Validate operation if we have metadata (warn only — still sent)
    if (route.operations && !route.operations.includes(operation)) {
      this._logger.warn('Operation not found in handler metadata', {
        queue: targetQueue,
        operation,
        available: route.operations,
      });
    }
    // Get or create producer queue for the target
    const producerQueue = this.getProducerQueue(route);
    // Create job data
    const jobData: JobData = {
      handler: route.handler,
      operation,
      payload,
    };
    // Send the job
    const job = await producerQueue.add(operation, jobData, options);
    this._logger.debug('Job sent to queue', {
      from: this.serviceName,
      to: route.service,
      queue: route.handler,
      operation,
      jobId: job.id,
    });
    return job;
  }
  /**
   * Alias for send() with a more explicit name.
   *
   * BUGFIX: routes are keyed by bare handler names, so the previous
   * `${targetService}:${handler}` queue name could never resolve and every
   * call threw "Unknown queue". We now route by handler and use targetService
   * only as a sanity check.
   */
  async sendTo(
    targetService: string,
    handler: string,
    operation: string,
    payload: unknown,
    options: JobOptions = {}
  ): Promise<Job> {
    const route = this.resolveQueueRoute(handler);
    if (route && route.service !== targetService) {
      this._logger.warn('sendTo target service does not own handler', {
        targetService,
        handler,
        owner: route.service,
      });
    }
    return this.send(handler, operation, payload, options);
  }
  /**
   * Resolve a queue name (a bare handler name) to a route.
   * Checks the parsed registry, then discovered routes, then static config.
   */
  private resolveQueueRoute(queueName: string): QueueRoute | null {
    // Check if it's a handler name (which is now the full queue name)
    const parsed = parseQueueName(queueName);
    if (parsed) {
      const config = getServiceConfig(parsed.service);
      if (config) {
        return {
          fullName: queueName,
          service: parsed.service,
          handler: parsed.handler,
          db: config.db,
        };
      }
    }
    // Check discovered routes
    const route = this.queueRoutes.get(queueName);
    if (route) {
      return route;
    }
    // Try to find in static config
    const ownerService = findServiceForHandler(queueName);
    if (ownerService) {
      const config = getServiceConfig(ownerService)!;
      return {
        fullName: getFullQueueName(ownerService, queueName),
        service: ownerService,
        handler: queueName,
        db: config.db,
      };
    }
    return null;
  }
  /**
   * Get or create a producer queue for cross-service communication.
   */
  private getProducerQueue(route: QueueRoute): BullQueue {
    if (!this.producerQueues.has(route.fullName)) {
      const connection = this.getConnection(route.db);
      // Match the queue name format used by workers: {queueName}
      const queue = new BullQueue(`{${route.fullName}}`, {
        connection,
        defaultJobOptions: this.getConfig().defaultQueueOptions?.defaultJobOptions || {},
      });
      this.producerQueues.set(route.fullName, queue);
    }
    return this.producerQueues.get(route.fullName)!;
  }
  /**
   * Get all queues (for monitoring purposes).
   */
  getAllQueues(): Record<string, BullQueue> {
    const allQueues: Record<string, BullQueue> = {};
    // Get all worker queues using public API
    const workerQueueNames = this.getQueueNames();
    for (const name of workerQueueNames) {
      const queue = this.getQueue(name);
      if (queue && typeof queue.getBullQueue === 'function') {
        // Extract the underlying BullMQ queue using the public getter
        // Use the simple handler name without service prefix for display
        const parts = name.split(':');
        const simpleName = parts.length > 1 ? parts[1] : name;
        if (simpleName) {
          allQueues[simpleName] = queue.getBullQueue();
        }
      }
    }
    // Add producer queues
    for (const [name, queue] of this.producerQueues) {
      // Use the simple handler name without service prefix for display
      const parts = name.split(':');
      const simpleName = parts.length > 1 ? parts[1] : name;
      if (simpleName && !allQueues[simpleName]) {
        allQueues[simpleName] = queue;
      }
    }
    // If no queues found, build monitoring queues for every registered handler.
    // BUGFIX: handlers are looked up in the registry (instead of a hard-coded
    // list) and each queue is created against its owning service's DB instead
    // of always DB 1 — previously data-pipeline handlers pointed at the wrong DB.
    if (Object.keys(allQueues).length === 0) {
      const handlerNames = new Set(
        Object.values(SERVICE_REGISTRY).flatMap(config => config.handlers ?? [])
      );
      for (const handler of handlerNames) {
        const ownerService = findServiceForHandler(handler);
        const db = ownerService ? getServiceConfig(ownerService)!.db : 1;
        const connection = this.getConnection(db);
        allQueues[handler] = new BullQueue(`{${handler}}`, {
          connection,
          defaultJobOptions: this.getConfig().defaultQueueOptions?.defaultJobOptions || {},
        });
      }
    }
    return allQueues;
  }
  /**
   * Get statistics for all queues across all services.
   */
  async getAllStats(): Promise<Record<string, any>> {
    const stats: Record<string, any> = {};
    // Get stats for local queues
    stats[this.serviceName] = await this.getGlobalStats();
    // Get stats for other services if we have access
    // This would require additional implementation
    return stats;
  }
  /**
   * Graceful shutdown: close producer queues, drop cross-service connections,
   * then delegate to the parent manager.
   */
  override async shutdown(): Promise<void> {
    // Close producer queues
    for (const [name, queue] of this.producerQueues) {
      await queue.close();
      this._logger.debug('Closed producer queue', { queue: name });
    }
    // Drop additional connections (never our main one)
    for (const [db, connection] of this.connections) {
      if (db !== this.serviceConfig.db) {
        // BUGFIX: cached entries are plain option objects from
        // getRedisConnection and have no disconnect(); only call it when a
        // real client was cached, instead of throwing during shutdown.
        if (typeof connection?.disconnect === 'function') {
          connection.disconnect();
        }
        this._logger.debug('Closed Redis connection', { db });
      }
    }
    // Call parent shutdown
    await super.shutdown();
  }
}

View file

@ -1,169 +0,0 @@
// Import types we need to extend
import type { JobOptions, QueueStats } from '@stock-bot/types';
// Re-export handler and queue types from shared types package
export type {
HandlerConfig,
HandlerConfigWithSchedule,
JobHandler,
ScheduledJob,
TypedJobHandler,
JobData,
JobOptions,
QueueWorkerConfig,
QueueStats
} from '@stock-bot/types';
/** Options controlling how a collection of items is enqueued for processing. */
export interface ProcessOptions {
  /** Total window (hours) over which the created jobs are distributed. */
  totalDelayHours: number;
  /** Items per batch when batching is enabled. */
  batchSize?: number;
  priority?: number;
  /** When true, items are grouped into batch jobs instead of one job per item. */
  useBatching?: boolean;
  retries?: number;
  /** Job TTL — NOTE(review): units (ms vs s) not visible here; confirm with consumer. */
  ttl?: number;
  removeOnComplete?: number;
  removeOnFail?: number;
  // Job routing information
  handler?: string;
  operation?: string;
}
/** Summary returned after enqueueing items (directly or in batches). */
export interface BatchResult {
  jobsCreated: number;
  /** 'direct' = one job per item; 'batch' = grouped batch jobs. */
  mode: 'direct' | 'batch';
  totalItems: number;
  /** Only present in batch mode. */
  batchesCreated?: number;
  /** Time spent creating the jobs — TODO confirm units (presumably ms). */
  duration: number;
}
// New improved types for the refactored architecture
/** Connection settings for a single Redis instance. */
export interface RedisConfig {
  host: string;
  port: number;
  password?: string;
  /** Logical Redis database index (used for per-service isolation). */
  db?: number;
}
// Extended job options specific to this queue implementation
export interface ExtendedJobOptions extends JobOptions {
  /** BullMQ repeatable-job settings (cron pattern or fixed interval). */
  repeat?: {
    pattern?: string;
    key?: string;
    limit?: number;
    every?: number;
    immediately?: boolean;
  };
}
/** Per-queue configuration: worker counts, metrics, DLQ, and rate limiting. */
export interface QueueOptions {
  defaultJobOptions?: ExtendedJobOptions;
  workers?: number;
  concurrency?: number;
  enableMetrics?: boolean;
  enableDLQ?: boolean;
  enableRateLimit?: boolean;
  rateLimitRules?: RateLimitRule[]; // Queue-specific rate limit rules
}
/** Top-level configuration for the QueueManager. */
export interface QueueManagerConfig {
  redis: RedisConfig;
  /** Defaults applied to every queue unless overridden per queue. */
  defaultQueueOptions?: QueueOptions;
  enableScheduledJobs?: boolean;
  globalRateLimit?: RateLimitConfig;
  rateLimitRules?: RateLimitRule[]; // Global rate limit rules
  delayWorkerStart?: boolean; // If true, workers won't start automatically
}
// Queue-specific stats that extend the base types
/** Aggregated statistics across every queue managed by one manager. */
export interface GlobalStats {
  queues: Record<string, QueueStats>;
  totalJobs: number;
  totalWorkers: number;
  /** Manager uptime — NOTE(review): units not visible here; confirm ms vs s. */
  uptime: number;
}
// Legacy type for backward compatibility
export interface QueueConfig extends QueueManagerConfig {
  queueName?: string;
  workers?: number;
  concurrency?: number;
  handlers?: HandlerInitializer[];
  dlqConfig?: DLQConfig;
  enableMetrics?: boolean;
}
// Extended batch job data for queue implementation
export interface BatchJobData {
  /** Key under which this batch's item payloads are stored — presumably a
   * cache key; verify against the batch producer. */
  payloadKey: string;
  /** Index of this batch within the run — TODO confirm whether zero-based. */
  batchIndex: number;
  totalBatches: number;
  itemCount: number;
  totalDelayHours: number; // Total time to distribute all batches
}
/** Side-effectful setup hook that registers handlers (sync or async). */
export interface HandlerInitializer {
  (): void | Promise<void>;
}
// Rate limiting types
/** A consumption budget: `points` per `duration` window. */
export interface RateLimitConfig {
  points: number;
  /** Window length — NOTE(review): rate-limiter-flexible treats this as seconds; confirm. */
  duration: number;
  /** Extra block time once the budget is exhausted (0/absent = no extra block). */
  blockDuration?: number;
}
/** A limit bound to a scope; more specific levels take precedence at check time. */
export interface RateLimitRule {
  level: 'global' | 'queue' | 'handler' | 'operation';
  queueName?: string; // For queue-level limits
  handler?: string; // For handler-level limits
  operation?: string; // For operation-level limits (most specific)
  config: RateLimitConfig;
}
// DLQ types
/** Dead-letter queue behavior settings. */
export interface DLQConfig {
  maxRetries?: number;
  retryDelay?: number;
  alertThreshold?: number;
  /** Age after which dead jobs are purged — TODO confirm units. */
  cleanupAge?: number;
}
/** Snapshot of a job that landed in the dead-letter queue. */
export interface DLQJobInfo {
  id: string;
  name: string;
  failedReason: string;
  attemptsMade: number;
  timestamp: number;
  data: unknown;
}
/** Definition of a cron-scheduled job. */
export interface ScheduleConfig {
  /** Cron pattern (maps to BullMQ `repeat.pattern`). */
  pattern: string;
  jobName: string;
  data?: unknown;
  options?: ExtendedJobOptions;
}
// Smart Queue Types
export interface SmartQueueConfig extends QueueManagerConfig {
  /** Name of the current service */
  serviceName: string;
  /** Whether to auto-discover handlers from registry (defaults to enabled) */
  autoDiscoverHandlers?: boolean;
  /** Custom service registry (defaults to built-in) */
  serviceRegistry?: Record<string, any>;
}
/** Resolved routing information for one handler queue. */
export interface QueueRoute {
  /** Full queue name (now just the handler name, e.g., 'ceo') */
  fullName: string;
  /** Service that owns this queue */
  service: string;
  /** Handler name */
  handler: string;
  /** Redis DB number */
  db: number;
  /** Available operations (only known when discovered from the handler registry) */
  operations?: string[];
}

View file

@ -1,28 +0,0 @@
import type { RedisConfig } from './types';
/**
 * Get Redis connection configuration with retry settings.
 * In test runs (NODE_ENV=test or BUNIT=1) timeouts and retries are shortened.
 */
export function getRedisConnection(config: RedisConfig) {
  const isTest = process.env.NODE_ENV === 'test' || process.env['BUNIT'] === '1';
  const maxRetries = isTest ? 1 : 3;
  const retryStrategy = (times: number) => {
    if (times > maxRetries) {
      return null; // Stop retrying
    }
    return isTest ? 100 : Math.min(times * 100, 3000);
  };
  return {
    host: config.host,
    port: config.port,
    password: config.password,
    db: config.db,
    maxRetriesPerRequest: null, // Required by BullMQ
    enableReadyCheck: false,
    connectTimeout: isTest ? 1000 : 3000,
    lazyConnect: false, // Connect immediately rather than on first command
    // Maintain persistent connections.
    // NOTE(review): ioredis types `keepAlive` as a number (initial delay in ms);
    // `true` relies on coercion — confirm intended.
    keepAlive: true,
    retryStrategy,
  };
}

View file

@ -1,364 +0,0 @@
import { afterEach, beforeEach, describe, expect, test } from 'bun:test';
import { handlerRegistry, processItems, Queue, QueueManager } from '../src';
// Suppress Redis connection errors in tests.
// Teardown can reject after a test closes its connections (presumably from
// ioredis/BullMQ); those known messages are expected noise, so only genuinely
// unexpected rejections are logged.
process.on('unhandledRejection', (reason, promise) => {
  // Narrow the unknown rejection reason before reading .message
  if (reason && typeof reason === 'object' && 'message' in reason) {
    const message = (reason as Error).message;
    if (
      message.includes('Connection is closed') ||
      message.includes('Connection is in monitoring mode')
    ) {
      return;
    }
  }
  console.error('Unhandled Rejection at:', promise, 'reason:', reason);
});
/**
 * Integration tests for the batch processor (`processItems`).
 *
 * Requires a local Redis/Dragonfly on localhost:6379. Each test uses a
 * unique queue name to avoid cross-test interference, and the singleton
 * QueueManager is reset between tests.
 *
 * Fixes applied in review:
 *  - replaced deprecated `String.prototype.substr` with `slice`
 *  - narrowed the `catch` binding before reading `.message` (the binding is
 *    `unknown` under strict TS settings)
 */
describe('Batch Processor', () => {
  let queueManager: QueueManager;
  let queue: Queue;
  let queueName: string;
  const redisConfig = {
    host: 'localhost',
    port: 6379,
    password: '',
    db: 0,
  };
  beforeEach(async () => {
    // Clear handler registry
    handlerRegistry.clear();
    // Register test handler
    handlerRegistry.register('batch-test', {
      'process-item': async payload => {
        return { processed: true, data: payload };
      },
      generic: async payload => {
        return { processed: true, data: payload };
      },
      'process-batch-items': async _batchData => {
        // This is called by the batch processor internally
        return { batchProcessed: true };
      },
    });
    // Use unique queue name per test to avoid conflicts
    // (slice(2, 11) yields the same 9 random chars substr(2, 9) did)
    queueName = `batch-test-queue-${Date.now()}-${Math.random().toString(36).slice(2, 11)}`;
    // Reset and initialize singleton QueueManager for tests
    await QueueManager.reset();
    queueManager = QueueManager.initialize({
      redis: redisConfig,
      defaultQueueOptions: {
        workers: 0, // No workers in tests
        concurrency: 5,
      },
    });
    // Get queue using the new getQueue() method (batch cache is now auto-initialized)
    queue = queueManager.getQueue(queueName);
    // Note: Batch cache is now automatically initialized when getting the queue
    // Ensure completely clean state - wait for queue to be ready first
    await queue.getBullQueue().waitUntilReady();
    // Clear all job states
    await queue.getBullQueue().drain(true);
    await queue.getBullQueue().clean(0, 1000, 'completed');
    await queue.getBullQueue().clean(0, 1000, 'failed');
    await queue.getBullQueue().clean(0, 1000, 'active');
    await queue.getBullQueue().clean(0, 1000, 'waiting');
    await queue.getBullQueue().clean(0, 1000, 'delayed');
    // Add a small delay to ensure cleanup is complete
    await new Promise(resolve => setTimeout(resolve, 50));
  });
  afterEach(async () => {
    try {
      // Clean up jobs first
      if (queue) {
        try {
          await queue.getBullQueue().drain(true);
          await queue.getBullQueue().clean(0, 1000, 'completed');
          await queue.getBullQueue().clean(0, 1000, 'failed');
          await queue.getBullQueue().clean(0, 1000, 'active');
          await queue.getBullQueue().clean(0, 1000, 'waiting');
          await queue.getBullQueue().clean(0, 1000, 'delayed');
        } catch {
          // Ignore cleanup errors
        }
        await queue.close();
      }
      if (queueManager) {
        await Promise.race([
          QueueManager.reset(),
          new Promise((_, reject) => setTimeout(() => reject(new Error('Shutdown timeout')), 3000)),
        ]);
      }
    } catch (error) {
      // `error` is `unknown` in a catch clause; narrow before reading .message
      console.warn('Cleanup error:', error instanceof Error ? error.message : error);
    } finally {
      handlerRegistry.clear();
      await new Promise(resolve => setTimeout(resolve, 100));
    }
  });
  // Direct mode: one job per item, delays spread across totalDelayHours.
  describe('Direct Processing', () => {
    test('should process items directly without batching', async () => {
      const items = ['item1', 'item2', 'item3', 'item4', 'item5'];
      const result = await processItems(items, queueName, {
        totalDelayHours: 0.001, // 3.6 seconds total
        useBatching: false,
        handler: 'batch-test',
        operation: 'process-item',
        priority: 1,
      });
      expect(result.mode).toBe('direct');
      expect(result.totalItems).toBe(5);
      expect(result.jobsCreated).toBe(5);
      // Verify jobs were created - BullMQ has an issue where job ID "1" doesn't show up in state queries
      // but exists when queried directly, so we need to check both ways
      const [delayedJobs, waitingJobs, activeJobs, completedJobs, failedJobs, job1] =
        await Promise.all([
          queue.getBullQueue().getJobs(['delayed']),
          queue.getBullQueue().getJobs(['waiting']),
          queue.getBullQueue().getJobs(['active']),
          queue.getBullQueue().getJobs(['completed']),
          queue.getBullQueue().getJobs(['failed']),
          queue.getBullQueue().getJob('1'), // Job 1 often doesn't show up in state queries
        ]);
      const jobs = [...delayedJobs, ...waitingJobs, ...activeJobs, ...completedJobs, ...failedJobs];
      const ourJobs = jobs.filter(
        j => j.name === 'process-item' && j.data.handler === 'batch-test'
      );
      // Include job 1 if we found it directly but it wasn't in the state queries
      if (
        job1 &&
        job1.name === 'process-item' &&
        job1.data.handler === 'batch-test' &&
        !ourJobs.find(j => j.id === '1')
      ) {
        ourJobs.push(job1);
      }
      expect(ourJobs.length).toBe(5);
      // Check delays are distributed
      const delays = ourJobs.map(j => j.opts.delay || 0).sort((a, b) => a - b);
      expect(delays[0]).toBe(0);
      expect(delays[4]).toBeGreaterThan(delays[0]);
    });
    test('should process complex objects directly', async () => {
      const items = [
        { id: 1, name: 'Product A', price: 100 },
        { id: 2, name: 'Product B', price: 200 },
        { id: 3, name: 'Product C', price: 300 },
      ];
      const result = await processItems(items, queueName, {
        totalDelayHours: 0.001,
        useBatching: false,
        handler: 'batch-test',
        operation: 'process-item',
      });
      expect(result.jobsCreated).toBe(3);
      // Check job payloads
      const jobs = await queue.getBullQueue().getJobs(['waiting', 'delayed']);
      const ourJobs = jobs.filter(
        j => j.name === 'process-item' && j.data.handler === 'batch-test'
      );
      const payloads = ourJobs.map(j => j.data.payload);
      expect(payloads).toContainEqual({ id: 1, name: 'Product A', price: 100 });
      expect(payloads).toContainEqual({ id: 2, name: 'Product B', price: 200 });
      expect(payloads).toContainEqual({ id: 3, name: 'Product C', price: 300 });
    });
  });
  // Batch mode: items are grouped into process-batch jobs, payloads cached.
  describe('Batch Processing', () => {
    test('should process items in batches', async () => {
      const items = Array.from({ length: 50 }, (_, i) => ({ id: i, value: `item-${i}` }));
      const result = await processItems(items, queueName, {
        totalDelayHours: 0.001,
        useBatching: true,
        batchSize: 10,
        handler: 'batch-test',
        operation: 'process-item',
      });
      expect(result.mode).toBe('batch');
      expect(result.totalItems).toBe(50);
      expect(result.batchesCreated).toBe(5); // 50 items / 10 per batch
      expect(result.jobsCreated).toBe(5); // 5 batch jobs
      // Verify batch jobs were created
      const jobs = await queue.getBullQueue().getJobs(['delayed', 'waiting']);
      const batchJobs = jobs.filter(j => j.name === 'process-batch');
      expect(batchJobs.length).toBe(5);
    });
    test('should handle different batch sizes', async () => {
      const items = Array.from({ length: 23 }, (_, i) => i);
      const result = await processItems(items, queueName, {
        totalDelayHours: 0.001,
        useBatching: true,
        batchSize: 7,
        handler: 'batch-test',
        operation: 'process-item',
      });
      expect(result.batchesCreated).toBe(4); // 23/7 = 3.28, rounded up to 4
      expect(result.jobsCreated).toBe(4);
    });
    test('should store batch payloads in cache', async () => {
      const items = [
        { type: 'A', data: 'test1' },
        { type: 'B', data: 'test2' },
      ];
      const result = await processItems(items, queueName, {
        totalDelayHours: 0.001,
        useBatching: true,
        batchSize: 2,
        handler: 'batch-test',
        operation: 'process-item',
        ttl: 3600, // 1 hour TTL
      });
      expect(result.jobsCreated).toBe(1);
      // Get the batch job
      const jobs = await queue.getBullQueue().getJobs(['waiting', 'delayed']);
      expect(jobs.length).toBe(1);
      const batchJob = jobs[0];
      expect(batchJob.data.payload.payloadKey).toBeDefined();
      expect(batchJob.data.payload.itemCount).toBe(2);
    });
  });
  describe('Empty and Edge Cases', () => {
    test('should handle empty item list', async () => {
      const result = await processItems([], queueName, {
        totalDelayHours: 1,
        handler: 'batch-test',
        operation: 'process-item',
      });
      expect(result.totalItems).toBe(0);
      expect(result.jobsCreated).toBe(0);
      expect(result.duration).toBeDefined();
    });
    test('should handle single item', async () => {
      const result = await processItems(['single-item'], queueName, {
        totalDelayHours: 0.001,
        handler: 'batch-test',
        operation: 'process-item',
      });
      expect(result.totalItems).toBe(1);
      expect(result.jobsCreated).toBe(1);
    });
    test('should handle large batch with delays', async () => {
      const items = Array.from({ length: 100 }, (_, i) => ({ index: i }));
      const result = await processItems(items, queueName, {
        totalDelayHours: 0.01, // 36 seconds total
        useBatching: true,
        batchSize: 25,
        handler: 'batch-test',
        operation: 'process-item',
      });
      expect(result.batchesCreated).toBe(4); // 100/25
      expect(result.jobsCreated).toBe(4);
      // Check delays are distributed
      const jobs = await queue.getBullQueue().getJobs(['delayed', 'waiting']);
      const delays = jobs.map(j => j.opts.delay || 0).sort((a, b) => a - b);
      expect(delays[0]).toBe(0); // First batch has no delay
      expect(delays[3]).toBeGreaterThan(0); // Last batch has delay
    });
  });
  describe('Job Options', () => {
    test('should respect custom job options', async () => {
      const items = ['a', 'b', 'c'];
      await processItems(items, queueName, {
        totalDelayHours: 0,
        handler: 'batch-test',
        operation: 'process-item',
        priority: 5,
        retries: 10,
        removeOnComplete: 100,
        removeOnFail: 50,
      });
      // Check all states including job ID "1" specifically (as it often doesn't show up in state queries)
      const [waitingJobs, delayedJobs, job1, job2, job3] = await Promise.all([
        queue.getBullQueue().getJobs(['waiting']),
        queue.getBullQueue().getJobs(['delayed']),
        queue.getBullQueue().getJob('1'),
        queue.getBullQueue().getJob('2'),
        queue.getBullQueue().getJob('3'),
      ]);
      const jobs = [...waitingJobs, ...delayedJobs];
      // Add any missing jobs that exist but don't show up in state queries
      [job1, job2, job3].forEach(job => {
        if (job && !jobs.find(j => j.id === job.id)) {
          jobs.push(job);
        }
      });
      expect(jobs.length).toBe(3);
      jobs.forEach(job => {
        expect(job.opts.priority).toBe(5);
        expect(job.opts.attempts).toBe(10);
        expect(job.opts.removeOnComplete).toBe(100);
        expect(job.opts.removeOnFail).toBe(50);
      });
    });
    test('should set handler and operation correctly', async () => {
      // Register custom handler for this test
      handlerRegistry.register('custom-handler', {
        'custom-operation': async payload => {
          return { processed: true, data: payload };
        },
      });
      await processItems(['test'], queueName, {
        totalDelayHours: 0,
        handler: 'custom-handler',
        operation: 'custom-operation',
      });
      const jobs = await queue.getBullQueue().getJobs(['waiting']);
      expect(jobs.length).toBe(1);
      expect(jobs[0].data.handler).toBe('custom-handler');
      expect(jobs[0].data.operation).toBe('custom-operation');
    });
  });
});

View file

@ -1,379 +0,0 @@
import { Queue, Worker } from 'bullmq';
import { afterEach, beforeEach, describe, expect, test } from 'bun:test';
import { DeadLetterQueueHandler } from '../src/dlq-handler';
import { getRedisConnection } from '../src/utils';
// Suppress known benign Redis connection errors that surface as unhandled
// rejections while tests tear down; anything else is reported.
process.on('unhandledRejection', (reason, promise) => {
  const benign = ['Connection is closed', 'Connection is in monitoring mode'];
  if (reason && typeof reason === 'object' && 'message' in reason) {
    const { message } = reason as Error;
    if (benign.some(fragment => message.includes(fragment))) {
      return;
    }
  }
  console.error('Unhandled Rejection at:', promise, 'reason:', reason);
});
/**
 * Integration tests for DeadLetterQueueHandler.
 *
 * Requires a local Redis/Dragonfly on localhost:6379. The handler is
 * configured with maxRetries=3, retryDelay=100ms, alertThreshold=5 and a
 * 24-hour cleanup age; several tests seed the companion `test-queue-dlq`
 * queue directly to simulate previously-failed jobs.
 *
 * NOTE(review): timing-based waits (setTimeout) make these tests sensitive
 * to machine load — confirm stability before tightening the delays.
 */
describe('DeadLetterQueueHandler', () => {
  let mainQueue: Queue;
  let dlqHandler: DeadLetterQueueHandler;
  let worker: Worker;
  // Raw ioredis-style connection shared by queue, DLQ and handler.
  let connection: any;
  const redisConfig = {
    host: 'localhost',
    port: 6379,
    password: '',
    db: 0,
  };
  beforeEach(async () => {
    connection = getRedisConnection(redisConfig);
    // Create main queue
    mainQueue = new Queue('test-queue', { connection });
    // Create DLQ handler
    dlqHandler = new DeadLetterQueueHandler(mainQueue, connection, {
      maxRetries: 3,
      retryDelay: 100,
      alertThreshold: 5,
      cleanupAge: 24,
    });
  });
  afterEach(async () => {
    // Best-effort teardown; close order is worker -> handler -> queue.
    try {
      if (worker) {
        await worker.close();
      }
      await dlqHandler.shutdown();
      await mainQueue.close();
    } catch {
      // Ignore cleanup errors
    }
    await new Promise(resolve => setTimeout(resolve, 50));
  });
  describe('Failed Job Handling', () => {
    test('should move job to DLQ after max retries', async () => {
      let attemptCount = 0;
      // Create worker that always fails
      worker = new Worker(
        'test-queue',
        async () => {
          attemptCount++;
          throw new Error('Job failed');
        },
        {
          connection,
          autorun: false,
        }
      );
      // Add job with limited attempts
      const _job = await mainQueue.add(
        'failing-job',
        { test: true },
        {
          attempts: 3,
          backoff: { type: 'fixed', delay: 50 },
        }
      );
      // Process job manually
      await worker.run();
      // Wait for retries
      await new Promise(resolve => setTimeout(resolve, 300));
      // Job should have failed 3 times
      expect(attemptCount).toBe(3);
      // Check if job was moved to DLQ
      const dlqStats = await dlqHandler.getStats();
      expect(dlqStats.total).toBe(1);
      expect(dlqStats.byJobName['failing-job']).toBe(1);
    });
    test('should track failure count correctly', async () => {
      const job = await mainQueue.add('test-job', { data: 'test' });
      const error = new Error('Test error');
      // Simulate multiple failures
      await dlqHandler.handleFailedJob(job, error);
      await dlqHandler.handleFailedJob(job, error);
      // On third failure with max attempts reached, should move to DLQ
      job.attemptsMade = 3;
      job.opts.attempts = 3;
      await dlqHandler.handleFailedJob(job, error);
      const stats = await dlqHandler.getStats();
      expect(stats.total).toBe(1);
    });
  });
  describe('DLQ Statistics', () => {
    test('should provide detailed statistics', async () => {
      // Add some failed jobs to DLQ
      const dlq = new Queue(`test-queue-dlq`, { connection });
      await dlq.add('failed-job', {
        originalJob: {
          id: '1',
          name: 'job-type-a',
          data: { test: true },
          attemptsMade: 3,
        },
        error: { message: 'Error 1' },
        movedToDLQAt: new Date().toISOString(),
      });
      await dlq.add('failed-job', {
        originalJob: {
          id: '2',
          name: 'job-type-b',
          data: { test: true },
          attemptsMade: 3,
        },
        error: { message: 'Error 2' },
        movedToDLQAt: new Date().toISOString(),
      });
      const stats = await dlqHandler.getStats();
      expect(stats.total).toBe(2);
      expect(stats.recent).toBe(2); // Both are recent
      expect(Object.keys(stats.byJobName).length).toBe(2);
      expect(stats.oldestJob).toBeDefined();
      await dlq.close();
    });
    test('should count recent jobs correctly', async () => {
      const dlq = new Queue(`test-queue-dlq`, { connection });
      // Add old job (25 hours ago)
      const oldTimestamp = Date.now() - 25 * 60 * 60 * 1000;
      await dlq.add(
        'failed-job',
        {
          originalJob: { id: '1', name: 'old-job' },
          error: { message: 'Old error' },
          movedToDLQAt: new Date(oldTimestamp).toISOString(),
        },
        { timestamp: oldTimestamp }
      );
      // Add recent job
      await dlq.add('failed-job', {
        originalJob: { id: '2', name: 'recent-job' },
        error: { message: 'Recent error' },
        movedToDLQAt: new Date().toISOString(),
      });
      const stats = await dlqHandler.getStats();
      expect(stats.total).toBe(2);
      expect(stats.recent).toBe(1); // Only one is recent
      await dlq.close();
    });
  });
  describe('DLQ Retry', () => {
    test('should retry jobs from DLQ', async () => {
      const dlq = new Queue(`test-queue-dlq`, { connection });
      // Add failed jobs to DLQ
      await dlq.add('failed-job', {
        originalJob: {
          id: '1',
          name: 'retry-job',
          data: { retry: true },
          opts: { priority: 1 },
        },
        error: { message: 'Failed' },
        movedToDLQAt: new Date().toISOString(),
      });
      await dlq.add('failed-job', {
        originalJob: {
          id: '2',
          name: 'retry-job-2',
          data: { retry: true },
          opts: {},
        },
        error: { message: 'Failed' },
        movedToDLQAt: new Date().toISOString(),
      });
      // Retry jobs
      const retriedCount = await dlqHandler.retryDLQJobs(10);
      expect(retriedCount).toBe(2);
      // Check main queue has the retried jobs
      const mainQueueJobs = await mainQueue.getWaiting();
      expect(mainQueueJobs.length).toBe(2);
      expect(mainQueueJobs[0].name).toBe('retry-job');
      expect(mainQueueJobs[0].data).toEqual({ retry: true });
      // DLQ should be empty
      const dlqJobs = await dlq.getCompleted();
      expect(dlqJobs.length).toBe(0);
      await dlq.close();
    });
    test('should respect retry limit', async () => {
      const dlq = new Queue(`test-queue-dlq`, { connection });
      // Add 5 failed jobs
      for (let i = 0; i < 5; i++) {
        await dlq.add('failed-job', {
          originalJob: {
            id: `${i}`,
            name: `job-${i}`,
            data: { index: i },
          },
          error: { message: 'Failed' },
          movedToDLQAt: new Date().toISOString(),
        });
      }
      // Retry only 3 jobs
      const retriedCount = await dlqHandler.retryDLQJobs(3);
      expect(retriedCount).toBe(3);
      // Check counts
      const mainQueueJobs = await mainQueue.getWaiting();
      expect(mainQueueJobs.length).toBe(3);
      const remainingDLQ = await dlq.getCompleted();
      expect(remainingDLQ.length).toBe(2);
      await dlq.close();
    });
  });
  describe('DLQ Cleanup', () => {
    test('should cleanup old DLQ entries', async () => {
      const dlq = new Queue(`test-queue-dlq`, { connection });
      // Add old job (25 hours ago)
      const oldTimestamp = Date.now() - 25 * 60 * 60 * 1000;
      await dlq.add(
        'failed-job',
        {
          originalJob: { id: '1', name: 'old-job' },
          error: { message: 'Old error' },
        },
        { timestamp: oldTimestamp }
      );
      // Add recent job (1 hour ago)
      const recentTimestamp = Date.now() - 1 * 60 * 60 * 1000;
      await dlq.add(
        'failed-job',
        {
          originalJob: { id: '2', name: 'recent-job' },
          error: { message: 'Recent error' },
        },
        { timestamp: recentTimestamp }
      );
      // Run cleanup (24 hour threshold)
      const removedCount = await dlqHandler.cleanup();
      expect(removedCount).toBe(1);
      // Check remaining jobs
      const remaining = await dlq.getCompleted();
      expect(remaining.length).toBe(1);
      expect(remaining[0].data.originalJob.name).toBe('recent-job');
      await dlq.close();
    });
  });
  describe('Failed Job Inspection', () => {
    test('should inspect failed jobs', async () => {
      const dlq = new Queue(`test-queue-dlq`, { connection });
      // Add failed jobs with different error types
      await dlq.add('failed-job', {
        originalJob: {
          id: '1',
          name: 'network-job',
          data: { url: 'https://api.example.com' },
          attemptsMade: 3,
        },
        error: {
          message: 'Network timeout',
          stack: 'Error: Network timeout\n    at ...',
          name: 'NetworkError',
        },
        movedToDLQAt: '2024-01-01T10:00:00Z',
      });
      await dlq.add('failed-job', {
        originalJob: {
          id: '2',
          name: 'parse-job',
          data: { input: 'invalid-json' },
          attemptsMade: 2,
        },
        error: {
          message: 'Invalid JSON',
          stack: 'SyntaxError: Invalid JSON\n    at ...',
          name: 'SyntaxError',
        },
        movedToDLQAt: '2024-01-01T11:00:00Z',
      });
      const failedJobs = await dlqHandler.inspectFailedJobs(10);
      expect(failedJobs.length).toBe(2);
      expect(failedJobs[0]).toMatchObject({
        id: '1',
        name: 'network-job',
        data: { url: 'https://api.example.com' },
        error: {
          message: 'Network timeout',
          name: 'NetworkError',
        },
        failedAt: '2024-01-01T10:00:00Z',
        attempts: 3,
      });
      await dlq.close();
    });
  });
  describe('Alert Threshold', () => {
    test('should detect when alert threshold is exceeded', async () => {
      const dlq = new Queue(`test-queue-dlq`, { connection });
      // Add jobs to exceed threshold (5)
      for (let i = 0; i < 6; i++) {
        await dlq.add('failed-job', {
          originalJob: {
            id: `${i}`,
            name: `job-${i}`,
            data: { index: i },
          },
          error: { message: 'Failed' },
          movedToDLQAt: new Date().toISOString(),
        });
      }
      const stats = await dlqHandler.getStats();
      expect(stats.total).toBe(6);
      // In a real implementation, this would trigger alerts
      await dlq.close();
    });
  });
});

View file

@ -1,221 +0,0 @@
import { afterEach, beforeEach, describe, expect, test } from 'bun:test';
import { handlerRegistry, QueueManager } from '../src';
// Suppress known benign Redis connection errors that surface as unhandled
// rejections while tests tear down; anything else is reported.
process.on('unhandledRejection', (reason, promise) => {
  const benign = ['Connection is closed', 'Connection is in monitoring mode'];
  if (reason && typeof reason === 'object' && 'message' in reason) {
    const { message } = reason as Error;
    if (benign.some(fragment => message.includes(fragment))) {
      // Suppress these specific Redis errors in tests
      return;
    }
  }
  console.error('Unhandled Rejection at:', promise, 'reason:', reason);
});
/**
 * Integration tests for the (legacy single-queue) QueueManager API:
 * initialization, job processing, retries, metrics and rate limiting.
 *
 * Requires a local Redis/Dragonfly on localhost:6379.
 *
 * Fix applied in review: the `catch` binding is `unknown` under strict TS
 * settings, so it is narrowed with `instanceof` before reading `.message`.
 */
describe('QueueManager Integration Tests', () => {
  let queueManager: QueueManager;
  // Use local Redis/Dragonfly
  const redisConfig = {
    host: 'localhost',
    port: 6379,
    password: '',
    db: 0,
  };
  beforeEach(() => {
    handlerRegistry.clear();
  });
  afterEach(async () => {
    if (queueManager) {
      try {
        await Promise.race([
          queueManager.shutdown(),
          new Promise((_, reject) => setTimeout(() => reject(new Error('Shutdown timeout')), 3000)),
        ]);
      } catch (error) {
        // Ignore shutdown errors in tests; narrow the unknown catch binding.
        console.warn('Shutdown error:', error instanceof Error ? error.message : error);
      } finally {
        queueManager = null as any;
      }
    }
    // Clear handler registry to prevent conflicts
    handlerRegistry.clear();
    // Add delay to allow connections to close
    await new Promise(resolve => setTimeout(resolve, 100));
  });
  test('should initialize queue manager', async () => {
    queueManager = new QueueManager({
      queueName: 'test-queue',
      redis: redisConfig,
      workers: 1,
      concurrency: 5,
    });
    await queueManager.initialize();
    expect(queueManager.queueName).toBe('test-queue');
  });
  test('should add and process a job', async () => {
    let processedPayload: any;
    // Register handler
    handlerRegistry.register('test-handler', {
      'test-operation': async payload => {
        processedPayload = payload;
        return { success: true, data: payload };
      },
    });
    queueManager = new QueueManager({
      queueName: 'test-queue',
      redis: redisConfig,
      workers: 1,
    });
    await queueManager.initialize();
    // Add job
    const job = await queueManager.add('test-job', {
      handler: 'test-handler',
      operation: 'test-operation',
      payload: { message: 'Hello, Queue!' },
    });
    expect(job.name).toBe('test-job');
    // Wait for processing
    await new Promise(resolve => setTimeout(resolve, 100));
    expect(processedPayload).toEqual({ message: 'Hello, Queue!' });
  });
  test('should handle job errors with retries', async () => {
    let attemptCount = 0;
    handlerRegistry.register('retry-handler', {
      'failing-operation': async () => {
        attemptCount++;
        if (attemptCount < 3) {
          throw new Error(`Attempt ${attemptCount} failed`);
        }
        return { success: true };
      },
    });
    queueManager = new QueueManager({
      queueName: 'test-queue-retry',
      redis: redisConfig,
      workers: 1,
      defaultJobOptions: {
        attempts: 3,
        backoff: {
          type: 'fixed',
          delay: 50,
        },
      },
    });
    await queueManager.initialize();
    const job = await queueManager.add('retry-job', {
      handler: 'retry-handler',
      operation: 'failing-operation',
      payload: {},
    });
    // Wait for retries
    await new Promise(resolve => setTimeout(resolve, 500));
    const completed = await job.isCompleted();
    expect(completed).toBe(true);
    expect(attemptCount).toBe(3);
  });
  test('should collect metrics when enabled', async () => {
    queueManager = new QueueManager({
      queueName: 'test-queue-metrics',
      redis: redisConfig,
      workers: 0,
      enableMetrics: true,
    });
    await queueManager.initialize();
    // Add some jobs
    await queueManager.add('job1', {
      handler: 'test',
      operation: 'test',
      payload: { id: 1 },
    });
    await queueManager.add('job2', {
      handler: 'test',
      operation: 'test',
      payload: { id: 2 },
    });
    const metrics = await queueManager.getMetrics();
    expect(metrics).toBeDefined();
    expect(metrics.waiting).toBeDefined();
    expect(metrics.active).toBeDefined();
    expect(metrics.completed).toBeDefined();
    expect(metrics.failed).toBeDefined();
    expect(metrics.processingTime).toBeDefined();
    expect(metrics.throughput).toBeDefined();
  });
  test('should handle rate limiting when enabled', async () => {
    let processedCount = 0;
    handlerRegistry.register('rate-limited-handler', {
      'limited-op': async () => {
        processedCount++;
        return { processed: true };
      },
    });
    queueManager = new QueueManager({
      queueName: 'test-queue-rate',
      redis: redisConfig,
      workers: 1,
      enableRateLimit: true,
      rateLimitRules: [
        {
          level: 'handler',
          handler: 'rate-limited-handler',
          config: {
            points: 2, // 2 requests
            duration: 1, // per 1 second
          },
        },
      ],
    });
    await queueManager.initialize();
    // Add 3 jobs quickly
    for (let i = 0; i < 3; i++) {
      await queueManager.add(`job${i}`, {
        handler: 'rate-limited-handler',
        operation: 'limited-op',
        payload: { id: i },
      });
    }
    // Wait for processing
    await new Promise(resolve => setTimeout(resolve, 200));
    // Only 2 should be processed due to rate limit
    expect(processedCount).toBe(2);
  });
});

View file

@ -1,371 +0,0 @@
import { afterEach, beforeEach, describe, expect, test } from 'bun:test';
import { handlerRegistry, QueueManager } from '../src';
// Suppress known benign Redis connection errors that surface as unhandled
// rejections while tests tear down; anything else is reported.
process.on('unhandledRejection', (reason, promise) => {
  const benign = ['Connection is closed', 'Connection is in monitoring mode'];
  if (reason && typeof reason === 'object' && 'message' in reason) {
    const { message } = reason as Error;
    if (benign.some(fragment => message.includes(fragment))) {
      return;
    }
  }
  console.error('Unhandled Rejection at:', promise, 'reason:', reason);
});
/**
 * Tests for the multi-queue QueueManager API: basic operations (add, bulk,
 * stats, pause/resume), scheduled jobs, retry handling and multi-handler
 * dispatch.
 *
 * Requires a local Redis/Dragonfly on localhost:6379.
 *
 * Fix applied in review: the `catch` binding is `unknown` under strict TS
 * settings, so it is narrowed with `instanceof` before reading `.message`.
 */
describe('QueueManager', () => {
  let queueManager: QueueManager;
  // Use local Redis/Dragonfly
  const redisConfig = {
    host: 'localhost',
    port: 6379,
    password: '',
    db: 0,
  };
  beforeEach(() => {
    handlerRegistry.clear();
  });
  afterEach(async () => {
    if (queueManager) {
      try {
        await Promise.race([
          queueManager.shutdown(),
          new Promise((_, reject) => setTimeout(() => reject(new Error('Shutdown timeout')), 3000)),
        ]);
      } catch (error) {
        // Narrow the unknown catch binding before reading .message.
        console.warn('Shutdown error:', error instanceof Error ? error.message : error);
      } finally {
        queueManager = null as any;
      }
    }
    handlerRegistry.clear();
    await new Promise(resolve => setTimeout(resolve, 100));
  });
  describe('Basic Operations', () => {
    test('should initialize queue manager', async () => {
      queueManager = new QueueManager({
        redis: redisConfig,
      });
      // No need to initialize anymore - constructor handles everything
      // QueueManager now manages multiple queues, not just one
      expect(queueManager).toBeDefined();
    });
    test('should add and process a job', async () => {
      let processedPayload: any;
      // Register handler
      handlerRegistry.register('test-handler', {
        'test-operation': async payload => {
          processedPayload = payload;
          return { success: true, data: payload };
        },
      });
      queueManager = new QueueManager({
        redis: redisConfig,
      });
      // No need to initialize anymore - constructor handles everything
      // Get or create a queue
      const queue = queueManager.getQueue('test-queue', {
        workers: 1,
      });
      // Add job
      const job = await queue.add('test-job', {
        handler: 'test-handler',
        operation: 'test-operation',
        payload: { message: 'Hello, Queue!' },
      });
      expect(job.name).toBe('test-job');
      // Wait for processing
      await new Promise(resolve => setTimeout(resolve, 100));
      expect(processedPayload).toEqual({ message: 'Hello, Queue!' });
    });
    test('should handle missing handler gracefully', async () => {
      queueManager = new QueueManager({
        queueName: 'test-queue',
        redis: redisConfig,
        workers: 1,
      });
      // No need to initialize anymore - constructor handles everything
      const job = await queueManager.add('test-job', {
        handler: 'non-existent',
        operation: 'test-operation',
        payload: { test: true },
      });
      // Wait for job to fail
      await new Promise(resolve => setTimeout(resolve, 100));
      const failed = await job.isFailed();
      expect(failed).toBe(true);
    });
    test('should add multiple jobs in bulk', async () => {
      let processedCount = 0;
      handlerRegistry.register('bulk-handler', {
        process: async _payload => {
          processedCount++;
          return { processed: true };
        },
      });
      queueManager = new QueueManager({
        queueName: 'test-queue',
        redis: redisConfig,
        workers: 2,
        concurrency: 5,
      });
      // No need to initialize anymore - constructor handles everything
      const jobs = await queueManager.addBulk([
        {
          name: 'job1',
          data: { handler: 'bulk-handler', operation: 'process', payload: { id: 1 } },
        },
        {
          name: 'job2',
          data: { handler: 'bulk-handler', operation: 'process', payload: { id: 2 } },
        },
        {
          name: 'job3',
          data: { handler: 'bulk-handler', operation: 'process', payload: { id: 3 } },
        },
      ]);
      expect(jobs.length).toBe(3);
      // Wait for processing
      await new Promise(resolve => setTimeout(resolve, 200));
      expect(processedCount).toBe(3);
    });
    test('should get queue statistics', async () => {
      queueManager = new QueueManager({
        queueName: 'test-queue',
        redis: redisConfig,
        workers: 0, // No workers, jobs will stay in waiting
      });
      // No need to initialize anymore - constructor handles everything
      // Add some jobs
      await queueManager.add('job1', {
        handler: 'test',
        operation: 'test',
        payload: { id: 1 },
      });
      await queueManager.add('job2', {
        handler: 'test',
        operation: 'test',
        payload: { id: 2 },
      });
      const stats = await queueManager.getStats();
      expect(stats.waiting).toBe(2);
      expect(stats.active).toBe(0);
      expect(stats.completed).toBe(0);
      expect(stats.failed).toBe(0);
    });
    test('should pause and resume queue', async () => {
      let processedCount = 0;
      handlerRegistry.register('pause-test', {
        process: async () => {
          processedCount++;
          return { ok: true };
        },
      });
      queueManager = new QueueManager({
        queueName: 'test-queue',
        redis: redisConfig,
        workers: 1,
      });
      // No need to initialize anymore - constructor handles everything
      // Pause queue
      await queueManager.pause();
      // Add job while paused
      await queueManager.add('job1', {
        handler: 'pause-test',
        operation: 'process',
        payload: {},
      });
      // Wait a bit - job should not be processed
      await new Promise(resolve => setTimeout(resolve, 100));
      expect(processedCount).toBe(0);
      // Resume queue
      await queueManager.resume();
      // Wait for processing
      await new Promise(resolve => setTimeout(resolve, 100));
      expect(processedCount).toBe(1);
    });
  });
  describe('Scheduled Jobs', () => {
    test('should register and process scheduled jobs', async () => {
      let executionCount = 0;
      handlerRegistry.registerWithSchedule({
        name: 'scheduled-handler',
        operations: {
          'scheduled-task': async _payload => {
            executionCount++;
            return { executed: true, timestamp: Date.now() };
          },
        },
        scheduledJobs: [
          {
            type: 'test-schedule',
            operation: 'scheduled-task',
            payload: { test: true },
            cronPattern: '*/1 * * * * *', // Every second
            description: 'Test scheduled job',
          },
        ],
      });
      queueManager = new QueueManager({
        queueName: 'test-queue',
        redis: redisConfig,
        workers: 1,
        enableScheduledJobs: true,
      });
      // No need to initialize anymore - constructor handles everything
      // Wait for scheduled job to execute
      await new Promise(resolve => setTimeout(resolve, 2500));
      expect(executionCount).toBeGreaterThanOrEqual(2);
    });
  });
  describe('Error Handling', () => {
    test('should handle job errors with retries', async () => {
      let attemptCount = 0;
      handlerRegistry.register('retry-handler', {
        'failing-operation': async () => {
          attemptCount++;
          if (attemptCount < 3) {
            throw new Error(`Attempt ${attemptCount} failed`);
          }
          return { success: true };
        },
      });
      queueManager = new QueueManager({
        queueName: 'test-queue',
        redis: redisConfig,
        workers: 1,
        defaultJobOptions: {
          attempts: 3,
          backoff: {
            type: 'fixed',
            delay: 50,
          },
        },
      });
      // No need to initialize anymore - constructor handles everything
      const job = await queueManager.add('retry-job', {
        handler: 'retry-handler',
        operation: 'failing-operation',
        payload: {},
      });
      // Wait for retries
      await new Promise(resolve => setTimeout(resolve, 500));
      const completed = await job.isCompleted();
      expect(completed).toBe(true);
      expect(attemptCount).toBe(3);
    });
  });
  describe('Multiple Handlers', () => {
    test('should handle multiple handlers with different operations', async () => {
      const results: any[] = [];
      handlerRegistry.register('handler-a', {
        'operation-1': async payload => {
          results.push({ handler: 'a', op: '1', payload });
          return { handler: 'a', op: '1' };
        },
        'operation-2': async payload => {
          results.push({ handler: 'a', op: '2', payload });
          return { handler: 'a', op: '2' };
        },
      });
      handlerRegistry.register('handler-b', {
        'operation-1': async payload => {
          results.push({ handler: 'b', op: '1', payload });
          return { handler: 'b', op: '1' };
        },
      });
      queueManager = new QueueManager({
        queueName: 'test-queue',
        redis: redisConfig,
        workers: 2,
      });
      // No need to initialize anymore - constructor handles everything
      // Add jobs for different handlers
      await queueManager.addBulk([
        {
          name: 'job1',
          data: { handler: 'handler-a', operation: 'operation-1', payload: { id: 1 } },
        },
        {
          name: 'job2',
          data: { handler: 'handler-a', operation: 'operation-2', payload: { id: 2 } },
        },
        {
          name: 'job3',
          data: { handler: 'handler-b', operation: 'operation-1', payload: { id: 3 } },
        },
      ]);
      // Wait for processing
      await new Promise(resolve => setTimeout(resolve, 200));
      expect(results.length).toBe(3);
      expect(results).toContainEqual({ handler: 'a', op: '1', payload: { id: 1 } });
      expect(results).toContainEqual({ handler: 'a', op: '2', payload: { id: 2 } });
      expect(results).toContainEqual({ handler: 'b', op: '1', payload: { id: 3 } });
    });
  });
});

View file

@ -1,327 +0,0 @@
import { Queue, QueueEvents, Worker } from 'bullmq';
import { afterEach, beforeEach, describe, expect, test } from 'bun:test';
import { QueueMetricsCollector } from '../src/queue-metrics';
import { getRedisConnection } from '../src/utils';
// Suppress known benign Redis connection errors that surface as unhandled
// rejections while tests tear down; anything else is reported.
process.on('unhandledRejection', (reason, promise) => {
  const benign = ['Connection is closed', 'Connection is in monitoring mode'];
  if (reason && typeof reason === 'object' && 'message' in reason) {
    const { message } = reason as Error;
    if (benign.some(fragment => message.includes(fragment))) {
      return;
    }
  }
  console.error('Unhandled Rejection at:', promise, 'reason:', reason);
});
describe('QueueMetricsCollector', () => {
let queue: Queue;
let queueEvents: QueueEvents;
let metricsCollector: QueueMetricsCollector;
let worker: Worker;
let connection: any;
const redisConfig = {
host: 'localhost',
port: 6379,
password: '',
db: 0,
};
beforeEach(async () => {
connection = getRedisConnection(redisConfig);
// Create queue and events
queue = new Queue('metrics-test-queue', { connection });
queueEvents = new QueueEvents('metrics-test-queue', { connection });
// Create metrics collector
metricsCollector = new QueueMetricsCollector(queue, queueEvents);
// Wait for connections
await queue.waitUntilReady();
await queueEvents.waitUntilReady();
});
afterEach(async () => {
try {
if (worker) {
await worker.close();
}
await queueEvents.close();
await queue.close();
} catch {
// Ignore cleanup errors
}
await new Promise(resolve => setTimeout(resolve, 50));
});
describe('Job Count Metrics', () => {
test('should collect basic job counts', async () => {
// Add jobs in different states
await queue.add('waiting-job', { test: true });
await queue.add('delayed-job', { test: true }, { delay: 60000 });
const metrics = await metricsCollector.collect();
expect(metrics.waiting).toBe(1);
expect(metrics.delayed).toBe(1);
expect(metrics.active).toBe(0);
expect(metrics.completed).toBe(0);
expect(metrics.failed).toBe(0);
});
test('should track completed and failed jobs', async () => {
let jobCount = 0;
// Create worker that alternates between success and failure
worker = new Worker(
'metrics-test-queue',
async () => {
jobCount++;
if (jobCount % 2 === 0) {
throw new Error('Test failure');
}
return { success: true };
},
{ connection }
);
// Add jobs
await queue.add('job1', { test: 1 });
await queue.add('job2', { test: 2 });
await queue.add('job3', { test: 3 });
await queue.add('job4', { test: 4 });
// Wait for processing
await new Promise(resolve => setTimeout(resolve, 200));
const metrics = await metricsCollector.collect();
expect(metrics.completed).toBe(2);
expect(metrics.failed).toBe(2);
});
});
describe('Processing Time Metrics', () => {
test('should track processing times', async () => {
const processingTimes = [50, 100, 150, 200, 250];
let jobIndex = 0;
// Create worker with variable processing times
worker = new Worker(
'metrics-test-queue',
async () => {
const delay = processingTimes[jobIndex++] || 100;
await new Promise(resolve => setTimeout(resolve, delay));
return { processed: true };
},
{ connection }
);
// Add jobs
for (let i = 0; i < processingTimes.length; i++) {
await queue.add(`job${i}`, { index: i });
}
// Wait for processing
await new Promise(resolve => setTimeout(resolve, 1500));
const metrics = await metricsCollector.collect();
expect(metrics.processingTime.avg).toBeGreaterThan(0);
expect(metrics.processingTime.min).toBeGreaterThanOrEqual(50);
expect(metrics.processingTime.max).toBeLessThanOrEqual(300);
expect(metrics.processingTime.p95).toBeGreaterThan(metrics.processingTime.avg);
});
test('should handle empty processing times', async () => {
const metrics = await metricsCollector.collect();
expect(metrics.processingTime).toEqual({
avg: 0,
min: 0,
max: 0,
p95: 0,
p99: 0,
});
});
});
describe('Throughput Metrics', () => {
test('should calculate throughput correctly', async () => {
// Create fast worker
worker = new Worker(
'metrics-test-queue',
async () => {
return { success: true };
},
{ connection, concurrency: 5 }
);
// Add multiple jobs
const jobPromises = [];
for (let i = 0; i < 10; i++) {
jobPromises.push(queue.add(`job${i}`, { index: i }));
}
await Promise.all(jobPromises);
// Wait for processing
await new Promise(resolve => setTimeout(resolve, 500));
const metrics = await metricsCollector.collect();
expect(metrics.throughput.completedPerMinute).toBeGreaterThan(0);
expect(metrics.throughput.totalPerMinute).toBe(
metrics.throughput.completedPerMinute + metrics.throughput.failedPerMinute
);
});
});
describe('Queue Health', () => {
test('should report healthy queue', async () => {
const metrics = await metricsCollector.collect();
expect(metrics.isHealthy).toBe(true);
expect(metrics.healthIssues).toEqual([]);
});
test('should detect high failure rate', async () => {
// Create worker that always fails
worker = new Worker(
'metrics-test-queue',
async () => {
throw new Error('Always fails');
},
{ connection }
);
// Add jobs
for (let i = 0; i < 10; i++) {
await queue.add(`job${i}`, { index: i });
}
// Wait for failures
await new Promise(resolve => setTimeout(resolve, 500));
const metrics = await metricsCollector.collect();
expect(metrics.isHealthy).toBe(false);
expect(metrics.healthIssues).toContain(expect.stringMatching(/High failure rate/));
});
test('should detect large queue backlog', async () => {
// Add many jobs without workers
for (let i = 0; i < 1001; i++) {
await queue.add(`job${i}`, { index: i });
}
const metrics = await metricsCollector.collect();
expect(metrics.isHealthy).toBe(false);
expect(metrics.healthIssues).toContain(expect.stringMatching(/Large queue backlog/));
});
});
describe('Oldest Waiting Job', () => {
test('should track oldest waiting job', async () => {
const beforeAdd = Date.now();
// Add jobs with delays
await queue.add('old-job', { test: true });
await new Promise(resolve => setTimeout(resolve, 100));
await queue.add('new-job', { test: true });
const metrics = await metricsCollector.collect();
expect(metrics.oldestWaitingJob).toBeDefined();
expect(metrics.oldestWaitingJob!.getTime()).toBeGreaterThanOrEqual(beforeAdd);
});
test('should return null when no waiting jobs', async () => {
// Create worker that processes immediately
worker = new Worker(
'metrics-test-queue',
async () => {
return { success: true };
},
{ connection }
);
const metrics = await metricsCollector.collect();
expect(metrics.oldestWaitingJob).toBe(null);
});
});
describe('Metrics Report', () => {
test('should generate formatted report', async () => {
// Add some jobs
await queue.add('job1', { test: true });
await queue.add('job2', { test: true }, { delay: 5000 });
const report = await metricsCollector.getReport();
expect(report).toContain('Queue Metrics Report');
expect(report).toContain('Status:');
expect(report).toContain('Job Counts:');
expect(report).toContain('Performance:');
expect(report).toContain('Throughput:');
expect(report).toContain('Waiting: 1');
expect(report).toContain('Delayed: 1');
});
test('should include health issues in report', async () => {
// Add many jobs to trigger health issue
for (let i = 0; i < 1001; i++) {
await queue.add(`job${i}`, { index: i });
}
const report = await metricsCollector.getReport();
expect(report).toContain('Issues Detected');
expect(report).toContain('Health Issues:');
expect(report).toContain('Large queue backlog');
});
});
describe('Prometheus Metrics', () => {
test('should export metrics in Prometheus format', async () => {
// Add some jobs and process them
worker = new Worker(
'metrics-test-queue',
async () => {
await new Promise(resolve => setTimeout(resolve, 50));
return { success: true };
},
{ connection }
);
await queue.add('job1', { test: true });
await queue.add('job2', { test: true });
// Wait for processing
await new Promise(resolve => setTimeout(resolve, 200));
const prometheusMetrics = await metricsCollector.getPrometheusMetrics();
// Check format
expect(prometheusMetrics).toContain('# HELP queue_jobs_total');
expect(prometheusMetrics).toContain('# TYPE queue_jobs_total gauge');
expect(prometheusMetrics).toContain(
'queue_jobs_total{queue="metrics-test-queue",status="completed"}'
);
expect(prometheusMetrics).toContain('# HELP queue_processing_time_seconds');
expect(prometheusMetrics).toContain('# TYPE queue_processing_time_seconds summary');
expect(prometheusMetrics).toContain('# HELP queue_throughput_per_minute');
expect(prometheusMetrics).toContain('# TYPE queue_throughput_per_minute gauge');
expect(prometheusMetrics).toContain('# HELP queue_health');
expect(prometheusMetrics).toContain('# TYPE queue_health gauge');
});
});
});

View file

@ -1,81 +0,0 @@
import { afterEach, beforeEach, describe, expect, test } from 'bun:test';
import { handlerRegistry, QueueManager } from '../src';
// Smoke tests for QueueManager construction/initialization and the shared
// module-level handler registry.
describe('QueueManager Simple Tests', () => {
  let queueManager: QueueManager;
  // Assumes Redis is running locally on default port
  const redisConfig = {
    host: 'localhost',
    port: 6379,
  };

  beforeEach(() => {
    // handlerRegistry is module-global state — clear it so tests stay independent.
    handlerRegistry.clear();
  });

  afterEach(async () => {
    if (queueManager) {
      try {
        await queueManager.shutdown();
      } catch {
        // Ignore errors during cleanup
      }
    }
  });

  test('should create queue manager instance', () => {
    queueManager = new QueueManager({
      queueName: 'test-queue',
      redis: redisConfig,
    });
    expect(queueManager.queueName).toBe('test-queue');
  });

  test('should handle missing Redis gracefully', async () => {
    // Use a port that's likely not running Redis
    queueManager = new QueueManager({
      queueName: 'test-queue',
      redis: {
        host: 'localhost',
        port: 9999,
      },
    });
    // initialize() is expected to reject rather than hang or crash the process.
    await expect(queueManager.initialize()).rejects.toThrow();
  });

  test('handler registry should work', () => {
    const testHandler = async (payload: any) => {
      return { success: true, payload };
    };
    handlerRegistry.register('test-handler', {
      'test-op': testHandler,
    });
    const handler = handlerRegistry.getHandler('test-handler', 'test-op');
    // Must return the exact function reference that was registered.
    expect(handler).toBe(testHandler);
  });

  test('handler registry should return null for missing handler', () => {
    const handler = handlerRegistry.getHandler('missing', 'op');
    expect(handler).toBe(null);
  });

  test('should get handler statistics', () => {
    handlerRegistry.register('handler1', {
      op1: async () => ({}),
      op2: async () => ({}),
    });
    handlerRegistry.register('handler2', {
      op1: async () => ({}),
    });
    const stats = handlerRegistry.getStats();
    // Two handlers registered, three operations in total.
    expect(stats.handlers).toBe(2);
    expect(stats.totalOperations).toBe(3);
  });
});

View file

@ -1,311 +0,0 @@
import { afterEach, beforeEach, describe, expect, test } from 'bun:test';
import Redis from 'ioredis';
import { QueueRateLimiter } from '../src/rate-limiter';
import { getRedisConnection } from '../src/utils';
// Suppress noisy Redis connection errors that surface as unhandled rejections
// while connections tear down between tests; everything else is logged.
const IGNORED_REJECTION_SNIPPETS = ['Connection is closed', 'Connection is in monitoring mode'];
process.on('unhandledRejection', (reason, promise) => {
  const hasMessage = reason !== null && typeof reason === 'object' && 'message' in reason;
  if (hasMessage) {
    const { message } = reason as Error;
    if (IGNORED_REJECTION_SNIPPETS.some(snippet => message.includes(snippet))) {
      return; // Expected Redis teardown noise — swallow it.
    }
  }
  console.error('Unhandled Rejection at:', promise, 'reason:', reason);
});
// Integration tests for QueueRateLimiter. Requires a local Redis on the
// default port; limiter state is keyed under the 'rl:' prefix and cleared
// before each test.
describe('QueueRateLimiter', () => {
  let redisClient: Redis;
  let rateLimiter: QueueRateLimiter;
  const redisConfig = {
    host: 'localhost',
    port: 6379,
    password: '',
    db: 0,
  };

  beforeEach(async () => {
    // Create Redis client
    redisClient = new Redis(getRedisConnection(redisConfig));
    // Clear Redis keys for tests
    try {
      const keys = await redisClient.keys('rl:*');
      if (keys.length > 0) {
        await redisClient.del(...keys);
      }
    } catch {
      // Ignore cleanup errors
    }
    rateLimiter = new QueueRateLimiter(redisClient);
  });

  afterEach(async () => {
    if (redisClient) {
      try {
        await redisClient.quit();
      } catch {
        // Ignore cleanup errors
      }
    }
    await new Promise(resolve => setTimeout(resolve, 50));
  });

  describe('Rate Limit Rules', () => {
    test('should add and enforce global rate limit', async () => {
      rateLimiter.addRule({
        level: 'global',
        config: {
          points: 5,
          duration: 1, // 1 second
        },
      });
      // Consume 5 points
      for (let i = 0; i < 5; i++) {
        const result = await rateLimiter.checkLimit('any-handler', 'any-operation');
        expect(result.allowed).toBe(true);
      }
      // 6th request should be blocked
      const blocked = await rateLimiter.checkLimit('any-handler', 'any-operation');
      expect(blocked.allowed).toBe(false);
      expect(blocked.retryAfter).toBeGreaterThan(0);
    });

    test('should add and enforce handler-level rate limit', async () => {
      rateLimiter.addRule({
        level: 'handler',
        handler: 'api-handler',
        config: {
          points: 3,
          duration: 1,
        },
      });
      // api-handler should be limited
      for (let i = 0; i < 3; i++) {
        const result = await rateLimiter.checkLimit('api-handler', 'any-operation');
        expect(result.allowed).toBe(true);
      }
      const blocked = await rateLimiter.checkLimit('api-handler', 'any-operation');
      expect(blocked.allowed).toBe(false);
      // Other handlers should not be limited
      const otherHandler = await rateLimiter.checkLimit('other-handler', 'any-operation');
      expect(otherHandler.allowed).toBe(true);
    });

    test('should add and enforce operation-level rate limit', async () => {
      rateLimiter.addRule({
        level: 'operation',
        handler: 'data-handler',
        operation: 'fetch-prices',
        config: {
          points: 2,
          duration: 1,
        },
      });
      // Specific operation should be limited
      for (let i = 0; i < 2; i++) {
        const result = await rateLimiter.checkLimit('data-handler', 'fetch-prices');
        expect(result.allowed).toBe(true);
      }
      const blocked = await rateLimiter.checkLimit('data-handler', 'fetch-prices');
      expect(blocked.allowed).toBe(false);
      // Other operations on same handler should work
      const otherOp = await rateLimiter.checkLimit('data-handler', 'fetch-volume');
      expect(otherOp.allowed).toBe(true);
    });

    test('should enforce multiple rate limits (most restrictive wins)', async () => {
      // Global: 10/sec
      rateLimiter.addRule({
        level: 'global',
        config: { points: 10, duration: 1 },
      });
      // Handler: 5/sec
      rateLimiter.addRule({
        level: 'handler',
        handler: 'test-handler',
        config: { points: 5, duration: 1 },
      });
      // Operation: 2/sec
      rateLimiter.addRule({
        level: 'operation',
        handler: 'test-handler',
        operation: 'test-op',
        config: { points: 2, duration: 1 },
      });
      // Should be limited by operation level (most restrictive)
      for (let i = 0; i < 2; i++) {
        const result = await rateLimiter.checkLimit('test-handler', 'test-op');
        expect(result.allowed).toBe(true);
      }
      const blocked = await rateLimiter.checkLimit('test-handler', 'test-op');
      expect(blocked.allowed).toBe(false);
    });
  });

  describe('Rate Limit Status', () => {
    test('should get rate limit status', async () => {
      rateLimiter.addRule({
        level: 'handler',
        handler: 'status-test',
        config: { points: 10, duration: 60 },
      });
      // Consume some points
      await rateLimiter.checkLimit('status-test', 'operation');
      await rateLimiter.checkLimit('status-test', 'operation');
      const status = await rateLimiter.getStatus('status-test', 'operation');
      expect(status.handler).toBe('status-test');
      expect(status.operation).toBe('operation');
      expect(status.limits.length).toBe(1);
      expect(status.limits[0].points).toBe(10);
      // 2 of 10 points consumed above.
      expect(status.limits[0].remaining).toBe(8);
    });

    test('should show multiple applicable limits in status', async () => {
      rateLimiter.addRule({
        level: 'global',
        config: { points: 100, duration: 60 },
      });
      rateLimiter.addRule({
        level: 'handler',
        handler: 'multi-test',
        config: { points: 50, duration: 60 },
      });
      const status = await rateLimiter.getStatus('multi-test', 'operation');
      expect(status.limits.length).toBe(2);
      const globalLimit = status.limits.find(l => l.level === 'global');
      const handlerLimit = status.limits.find(l => l.level === 'handler');
      expect(globalLimit?.points).toBe(100);
      expect(handlerLimit?.points).toBe(50);
    });
  });

  describe('Rate Limit Management', () => {
    test('should reset rate limits', async () => {
      rateLimiter.addRule({
        level: 'handler',
        handler: 'reset-test',
        config: { points: 1, duration: 60 },
      });
      // Consume the limit
      await rateLimiter.checkLimit('reset-test', 'operation');
      const blocked = await rateLimiter.checkLimit('reset-test', 'operation');
      expect(blocked.allowed).toBe(false);
      // Reset limits
      await rateLimiter.reset('reset-test');
      // Should be allowed again
      const afterReset = await rateLimiter.checkLimit('reset-test', 'operation');
      expect(afterReset.allowed).toBe(true);
    });

    test('should get all rules', async () => {
      rateLimiter.addRule({
        level: 'global',
        config: { points: 100, duration: 60 },
      });
      rateLimiter.addRule({
        level: 'handler',
        handler: 'test',
        config: { points: 50, duration: 60 },
      });
      const rules = rateLimiter.getRules();
      // Assumes getRules() preserves insertion order — TODO confirm.
      expect(rules.length).toBe(2);
      expect(rules[0].level).toBe('global');
      expect(rules[1].level).toBe('handler');
    });

    test('should remove specific rule', async () => {
      rateLimiter.addRule({
        level: 'handler',
        handler: 'remove-test',
        config: { points: 1, duration: 1 },
      });
      // Verify rule exists
      await rateLimiter.checkLimit('remove-test', 'op');
      const blocked = await rateLimiter.checkLimit('remove-test', 'op');
      expect(blocked.allowed).toBe(false);
      // Remove rule
      const removed = rateLimiter.removeRule('handler', 'remove-test');
      expect(removed).toBe(true);
      // Should not be limited anymore
      const afterRemove = await rateLimiter.checkLimit('remove-test', 'op');
      expect(afterRemove.allowed).toBe(true);
    });
  });

  describe('Block Duration', () => {
    test('should block for specified duration after limit exceeded', async () => {
      rateLimiter.addRule({
        level: 'handler',
        handler: 'block-test',
        config: {
          points: 1,
          duration: 1,
          blockDuration: 2, // Block for 2 seconds
        },
      });
      // Consume limit
      await rateLimiter.checkLimit('block-test', 'op');
      // Should be blocked
      const blocked = await rateLimiter.checkLimit('block-test', 'op');
      expect(blocked.allowed).toBe(false);
      // retryAfter is presumably in milliseconds — verify against implementation.
      expect(blocked.retryAfter).toBeGreaterThanOrEqual(1000); // At least 1 second
    });
  });

  describe('Error Handling', () => {
    test('should allow requests when rate limiter fails', async () => {
      // Create a rate limiter with invalid redis client
      const badRedis = new Redis({
        host: 'invalid-host',
        port: 9999,
        retryStrategy: () => null, // Disable retries
      });
      const failingLimiter = new QueueRateLimiter(badRedis);
      failingLimiter.addRule({
        level: 'global',
        config: { points: 1, duration: 1 },
      });
      // Should allow even though Redis is not available (fail-open behavior)
      const result = await failingLimiter.checkLimit('test', 'test');
      expect(result.allowed).toBe(true);
      badRedis.disconnect();
    });
  });
});

View file

@ -1,15 +0,0 @@
{
"extends": "../../../tsconfig.json",
"compilerOptions": {
"outDir": "./dist",
"rootDir": "./src",
"composite": true
},
"include": ["src/**/*"],
"references": [
{ "path": "../../data/cache" },
{ "path": "../../core/handlers" },
{ "path": "../../core/logger" },
{ "path": "../../core/types" }
]
}

View file

@ -1,19 +0,0 @@
{
"extends": ["//"],
"tasks": {
"build": {
"dependsOn": ["@stock-bot/cache#build", "@stock-bot/logger#build", "@stock-bot/types#build"],
"outputs": ["dist/**"],
"inputs": [
"src/**",
"package.json",
"tsconfig.json",
"!**/*.test.ts",
"!**/*.spec.ts",
"!**/test/**",
"!**/tests/**",
"!**/__tests__/**"
]
}
}
}

View file

@ -1,202 +0,0 @@
# @stock-bot/shutdown
Shutdown management library for Node.js applications in the Stock Bot platform.
## Features
- ✅ **Automatic Signal Handling** - SIGTERM, SIGINT, SIGUSR2 (Unix), uncaught exceptions
- ✅ **Platform Support** - Windows and Unix/Linux compatible
- ✅ **Multiple Callbacks** - Register multiple cleanup functions
- ✅ **Timeout Protection** - Configurable shutdown timeout
- ✅ **Error Handling** - Failed callbacks don't block shutdown
- ✅ **TypeScript Support** - Full type definitions
- ✅ **Zero Dependencies** - Lightweight and efficient
## Installation
```bash
bun add @stock-bot/shutdown
```
## Quick Start
```typescript
import { onShutdown, setShutdownTimeout } from '@stock-bot/shutdown';
// Configure shutdown timeout (optional, default: 30 seconds)
setShutdownTimeout(15000); // 15 seconds
// Register cleanup callbacks
onShutdown(async () => {
console.log('Closing database connections...');
await database.close();
});
onShutdown(async () => {
console.log('Stopping background jobs...');
await jobQueue.stop();
});
onShutdown(() => {
console.log('Final cleanup...');
// Synchronous cleanup
});
console.log('Application started. Press Ctrl+C to test graceful shutdown.');
```
## API Reference
### Convenience Functions
#### `onShutdown(callback, priority?, name?)`
Register a cleanup callback. Lower `priority` values run earlier during shutdown (default: 50); the optional `name` labels the callback in failure logs.
```typescript
onShutdown(async () => {
await cleanup();
});
```
#### `setShutdownTimeout(timeout)`
Set shutdown timeout in milliseconds.
```typescript
setShutdownTimeout(30000); // 30 seconds
```
#### `initiateShutdown(signal?)`
Manually trigger shutdown.
```typescript
const result = await initiateShutdown('manual');
console.log(result.success); // true/false
```
#### `shutdownAndExit(signal?, exitCode?)`
Trigger shutdown and exit process.
```typescript
await shutdownAndExit('manual', 0);
```
### Advanced Usage
#### Manual Instance Management
```typescript
import { Shutdown } from '@stock-bot/shutdown';
const shutdown = new Shutdown({
timeout: 20000,
autoRegister: true
});
shutdown.onShutdown(async () => {
await cleanup();
});
// Manual shutdown
const result = await shutdown.shutdown('manual');
```
#### Configuration Options
```typescript
interface ShutdownOptions {
timeout?: number; // Timeout in ms (default: 30000)
autoRegister?: boolean; // Auto-register signals (default: true)
}
```
#### Shutdown Result
```typescript
interface ShutdownResult {
success: boolean;
callbacksExecuted: number;
callbacksFailed: number;
duration: number;
error?: string;
}
```
## Examples
### Express Server
```typescript
import express from 'express';
import { onShutdown, setShutdownTimeout } from '@stock-bot/shutdown';
const app = express();
const server = app.listen(3000);
setShutdownTimeout(10000);
onShutdown(async () => {
console.log('Closing HTTP server...');
await new Promise(resolve => server.close(resolve));
});
onShutdown(async () => {
console.log('Closing database...');
await database.close();
});
```
### Worker Process
```typescript
import { onShutdown } from '@stock-bot/shutdown';
let isRunning = true;
onShutdown(() => {
console.log('Stopping worker...');
isRunning = false;
});
// Worker loop
while (isRunning) {
await processJob();
await new Promise(resolve => setTimeout(resolve, 1000));
}
```
## Signal Handling
The library automatically handles these signals:
- **SIGTERM** - Termination request
- **SIGINT** - Interrupt (Ctrl+C)
- **SIGUSR2** - User-defined signal (Unix only)
- **uncaughtException** - Unhandled exceptions
- **unhandledRejection** - Unhandled promise rejections
On Windows, only SIGTERM and SIGINT are supported due to platform limitations.
## Best Practices
1. **Register callbacks early** in your application startup
2. **Keep callbacks simple** and focused on cleanup
3. **Use appropriate timeouts** based on your cleanup needs
4. **Handle errors gracefully** in callbacks
5. **Test shutdown behavior** in your CI/CD pipeline
## Testing
```typescript
import { resetShutdown, onShutdown } from '@stock-bot/shutdown';
beforeEach(() => {
resetShutdown(); // Clear previous state
});
test('should register shutdown callback', () => {
let cleaned = false;
onShutdown(() => { cleaned = true; });
// Test shutdown behavior
});
```

View file

@ -1,27 +0,0 @@
{
"name": "@stock-bot/shutdown",
"version": "1.0.0",
"description": "Graceful shutdown management for Stock Bot platform",
"type": "module",
"main": "dist/index.js",
"types": "dist/index.d.ts",
"scripts": {
"build": "tsc",
"clean": "rm -rf dist",
"test": "bun test"
},
"dependencies": {},
"devDependencies": {
"typescript": "^5.0.0",
"@types/node": "^20.0.0"
},
"exports": {
".": {
"import": "./dist/index.js",
"types": "./dist/index.d.ts"
}
},
"files": [
"dist"
]
}

View file

@ -1,118 +0,0 @@
import { Shutdown } from './shutdown';
import type { ShutdownResult } from './types';
/**
* @stock-bot/shutdown - Shutdown management library
*
* Main exports for the shutdown library
*/
// Core shutdown classes and types
export { Shutdown } from './shutdown';
export type {
ShutdownCallback,
ShutdownOptions,
ShutdownResult,
PrioritizedShutdownCallback,
} from './types';
// Lazily-created module-level singleton shared by every convenience function below.
let globalInstance: Shutdown | null = null;

/**
 * Return the shared shutdown instance, creating it on first use.
 */
function getGlobalInstance(): Shutdown {
  if (globalInstance === null) {
    globalInstance = Shutdown.getInstance();
  }
  return globalInstance;
}
/**
 * Convenience functions for global shutdown management
 */

/**
 * Register a cleanup callback that will be executed during shutdown
 *
 * @param callback - Cleanup function; may be sync or return a promise
 * @param priority - Lower numbers run earlier (defaults to the instance's medium priority)
 * @param name - Optional label used when reporting callback failures
 */
export function onShutdown(
  callback: () => Promise<void> | void,
  priority?: number,
  name?: string
): void {
  getGlobalInstance().onShutdown(callback, priority, name);
}

/**
 * Register a high priority shutdown callback (for queues, critical services)
 */
export function onShutdownHigh(callback: () => Promise<void> | void, name?: string): void {
  getGlobalInstance().onShutdownHigh(callback, name);
}

/**
 * Register a medium priority shutdown callback (for databases, connections)
 */
export function onShutdownMedium(callback: () => Promise<void> | void, name?: string): void {
  getGlobalInstance().onShutdownMedium(callback, name);
}

/**
 * Register a low priority shutdown callback (for loggers, cleanup)
 */
export function onShutdownLow(callback: () => Promise<void> | void, name?: string): void {
  getGlobalInstance().onShutdownLow(callback, name);
}

/**
 * Set the shutdown timeout in milliseconds
 */
export function setShutdownTimeout(timeout: number): void {
  getGlobalInstance().setTimeout(timeout);
}

/**
 * Check if shutdown is currently in progress
 *
 * Returns false when no global instance exists yet (nothing registered).
 */
export function isShuttingDown(): boolean {
  return globalInstance?.isShutdownInProgress() || false;
}

/**
 * Check if shutdown signal was received (for quick checks in running jobs)
 *
 * Checks both the process-wide globalThis flag (set by signal handlers, visible
 * across workers) and the instance flag, so either source is sufficient.
 */
export function isShutdownSignalReceived(): boolean {
  const globalFlag = globalThis.__SHUTDOWN_SIGNAL_RECEIVED__ || false;
  const instanceFlag = globalInstance?.isShutdownSignalReceived() || false;
  return globalFlag || instanceFlag;
}

/**
 * Get the number of registered shutdown callbacks
 */
export function getShutdownCallbackCount(): number {
  return globalInstance?.getCallbackCount() || 0;
}

/**
 * Manually initiate graceful shutdown
 *
 * @returns Result describing how many callbacks ran, failed, and the duration
 */
export function initiateShutdown(signal?: string): Promise<ShutdownResult> {
  return getGlobalInstance().shutdown(signal);
}

/**
 * Manually initiate graceful shutdown and exit the process
 *
 * @param exitCode - Exit code used on success (failure exits non-zero)
 */
export function shutdownAndExit(signal?: string, exitCode = 0): Promise<never> {
  return getGlobalInstance().shutdownAndExit(signal, exitCode);
}

/**
 * Reset the global instance (mainly for testing)
 */
export function resetShutdown(): void {
  globalInstance = null;
  Shutdown.reset();
}

View file

@ -1,258 +0,0 @@
/**
* Shutdown management for Node.js applications
*
* Features:
* - Automatic signal handling (SIGTERM, SIGINT, etc.)
* - Configurable shutdown timeout
* - Multiple cleanup callbacks with error handling
* - Platform-specific signal support (Windows/Unix)
*/
import type {
PrioritizedShutdownCallback,
ShutdownCallback,
ShutdownOptions,
ShutdownResult,
} from './types';
import { getLogger } from '@stock-bot/logger';
// Global flag that works across all processes/workers
declare global {
var __SHUTDOWN_SIGNAL_RECEIVED__: boolean | undefined;
}
export class Shutdown {
  private static instance: Shutdown | null = null;
  private readonly logger = getLogger('shutdown');
  private isShuttingDown = false;
  private signalReceived = false; // Track if shutdown signal was received
  private shutdownTimeout = 30000; // 30 seconds default
  private callbacks: PrioritizedShutdownCallback[] = [];
  private signalHandlersRegistered = false;

  constructor(options: ShutdownOptions = {}) {
    // `||` (not `??`) is intentional: a timeout of 0 is invalid (see setTimeout)
    // and falls back to the default.
    this.shutdownTimeout = options.timeout || 30000;
    if (options.autoRegister !== false) {
      this.setupSignalHandlers();
    }
  }

  /**
   * Get or create singleton instance
   */
  static getInstance(options?: ShutdownOptions): Shutdown {
    if (!Shutdown.instance) {
      Shutdown.instance = new Shutdown(options);
    }
    return Shutdown.instance;
  }

  /**
   * Reset singleton instance (mainly for testing)
   *
   * NOTE(review): this does not remove already-registered process signal
   * handlers from a previous instance — confirm that is acceptable for tests.
   */
  static reset(): void {
    Shutdown.instance = null;
  }

  /**
   * Register a cleanup callback with priority (lower numbers = higher priority)
   *
   * Registrations made after shutdown has begun are silently ignored, since the
   * callback list is already being executed.
   */
  onShutdown(callback: ShutdownCallback, priority: number = 50, name?: string): void {
    if (this.isShuttingDown) {
      return;
    }
    this.callbacks.push({ callback, priority, name });
  }

  /**
   * Register a high priority shutdown callback (for queues, critical services)
   */
  onShutdownHigh(callback: ShutdownCallback, name?: string): void {
    this.onShutdown(callback, 10, name);
  }

  /**
   * Register a medium priority shutdown callback (for databases, connections)
   */
  onShutdownMedium(callback: ShutdownCallback, name?: string): void {
    this.onShutdown(callback, 50, name);
  }

  /**
   * Register a low priority shutdown callback (for loggers, cleanup)
   */
  onShutdownLow(callback: ShutdownCallback, name?: string): void {
    this.onShutdown(callback, 90, name);
  }

  /**
   * Set shutdown timeout in milliseconds
   *
   * @throws Error if the timeout is zero or negative
   */
  setTimeout(timeout: number): void {
    if (timeout <= 0) {
      throw new Error('Shutdown timeout must be positive');
    }
    this.shutdownTimeout = timeout;
  }

  /**
   * Get current shutdown state
   */
  isShutdownInProgress(): boolean {
    return this.isShuttingDown;
  }

  /**
   * Check if shutdown signal was received (for quick checks in running jobs)
   */
  isShutdownSignalReceived(): boolean {
    return this.signalReceived || this.isShuttingDown;
  }

  /**
   * Get number of registered callbacks
   */
  getCallbackCount(): number {
    return this.callbacks.length;
  }

  /**
   * Initiate graceful shutdown
   *
   * Runs all registered callbacks in priority order, racing against the
   * configured timeout. Never calls process.exit — the caller decides.
   *
   * @returns Result describing executed/failed callback counts and duration
   */
  async shutdown(_signal?: string): Promise<ShutdownResult> {
    if (this.isShuttingDown) {
      return {
        success: false,
        callbacksExecuted: 0,
        callbacksFailed: 0,
        duration: 0,
        error: 'Shutdown already in progress',
      };
    }
    this.isShuttingDown = true;
    const startTime = Date.now();

    const shutdownPromise = this.executeCallbacks();

    // FIX: keep the timer handle and clear it once the race settles. Without
    // this the pending timeout kept the event loop alive for up to
    // `shutdownTimeout` ms after a successful manual shutdown().
    let timeoutHandle: ReturnType<typeof setTimeout> | undefined;
    const timeoutPromise = new Promise<never>((_, reject) => {
      timeoutHandle = setTimeout(() => reject(new Error('Shutdown timeout')), this.shutdownTimeout);
    });

    let result: ShutdownResult;
    try {
      const callbackResult = await Promise.race([shutdownPromise, timeoutPromise]);
      const duration = Date.now() - startTime;
      result = {
        success: true,
        callbacksExecuted: callbackResult.executed,
        callbacksFailed: callbackResult.failed,
        duration,
        error: callbackResult.failed > 0 ? `${callbackResult.failed} callbacks failed` : undefined,
      };
    } catch (error) {
      const duration = Date.now() - startTime;
      const errorMessage = error instanceof Error ? error.message : String(error);
      // On timeout we cannot know how many callbacks completed, so counts are 0.
      result = {
        success: false,
        callbacksExecuted: 0,
        callbacksFailed: 0,
        duration,
        error: errorMessage,
      };
    } finally {
      if (timeoutHandle !== undefined) {
        clearTimeout(timeoutHandle);
      }
    }
    // Don't call process.exit here - let the caller decide
    return result;
  }

  /**
   * Initiate shutdown and exit process
   *
   * Exits with `exitCode` on success, 1 on failure.
   */
  async shutdownAndExit(signal?: string, exitCode = 0): Promise<never> {
    const result = await this.shutdown(signal);
    const finalExitCode = result.success ? exitCode : 1;
    process.exit(finalExitCode);
  }

  /**
   * Execute all registered callbacks in priority order
   *
   * A failing callback is counted and logged but never blocks the rest.
   */
  private async executeCallbacks(): Promise<{ executed: number; failed: number }> {
    if (this.callbacks.length === 0) {
      return { executed: 0, failed: 0 };
    }
    // Sort callbacks by priority (lower numbers = higher priority = execute first)
    const sortedCallbacks = [...this.callbacks].sort((a, b) => a.priority - b.priority);
    let executed = 0;
    let failed = 0;
    // Execute callbacks in order by priority
    for (const { callback, name, priority } of sortedCallbacks) {
      try {
        await callback();
        executed++;
      } catch (error) {
        failed++;
        // FIX: log every failed callback — previously anonymous (unnamed)
        // failures were counted but silently swallowed.
        this.logger.error(
          `Shutdown failed: ${name ?? '<anonymous>'} (priority: ${priority})`,
          error
        );
      }
    }
    return { executed, failed };
  }

  /**
   * Setup signal handlers for graceful shutdown
   *
   * Registers once per instance; handlers set the signal flags immediately so
   * running jobs can poll isShutdownSignalReceived(), then run shutdownAndExit.
   */
  private setupSignalHandlers(): void {
    if (this.signalHandlersRegistered) {
      return;
    }
    // Platform-specific signals
    const signals: NodeJS.Signals[] =
      process.platform === 'win32' ? ['SIGINT', 'SIGTERM'] : ['SIGTERM', 'SIGINT', 'SIGUSR2'];
    signals.forEach(signal => {
      process.on(signal, () => {
        // Only process if not already shutting down
        if (!this.isShuttingDown) {
          // Set signal flag immediately for quick checks
          this.signalReceived = true;
          // Also set global flag for workers/other processes
          globalThis.__SHUTDOWN_SIGNAL_RECEIVED__ = true;
          this.shutdownAndExit(signal).catch(() => {
            process.exit(1);
          });
        }
      });
    });
    // Handle uncaught exceptions
    process.on('uncaughtException', () => {
      this.signalReceived = true;
      this.shutdownAndExit('uncaughtException', 1).catch(() => {
        process.exit(1);
      });
    });
    // Handle unhandled promise rejections
    process.on('unhandledRejection', () => {
      this.signalReceived = true;
      this.shutdownAndExit('unhandledRejection', 1).catch(() => {
        process.exit(1);
      });
    });
    this.signalHandlersRegistered = true;
  }
}

View file

@ -1,43 +0,0 @@
/**
 * Types for shutdown functionality
 */

/**
 * Callback function for shutdown cleanup.
 * May be synchronous or return a promise that resolves when cleanup completes.
 */
export type ShutdownCallback = () => Promise<void> | void;

/**
 * Shutdown callback with priority information
 */
export interface PrioritizedShutdownCallback {
  /** The cleanup function to run during shutdown */
  callback: ShutdownCallback;
  /** Execution order — lower numbers run earlier */
  priority: number;
  /** Optional label used when reporting callback failures */
  name?: string;
}

/**
 * Options for configuring shutdown behavior
 */
export interface ShutdownOptions {
  /** Timeout in milliseconds before forcing shutdown (default: 30000) */
  timeout?: number;
  /** Whether to automatically register signal handlers (default: true) */
  autoRegister?: boolean;
}

/**
 * Shutdown result information
 */
export interface ShutdownResult {
  /** Whether shutdown completed successfully */
  success: boolean;
  /** Number of callbacks executed */
  callbacksExecuted: number;
  /** Number of callbacks that failed */
  callbacksFailed: number;
  /** Time taken for shutdown in milliseconds */
  duration: number;
  /** Error message if shutdown failed */
  error?: string;
}

View file

@ -1,10 +0,0 @@
{
"extends": "../../../tsconfig.json",
"compilerOptions": {
"outDir": "./dist",
"rootDir": "./src",
"composite": true
},
"include": ["src/**/*"],
"references": []
}