format
This commit is contained in:
parent
d858222af7
commit
7d9044ab29
202 changed files with 10755 additions and 10972 deletions
|
|
@ -91,4 +91,4 @@
|
|||
"apiKey": "",
|
||||
"apiUrl": "https://proxy.webshare.io/api/v2/"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -45,4 +45,4 @@
|
|||
"webmasterId": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -29,4 +29,4 @@
|
|||
"retries": 5,
|
||||
"retryDelay": 2000
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -39,4 +39,4 @@
|
|||
"timeout": 5000,
|
||||
"retries": 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,196 +1,193 @@
|
|||
#!/usr/bin/env bun
|
||||
/* eslint-disable no-console */
|
||||
import { parseArgs } from 'util';
|
||||
import { join } from 'path';
|
||||
import { ConfigManager } from './config-manager';
|
||||
import { appConfigSchema } from './schemas';
|
||||
import {
|
||||
validateConfig,
|
||||
formatValidationResult,
|
||||
checkDeprecations,
|
||||
checkRequiredEnvVars,
|
||||
validateCompleteness
|
||||
} from './utils/validation';
|
||||
import { redactSecrets } from './utils/secrets';
|
||||
import type { Environment } from './types';
|
||||
|
||||
interface CliOptions {
|
||||
config?: string;
|
||||
env?: string;
|
||||
validate?: boolean;
|
||||
show?: boolean;
|
||||
check?: boolean;
|
||||
json?: boolean;
|
||||
help?: boolean;
|
||||
}
|
||||
|
||||
const DEPRECATIONS = {
|
||||
'service.legacyMode': 'Use service.mode instead',
|
||||
'database.redis': 'Use database.dragonfly instead',
|
||||
};
|
||||
|
||||
const REQUIRED_PATHS = [
|
||||
'service.name',
|
||||
'service.port',
|
||||
'database.postgres.host',
|
||||
'database.postgres.database',
|
||||
];
|
||||
|
||||
const REQUIRED_ENV_VARS = [
|
||||
'NODE_ENV',
|
||||
];
|
||||
|
||||
const SECRET_PATHS = [
|
||||
'database.postgres.password',
|
||||
'database.mongodb.uri',
|
||||
'providers.quoteMedia.apiKey',
|
||||
'providers.interactiveBrokers.clientId',
|
||||
];
|
||||
|
||||
function printUsage() {
|
||||
console.log(`
|
||||
Stock Bot Configuration CLI
|
||||
|
||||
Usage: bun run config-cli [options]
|
||||
|
||||
Options:
|
||||
--config <path> Path to config directory (default: ./config)
|
||||
--env <env> Environment to use (development, test, production)
|
||||
--validate Validate configuration against schema
|
||||
--show Show current configuration (secrets redacted)
|
||||
--check Run all configuration checks
|
||||
--json Output in JSON format
|
||||
--help Show this help message
|
||||
|
||||
Examples:
|
||||
# Validate configuration
|
||||
bun run config-cli --validate
|
||||
|
||||
# Show configuration for production
|
||||
bun run config-cli --env production --show
|
||||
|
||||
# Run all checks
|
||||
bun run config-cli --check
|
||||
|
||||
# Output configuration as JSON
|
||||
bun run config-cli --show --json
|
||||
`);
|
||||
}
|
||||
|
||||
async function main() {
|
||||
const { values } = parseArgs({
|
||||
args: process.argv.slice(2),
|
||||
options: {
|
||||
config: { type: 'string' },
|
||||
env: { type: 'string' },
|
||||
validate: { type: 'boolean' },
|
||||
show: { type: 'boolean' },
|
||||
check: { type: 'boolean' },
|
||||
json: { type: 'boolean' },
|
||||
help: { type: 'boolean' },
|
||||
},
|
||||
}) as { values: CliOptions };
|
||||
|
||||
if (values.help) {
|
||||
printUsage();
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
const configPath = values.config || join(process.cwd(), 'config');
|
||||
const environment = values.env as Environment;
|
||||
|
||||
try {
|
||||
const manager = new ConfigManager({
|
||||
configPath,
|
||||
environment,
|
||||
});
|
||||
|
||||
const config = await manager.initialize(appConfigSchema);
|
||||
|
||||
if (values.validate) {
|
||||
const result = validateConfig(config, appConfigSchema);
|
||||
|
||||
if (values.json) {
|
||||
console.log(JSON.stringify(result, null, 2));
|
||||
} else {
|
||||
console.log(formatValidationResult(result));
|
||||
}
|
||||
|
||||
process.exit(result.valid ? 0 : 1);
|
||||
}
|
||||
|
||||
if (values.show) {
|
||||
const redacted = redactSecrets(config, SECRET_PATHS);
|
||||
|
||||
if (values.json) {
|
||||
console.log(JSON.stringify(redacted, null, 2));
|
||||
} else {
|
||||
console.log('Current Configuration:');
|
||||
console.log(JSON.stringify(redacted, null, 2));
|
||||
}
|
||||
}
|
||||
|
||||
if (values.check) {
|
||||
console.log('Running configuration checks...\n');
|
||||
|
||||
// Schema validation
|
||||
console.log('1. Schema Validation:');
|
||||
const schemaResult = validateConfig(config, appConfigSchema);
|
||||
console.log(formatValidationResult(schemaResult));
|
||||
console.log();
|
||||
|
||||
// Environment variables
|
||||
console.log('2. Required Environment Variables:');
|
||||
const envResult = checkRequiredEnvVars(REQUIRED_ENV_VARS);
|
||||
console.log(formatValidationResult(envResult));
|
||||
console.log();
|
||||
|
||||
// Required paths
|
||||
console.log('3. Required Configuration Paths:');
|
||||
const pathResult = validateCompleteness(config, REQUIRED_PATHS);
|
||||
console.log(formatValidationResult(pathResult));
|
||||
console.log();
|
||||
|
||||
// Deprecations
|
||||
console.log('4. Deprecation Warnings:');
|
||||
const warnings = checkDeprecations(config, DEPRECATIONS);
|
||||
if (warnings && warnings.length > 0) {
|
||||
for (const warning of warnings) {
|
||||
console.log(` ⚠️ ${warning.path}: ${warning.message}`);
|
||||
}
|
||||
} else {
|
||||
console.log(' ✅ No deprecated options found');
|
||||
}
|
||||
console.log();
|
||||
|
||||
// Overall result
|
||||
const allValid = schemaResult.valid && envResult.valid && pathResult.valid;
|
||||
|
||||
if (allValid) {
|
||||
console.log('✅ All configuration checks passed!');
|
||||
process.exit(0);
|
||||
} else {
|
||||
console.log('❌ Some configuration checks failed');
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
if (!values.validate && !values.show && !values.check) {
|
||||
console.log('No action specified. Use --help for usage information.');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
if (values.json) {
|
||||
console.error(JSON.stringify({ error: String(error) }));
|
||||
} else {
|
||||
console.error('Error:', error);
|
||||
}
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
// Run CLI
|
||||
if (import.meta.main) {
|
||||
main();
|
||||
}
|
||||
#!/usr/bin/env bun
|
||||
/* eslint-disable no-console */
|
||||
import { join } from 'path';
|
||||
import { parseArgs } from 'util';
|
||||
import { redactSecrets } from './utils/secrets';
|
||||
import {
|
||||
checkDeprecations,
|
||||
checkRequiredEnvVars,
|
||||
formatValidationResult,
|
||||
validateCompleteness,
|
||||
validateConfig,
|
||||
} from './utils/validation';
|
||||
import { ConfigManager } from './config-manager';
|
||||
import { appConfigSchema } from './schemas';
|
||||
import type { Environment } from './types';
|
||||
|
||||
interface CliOptions {
|
||||
config?: string;
|
||||
env?: string;
|
||||
validate?: boolean;
|
||||
show?: boolean;
|
||||
check?: boolean;
|
||||
json?: boolean;
|
||||
help?: boolean;
|
||||
}
|
||||
|
||||
const DEPRECATIONS = {
|
||||
'service.legacyMode': 'Use service.mode instead',
|
||||
'database.redis': 'Use database.dragonfly instead',
|
||||
};
|
||||
|
||||
const REQUIRED_PATHS = [
|
||||
'service.name',
|
||||
'service.port',
|
||||
'database.postgres.host',
|
||||
'database.postgres.database',
|
||||
];
|
||||
|
||||
const REQUIRED_ENV_VARS = ['NODE_ENV'];
|
||||
|
||||
const SECRET_PATHS = [
|
||||
'database.postgres.password',
|
||||
'database.mongodb.uri',
|
||||
'providers.quoteMedia.apiKey',
|
||||
'providers.interactiveBrokers.clientId',
|
||||
];
|
||||
|
||||
function printUsage() {
|
||||
console.log(`
|
||||
Stock Bot Configuration CLI
|
||||
|
||||
Usage: bun run config-cli [options]
|
||||
|
||||
Options:
|
||||
--config <path> Path to config directory (default: ./config)
|
||||
--env <env> Environment to use (development, test, production)
|
||||
--validate Validate configuration against schema
|
||||
--show Show current configuration (secrets redacted)
|
||||
--check Run all configuration checks
|
||||
--json Output in JSON format
|
||||
--help Show this help message
|
||||
|
||||
Examples:
|
||||
# Validate configuration
|
||||
bun run config-cli --validate
|
||||
|
||||
# Show configuration for production
|
||||
bun run config-cli --env production --show
|
||||
|
||||
# Run all checks
|
||||
bun run config-cli --check
|
||||
|
||||
# Output configuration as JSON
|
||||
bun run config-cli --show --json
|
||||
`);
|
||||
}
|
||||
|
||||
async function main() {
|
||||
const { values } = parseArgs({
|
||||
args: process.argv.slice(2),
|
||||
options: {
|
||||
config: { type: 'string' },
|
||||
env: { type: 'string' },
|
||||
validate: { type: 'boolean' },
|
||||
show: { type: 'boolean' },
|
||||
check: { type: 'boolean' },
|
||||
json: { type: 'boolean' },
|
||||
help: { type: 'boolean' },
|
||||
},
|
||||
}) as { values: CliOptions };
|
||||
|
||||
if (values.help) {
|
||||
printUsage();
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
const configPath = values.config || join(process.cwd(), 'config');
|
||||
const environment = values.env as Environment;
|
||||
|
||||
try {
|
||||
const manager = new ConfigManager({
|
||||
configPath,
|
||||
environment,
|
||||
});
|
||||
|
||||
const config = await manager.initialize(appConfigSchema);
|
||||
|
||||
if (values.validate) {
|
||||
const result = validateConfig(config, appConfigSchema);
|
||||
|
||||
if (values.json) {
|
||||
console.log(JSON.stringify(result, null, 2));
|
||||
} else {
|
||||
console.log(formatValidationResult(result));
|
||||
}
|
||||
|
||||
process.exit(result.valid ? 0 : 1);
|
||||
}
|
||||
|
||||
if (values.show) {
|
||||
const redacted = redactSecrets(config, SECRET_PATHS);
|
||||
|
||||
if (values.json) {
|
||||
console.log(JSON.stringify(redacted, null, 2));
|
||||
} else {
|
||||
console.log('Current Configuration:');
|
||||
console.log(JSON.stringify(redacted, null, 2));
|
||||
}
|
||||
}
|
||||
|
||||
if (values.check) {
|
||||
console.log('Running configuration checks...\n');
|
||||
|
||||
// Schema validation
|
||||
console.log('1. Schema Validation:');
|
||||
const schemaResult = validateConfig(config, appConfigSchema);
|
||||
console.log(formatValidationResult(schemaResult));
|
||||
console.log();
|
||||
|
||||
// Environment variables
|
||||
console.log('2. Required Environment Variables:');
|
||||
const envResult = checkRequiredEnvVars(REQUIRED_ENV_VARS);
|
||||
console.log(formatValidationResult(envResult));
|
||||
console.log();
|
||||
|
||||
// Required paths
|
||||
console.log('3. Required Configuration Paths:');
|
||||
const pathResult = validateCompleteness(config, REQUIRED_PATHS);
|
||||
console.log(formatValidationResult(pathResult));
|
||||
console.log();
|
||||
|
||||
// Deprecations
|
||||
console.log('4. Deprecation Warnings:');
|
||||
const warnings = checkDeprecations(config, DEPRECATIONS);
|
||||
if (warnings && warnings.length > 0) {
|
||||
for (const warning of warnings) {
|
||||
console.log(` ⚠️ ${warning.path}: ${warning.message}`);
|
||||
}
|
||||
} else {
|
||||
console.log(' ✅ No deprecated options found');
|
||||
}
|
||||
console.log();
|
||||
|
||||
// Overall result
|
||||
const allValid = schemaResult.valid && envResult.valid && pathResult.valid;
|
||||
|
||||
if (allValid) {
|
||||
console.log('✅ All configuration checks passed!');
|
||||
process.exit(0);
|
||||
} else {
|
||||
console.log('❌ Some configuration checks failed');
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
if (!values.validate && !values.show && !values.check) {
|
||||
console.log('No action specified. Use --help for usage information.');
|
||||
process.exit(1);
|
||||
}
|
||||
} catch (error) {
|
||||
if (values.json) {
|
||||
console.error(JSON.stringify({ error: String(error) }));
|
||||
} else {
|
||||
console.error('Error:', error);
|
||||
}
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
// Run CLI
|
||||
if (import.meta.main) {
|
||||
main();
|
||||
}
|
||||
|
|
|
|||
|
|
@ -6,15 +6,21 @@ export class ConfigError extends Error {
|
|||
}
|
||||
|
||||
export class ConfigValidationError extends ConfigError {
|
||||
constructor(message: string, public errors: unknown) {
|
||||
constructor(
|
||||
message: string,
|
||||
public errors: unknown
|
||||
) {
|
||||
super(message);
|
||||
this.name = 'ConfigValidationError';
|
||||
}
|
||||
}
|
||||
|
||||
export class ConfigLoaderError extends ConfigError {
|
||||
constructor(message: string, public loader: string) {
|
||||
constructor(
|
||||
message: string,
|
||||
public loader: string
|
||||
) {
|
||||
super(`${loader}: ${message}`);
|
||||
this.name = 'ConfigLoaderError';
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -7,4 +7,4 @@ export const baseConfigSchema = z.object({
|
|||
name: z.string().optional(),
|
||||
version: z.string().optional(),
|
||||
debug: z.boolean().default(false),
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@ -61,4 +61,4 @@ export const databaseConfigSchema = z.object({
|
|||
questdb: questdbConfigSchema,
|
||||
mongodb: mongodbConfigSchema,
|
||||
dragonfly: dragonflyConfigSchema,
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@ -1,87 +1,105 @@
|
|||
export * from './base.schema';
|
||||
export * from './database.schema';
|
||||
export * from './provider.schema';
|
||||
export * from './service.schema';
|
||||
|
||||
import { z } from 'zod';
|
||||
import { baseConfigSchema, environmentSchema } from './base.schema';
|
||||
import { providerConfigSchema, webshareProviderConfigSchema } from './provider.schema';
|
||||
import { httpConfigSchema, queueConfigSchema } from './service.schema';
|
||||
|
||||
export * from './base.schema';
|
||||
export * from './database.schema';
|
||||
export * from './provider.schema';
|
||||
export * from './service.schema';
|
||||
|
||||
// Flexible service schema with defaults
|
||||
const flexibleServiceConfigSchema = z.object({
|
||||
name: z.string().default('default-service'),
|
||||
port: z.number().min(1).max(65535).default(3000),
|
||||
host: z.string().default('0.0.0.0'),
|
||||
healthCheckPath: z.string().default('/health'),
|
||||
metricsPath: z.string().default('/metrics'),
|
||||
shutdownTimeout: z.number().default(30000),
|
||||
cors: z.object({
|
||||
enabled: z.boolean().default(true),
|
||||
origin: z.union([z.string(), z.array(z.string())]).default('*'),
|
||||
credentials: z.boolean().default(true),
|
||||
}).default({}),
|
||||
}).default({});
|
||||
const flexibleServiceConfigSchema = z
|
||||
.object({
|
||||
name: z.string().default('default-service'),
|
||||
port: z.number().min(1).max(65535).default(3000),
|
||||
host: z.string().default('0.0.0.0'),
|
||||
healthCheckPath: z.string().default('/health'),
|
||||
metricsPath: z.string().default('/metrics'),
|
||||
shutdownTimeout: z.number().default(30000),
|
||||
cors: z
|
||||
.object({
|
||||
enabled: z.boolean().default(true),
|
||||
origin: z.union([z.string(), z.array(z.string())]).default('*'),
|
||||
credentials: z.boolean().default(true),
|
||||
})
|
||||
.default({}),
|
||||
})
|
||||
.default({});
|
||||
|
||||
// Flexible database schema with defaults
|
||||
const flexibleDatabaseConfigSchema = z.object({
|
||||
postgres: z.object({
|
||||
host: z.string().default('localhost'),
|
||||
port: z.number().default(5432),
|
||||
database: z.string().default('test_db'),
|
||||
user: z.string().default('test_user'),
|
||||
password: z.string().default('test_pass'),
|
||||
ssl: z.boolean().default(false),
|
||||
poolSize: z.number().min(1).max(100).default(10),
|
||||
connectionTimeout: z.number().default(30000),
|
||||
idleTimeout: z.number().default(10000),
|
||||
}).default({}),
|
||||
questdb: z.object({
|
||||
host: z.string().default('localhost'),
|
||||
ilpPort: z.number().default(9009),
|
||||
httpPort: z.number().default(9000),
|
||||
pgPort: z.number().default(8812),
|
||||
database: z.string().default('questdb'),
|
||||
user: z.string().default('admin'),
|
||||
password: z.string().default('quest'),
|
||||
bufferSize: z.number().default(65536),
|
||||
flushInterval: z.number().default(1000),
|
||||
}).default({}),
|
||||
mongodb: z.object({
|
||||
uri: z.string().url().optional(),
|
||||
host: z.string().default('localhost'),
|
||||
port: z.number().default(27017),
|
||||
database: z.string().default('test_mongo'),
|
||||
user: z.string().optional(),
|
||||
password: z.string().optional(),
|
||||
authSource: z.string().default('admin'),
|
||||
replicaSet: z.string().optional(),
|
||||
poolSize: z.number().min(1).max(100).default(10),
|
||||
}).default({}),
|
||||
dragonfly: z.object({
|
||||
host: z.string().default('localhost'),
|
||||
port: z.number().default(6379),
|
||||
password: z.string().optional(),
|
||||
db: z.number().min(0).max(15).default(0),
|
||||
keyPrefix: z.string().optional(),
|
||||
ttl: z.number().optional(),
|
||||
maxRetries: z.number().default(3),
|
||||
retryDelay: z.number().default(100),
|
||||
}).default({}),
|
||||
}).default({});
|
||||
const flexibleDatabaseConfigSchema = z
|
||||
.object({
|
||||
postgres: z
|
||||
.object({
|
||||
host: z.string().default('localhost'),
|
||||
port: z.number().default(5432),
|
||||
database: z.string().default('test_db'),
|
||||
user: z.string().default('test_user'),
|
||||
password: z.string().default('test_pass'),
|
||||
ssl: z.boolean().default(false),
|
||||
poolSize: z.number().min(1).max(100).default(10),
|
||||
connectionTimeout: z.number().default(30000),
|
||||
idleTimeout: z.number().default(10000),
|
||||
})
|
||||
.default({}),
|
||||
questdb: z
|
||||
.object({
|
||||
host: z.string().default('localhost'),
|
||||
ilpPort: z.number().default(9009),
|
||||
httpPort: z.number().default(9000),
|
||||
pgPort: z.number().default(8812),
|
||||
database: z.string().default('questdb'),
|
||||
user: z.string().default('admin'),
|
||||
password: z.string().default('quest'),
|
||||
bufferSize: z.number().default(65536),
|
||||
flushInterval: z.number().default(1000),
|
||||
})
|
||||
.default({}),
|
||||
mongodb: z
|
||||
.object({
|
||||
uri: z.string().url().optional(),
|
||||
host: z.string().default('localhost'),
|
||||
port: z.number().default(27017),
|
||||
database: z.string().default('test_mongo'),
|
||||
user: z.string().optional(),
|
||||
password: z.string().optional(),
|
||||
authSource: z.string().default('admin'),
|
||||
replicaSet: z.string().optional(),
|
||||
poolSize: z.number().min(1).max(100).default(10),
|
||||
})
|
||||
.default({}),
|
||||
dragonfly: z
|
||||
.object({
|
||||
host: z.string().default('localhost'),
|
||||
port: z.number().default(6379),
|
||||
password: z.string().optional(),
|
||||
db: z.number().min(0).max(15).default(0),
|
||||
keyPrefix: z.string().optional(),
|
||||
ttl: z.number().optional(),
|
||||
maxRetries: z.number().default(3),
|
||||
retryDelay: z.number().default(100),
|
||||
})
|
||||
.default({}),
|
||||
})
|
||||
.default({});
|
||||
|
||||
// Flexible log schema with defaults (renamed from logging)
|
||||
const flexibleLogConfigSchema = z.object({
|
||||
level: z.enum(['trace', 'debug', 'info', 'warn', 'error', 'fatal']).default('info'),
|
||||
format: z.enum(['json', 'pretty']).default('json'),
|
||||
hideObject: z.boolean().default(false),
|
||||
loki: z.object({
|
||||
enabled: z.boolean().default(false),
|
||||
host: z.string().default('localhost'),
|
||||
port: z.number().default(3100),
|
||||
labels: z.record(z.string()).default({}),
|
||||
}).optional(),
|
||||
}).default({});
|
||||
const flexibleLogConfigSchema = z
|
||||
.object({
|
||||
level: z.enum(['trace', 'debug', 'info', 'warn', 'error', 'fatal']).default('info'),
|
||||
format: z.enum(['json', 'pretty']).default('json'),
|
||||
hideObject: z.boolean().default(false),
|
||||
loki: z
|
||||
.object({
|
||||
enabled: z.boolean().default(false),
|
||||
host: z.string().default('localhost'),
|
||||
port: z.number().default(3100),
|
||||
labels: z.record(z.string()).default({}),
|
||||
})
|
||||
.optional(),
|
||||
})
|
||||
.default({});
|
||||
|
||||
// Complete application configuration schema
|
||||
export const appConfigSchema = baseConfigSchema.extend({
|
||||
|
|
@ -95,4 +113,4 @@ export const appConfigSchema = baseConfigSchema.extend({
|
|||
webshare: webshareProviderConfigSchema.optional(),
|
||||
});
|
||||
|
||||
export type AppConfig = z.infer<typeof appConfigSchema>;
|
||||
export type AppConfig = z.infer<typeof appConfigSchema>;
|
||||
|
|
|
|||
|
|
@ -5,10 +5,12 @@ export const baseProviderConfigSchema = z.object({
|
|||
name: z.string(),
|
||||
enabled: z.boolean().default(true),
|
||||
priority: z.number().default(0),
|
||||
rateLimit: z.object({
|
||||
maxRequests: z.number().default(100),
|
||||
windowMs: z.number().default(60000),
|
||||
}).optional(),
|
||||
rateLimit: z
|
||||
.object({
|
||||
maxRequests: z.number().default(100),
|
||||
windowMs: z.number().default(60000),
|
||||
})
|
||||
.optional(),
|
||||
timeout: z.number().default(30000),
|
||||
retries: z.number().default(3),
|
||||
});
|
||||
|
|
@ -71,4 +73,4 @@ export const providerSchemas = {
|
|||
qm: qmProviderConfigSchema,
|
||||
yahoo: yahooProviderConfigSchema,
|
||||
webshare: webshareProviderConfigSchema,
|
||||
} as const;
|
||||
} as const;
|
||||
|
|
|
|||
|
|
@ -8,23 +8,27 @@ export const serviceConfigSchema = z.object({
|
|||
healthCheckPath: z.string().default('/health'),
|
||||
metricsPath: z.string().default('/metrics'),
|
||||
shutdownTimeout: z.number().default(30000),
|
||||
cors: z.object({
|
||||
enabled: z.boolean().default(true),
|
||||
origin: z.union([z.string(), z.array(z.string())]).default('*'),
|
||||
credentials: z.boolean().default(true),
|
||||
}).default({}),
|
||||
cors: z
|
||||
.object({
|
||||
enabled: z.boolean().default(true),
|
||||
origin: z.union([z.string(), z.array(z.string())]).default('*'),
|
||||
credentials: z.boolean().default(true),
|
||||
})
|
||||
.default({}),
|
||||
});
|
||||
|
||||
// Logging configuration
|
||||
export const loggingConfigSchema = z.object({
|
||||
level: z.enum(['trace', 'debug', 'info', 'warn', 'error', 'fatal']).default('info'),
|
||||
format: z.enum(['json', 'pretty']).default('json'),
|
||||
loki: z.object({
|
||||
enabled: z.boolean().default(false),
|
||||
host: z.string().default('localhost'),
|
||||
port: z.number().default(3100),
|
||||
labels: z.record(z.string()).default({}),
|
||||
}).optional(),
|
||||
loki: z
|
||||
.object({
|
||||
enabled: z.boolean().default(false),
|
||||
host: z.string().default('localhost'),
|
||||
port: z.number().default(3100),
|
||||
labels: z.record(z.string()).default({}),
|
||||
})
|
||||
.optional(),
|
||||
});
|
||||
|
||||
// Queue configuration
|
||||
|
|
@ -35,15 +39,19 @@ export const queueConfigSchema = z.object({
|
|||
password: z.string().optional(),
|
||||
db: z.number().default(1),
|
||||
}),
|
||||
defaultJobOptions: z.object({
|
||||
attempts: z.number().default(3),
|
||||
backoff: z.object({
|
||||
type: z.enum(['exponential', 'fixed']).default('exponential'),
|
||||
delay: z.number().default(1000),
|
||||
}).default({}),
|
||||
removeOnComplete: z.number().default(10),
|
||||
removeOnFail: z.number().default(5),
|
||||
}).default({}),
|
||||
defaultJobOptions: z
|
||||
.object({
|
||||
attempts: z.number().default(3),
|
||||
backoff: z
|
||||
.object({
|
||||
type: z.enum(['exponential', 'fixed']).default('exponential'),
|
||||
delay: z.number().default(1000),
|
||||
})
|
||||
.default({}),
|
||||
removeOnComplete: z.number().default(10),
|
||||
removeOnFail: z.number().default(5),
|
||||
})
|
||||
.default({}),
|
||||
});
|
||||
|
||||
// HTTP client configuration
|
||||
|
|
@ -52,12 +60,16 @@ export const httpConfigSchema = z.object({
|
|||
retries: z.number().default(3),
|
||||
retryDelay: z.number().default(1000),
|
||||
userAgent: z.string().optional(),
|
||||
proxy: z.object({
|
||||
enabled: z.boolean().default(false),
|
||||
url: z.string().url().optional(),
|
||||
auth: z.object({
|
||||
username: z.string(),
|
||||
password: z.string(),
|
||||
}).optional(),
|
||||
}).optional(),
|
||||
});
|
||||
proxy: z
|
||||
.object({
|
||||
enabled: z.boolean().default(false),
|
||||
url: z.string().url().optional(),
|
||||
auth: z
|
||||
.object({
|
||||
username: z.string(),
|
||||
password: z.string(),
|
||||
})
|
||||
.optional(),
|
||||
})
|
||||
.optional(),
|
||||
});
|
||||
|
|
|
|||
|
|
@ -1,183 +1,178 @@
|
|||
import { z } from 'zod';
|
||||
|
||||
/**
|
||||
* Secret value wrapper to prevent accidental logging
|
||||
*/
|
||||
export class SecretValue<T = string> {
|
||||
private readonly value: T;
|
||||
private readonly masked: string;
|
||||
|
||||
constructor(value: T, mask: string = '***') {
|
||||
this.value = value;
|
||||
this.masked = mask;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the actual secret value
|
||||
* @param reason - Required reason for accessing the secret
|
||||
*/
|
||||
reveal(reason: string): T {
|
||||
if (!reason) {
|
||||
throw new Error('Reason required for revealing secret value');
|
||||
}
|
||||
return this.value;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get masked representation
|
||||
*/
|
||||
toString(): string {
|
||||
return this.masked;
|
||||
}
|
||||
|
||||
/**
|
||||
* Prevent JSON serialization of actual value
|
||||
*/
|
||||
toJSON(): string {
|
||||
return this.masked;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if value matches without revealing it
|
||||
*/
|
||||
equals(other: T): boolean {
|
||||
return this.value === other;
|
||||
}
|
||||
|
||||
/**
|
||||
* Transform the secret value
|
||||
*/
|
||||
map<R>(fn: (value: T) => R, reason: string): SecretValue<R> {
|
||||
return new SecretValue(fn(this.reveal(reason)));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Zod schema for secret values
|
||||
*/
|
||||
export const secretSchema = <T extends z.ZodTypeAny>(_schema: T) => {
|
||||
return z.custom<SecretValue<z.infer<T>>>(
|
||||
(val) => val instanceof SecretValue,
|
||||
{
|
||||
message: 'Expected SecretValue instance',
|
||||
}
|
||||
);
|
||||
};
|
||||
|
||||
/**
|
||||
* Transform string to SecretValue in Zod schema
|
||||
*/
|
||||
export const secretStringSchema = z
|
||||
.string()
|
||||
.transform((val) => new SecretValue(val));
|
||||
|
||||
/**
|
||||
* Create a secret value
|
||||
*/
|
||||
export function secret<T = string>(value: T, mask?: string): SecretValue<T> {
|
||||
return new SecretValue(value, mask);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a value is a secret
|
||||
*/
|
||||
export function isSecret(value: unknown): value is SecretValue {
|
||||
return value instanceof SecretValue;
|
||||
}
|
||||
|
||||
/**
|
||||
* Redact secrets from an object
|
||||
*/
|
||||
export function redactSecrets<T extends Record<string, any>>(
|
||||
obj: T,
|
||||
secretPaths: string[] = []
|
||||
): T {
|
||||
const result = { ...obj };
|
||||
|
||||
// Redact known secret paths
|
||||
for (const path of secretPaths) {
|
||||
const keys = path.split('.');
|
||||
let current: any = result;
|
||||
|
||||
for (let i = 0; i < keys.length - 1; i++) {
|
||||
const key = keys[i];
|
||||
if (key && current[key] && typeof current[key] === 'object') {
|
||||
current = current[key];
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
const lastKey = keys[keys.length - 1];
|
||||
if (current && lastKey && lastKey in current) {
|
||||
current[lastKey] = '***REDACTED***';
|
||||
}
|
||||
}
|
||||
|
||||
// Recursively redact SecretValue instances
|
||||
function redactSecretValues(obj: any): any {
|
||||
if (obj === null || obj === undefined) {
|
||||
return obj;
|
||||
}
|
||||
|
||||
if (isSecret(obj)) {
|
||||
return obj.toString();
|
||||
}
|
||||
|
||||
if (Array.isArray(obj)) {
|
||||
return obj.map(redactSecretValues);
|
||||
}
|
||||
|
||||
if (typeof obj === 'object') {
|
||||
const result: any = {};
|
||||
for (const [key, value] of Object.entries(obj)) {
|
||||
result[key] = redactSecretValues(value);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
return obj;
|
||||
}
|
||||
|
||||
return redactSecretValues(result);
|
||||
}
|
||||
|
||||
/**
|
||||
* Environment variable names that should be treated as secrets
|
||||
*/
|
||||
export const COMMON_SECRET_PATTERNS = [
|
||||
/password/i,
|
||||
/secret/i,
|
||||
/key/i,
|
||||
/token/i,
|
||||
/credential/i,
|
||||
/private/i,
|
||||
/auth/i,
|
||||
/api[-_]?key/i,
|
||||
];
|
||||
|
||||
/**
|
||||
* Check if an environment variable name indicates a secret
|
||||
*/
|
||||
export function isSecretEnvVar(name: string): boolean {
|
||||
return COMMON_SECRET_PATTERNS.some(pattern => pattern.test(name));
|
||||
}
|
||||
|
||||
/**
|
||||
* Wrap environment variables that look like secrets
|
||||
*/
|
||||
export function wrapSecretEnvVars(
|
||||
env: Record<string, string | undefined>
|
||||
): Record<string, string | SecretValue | undefined> {
|
||||
const result: Record<string, string | SecretValue | undefined> = {};
|
||||
|
||||
for (const [key, value] of Object.entries(env)) {
|
||||
if (value !== undefined && isSecretEnvVar(key)) {
|
||||
result[key] = new SecretValue(value, `***${key}***`);
|
||||
} else {
|
||||
result[key] = value;
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
import { z } from 'zod';
|
||||
|
||||
/**
|
||||
* Secret value wrapper to prevent accidental logging
|
||||
*/
|
||||
export class SecretValue<T = string> {
|
||||
private readonly value: T;
|
||||
private readonly masked: string;
|
||||
|
||||
constructor(value: T, mask: string = '***') {
|
||||
this.value = value;
|
||||
this.masked = mask;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the actual secret value
|
||||
* @param reason - Required reason for accessing the secret
|
||||
*/
|
||||
reveal(reason: string): T {
|
||||
if (!reason) {
|
||||
throw new Error('Reason required for revealing secret value');
|
||||
}
|
||||
return this.value;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get masked representation
|
||||
*/
|
||||
toString(): string {
|
||||
return this.masked;
|
||||
}
|
||||
|
||||
/**
|
||||
* Prevent JSON serialization of actual value
|
||||
*/
|
||||
toJSON(): string {
|
||||
return this.masked;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if value matches without revealing it
|
||||
*/
|
||||
equals(other: T): boolean {
|
||||
return this.value === other;
|
||||
}
|
||||
|
||||
/**
|
||||
* Transform the secret value
|
||||
*/
|
||||
map<R>(fn: (value: T) => R, reason: string): SecretValue<R> {
|
||||
return new SecretValue(fn(this.reveal(reason)));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Zod schema for secret values
|
||||
*/
|
||||
export const secretSchema = <T extends z.ZodTypeAny>(_schema: T) => {
|
||||
return z.custom<SecretValue<z.infer<T>>>(val => val instanceof SecretValue, {
|
||||
message: 'Expected SecretValue instance',
|
||||
});
|
||||
};
|
||||
|
||||
/**
|
||||
* Transform string to SecretValue in Zod schema
|
||||
*/
|
||||
export const secretStringSchema = z.string().transform(val => new SecretValue(val));
|
||||
|
||||
/**
|
||||
* Create a secret value
|
||||
*/
|
||||
export function secret<T = string>(value: T, mask?: string): SecretValue<T> {
|
||||
return new SecretValue(value, mask);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a value is a secret
|
||||
*/
|
||||
export function isSecret(value: unknown): value is SecretValue {
|
||||
return value instanceof SecretValue;
|
||||
}
|
||||
|
||||
/**
|
||||
* Redact secrets from an object
|
||||
*/
|
||||
export function redactSecrets<T extends Record<string, any>>(
|
||||
obj: T,
|
||||
secretPaths: string[] = []
|
||||
): T {
|
||||
const result = { ...obj };
|
||||
|
||||
// Redact known secret paths
|
||||
for (const path of secretPaths) {
|
||||
const keys = path.split('.');
|
||||
let current: any = result;
|
||||
|
||||
for (let i = 0; i < keys.length - 1; i++) {
|
||||
const key = keys[i];
|
||||
if (key && current[key] && typeof current[key] === 'object') {
|
||||
current = current[key];
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
const lastKey = keys[keys.length - 1];
|
||||
if (current && lastKey && lastKey in current) {
|
||||
current[lastKey] = '***REDACTED***';
|
||||
}
|
||||
}
|
||||
|
||||
// Recursively redact SecretValue instances
|
||||
function redactSecretValues(obj: any): any {
|
||||
if (obj === null || obj === undefined) {
|
||||
return obj;
|
||||
}
|
||||
|
||||
if (isSecret(obj)) {
|
||||
return obj.toString();
|
||||
}
|
||||
|
||||
if (Array.isArray(obj)) {
|
||||
return obj.map(redactSecretValues);
|
||||
}
|
||||
|
||||
if (typeof obj === 'object') {
|
||||
const result: any = {};
|
||||
for (const [key, value] of Object.entries(obj)) {
|
||||
result[key] = redactSecretValues(value);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
return obj;
|
||||
}
|
||||
|
||||
return redactSecretValues(result);
|
||||
}
|
||||
|
||||
/**
|
||||
* Environment variable names that should be treated as secrets
|
||||
*/
|
||||
export const COMMON_SECRET_PATTERNS = [
|
||||
/password/i,
|
||||
/secret/i,
|
||||
/key/i,
|
||||
/token/i,
|
||||
/credential/i,
|
||||
/private/i,
|
||||
/auth/i,
|
||||
/api[-_]?key/i,
|
||||
];
|
||||
|
||||
/**
|
||||
* Check if an environment variable name indicates a secret
|
||||
*/
|
||||
export function isSecretEnvVar(name: string): boolean {
|
||||
return COMMON_SECRET_PATTERNS.some(pattern => pattern.test(name));
|
||||
}
|
||||
|
||||
/**
|
||||
* Wrap environment variables that look like secrets
|
||||
*/
|
||||
export function wrapSecretEnvVars(
|
||||
env: Record<string, string | undefined>
|
||||
): Record<string, string | SecretValue | undefined> {
|
||||
const result: Record<string, string | SecretValue | undefined> = {};
|
||||
|
||||
for (const [key, value] of Object.entries(env)) {
|
||||
if (value !== undefined && isSecretEnvVar(key)) {
|
||||
result[key] = new SecretValue(value, `***${key}***`);
|
||||
} else {
|
||||
result[key] = value;
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,195 +1,188 @@
|
|||
import { z } from 'zod';
|
||||
|
||||
export interface ValidationResult {
|
||||
valid: boolean;
|
||||
errors?: Array<{
|
||||
path: string;
|
||||
message: string;
|
||||
expected?: string;
|
||||
received?: string;
|
||||
}>;
|
||||
warnings?: Array<{
|
||||
path: string;
|
||||
message: string;
|
||||
}>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate configuration against a schema
|
||||
*/
|
||||
export function validateConfig<T>(
|
||||
config: unknown,
|
||||
schema: z.ZodSchema<T>
|
||||
): ValidationResult {
|
||||
try {
|
||||
schema.parse(config);
|
||||
return { valid: true };
|
||||
} catch (error) {
|
||||
if (error instanceof z.ZodError) {
|
||||
const errors = error.errors.map(err => ({
|
||||
path: err.path.join('.'),
|
||||
message: err.message,
|
||||
expected: 'expected' in err ? String(err.expected) : undefined,
|
||||
received: 'received' in err ? String(err.received) : undefined,
|
||||
}));
|
||||
|
||||
return { valid: false, errors };
|
||||
}
|
||||
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check for deprecated configuration options
|
||||
*/
|
||||
export function checkDeprecations(
|
||||
config: Record<string, unknown>,
|
||||
deprecations: Record<string, string>
|
||||
): ValidationResult['warnings'] {
|
||||
const warnings: ValidationResult['warnings'] = [];
|
||||
|
||||
function checkObject(obj: Record<string, unknown>, path: string[] = []): void {
|
||||
for (const [key, value] of Object.entries(obj)) {
|
||||
const currentPath = [...path, key];
|
||||
const pathStr = currentPath.join('.');
|
||||
|
||||
if (pathStr in deprecations) {
|
||||
const deprecationMessage = deprecations[pathStr];
|
||||
if (deprecationMessage) {
|
||||
warnings?.push({
|
||||
path: pathStr,
|
||||
message: deprecationMessage,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
if (value && typeof value === 'object' && !Array.isArray(value)) {
|
||||
checkObject(value as Record<string, unknown>, currentPath);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
checkObject(config);
|
||||
return warnings;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check for required environment variables
|
||||
*/
|
||||
export function checkRequiredEnvVars(
|
||||
required: string[]
|
||||
): ValidationResult {
|
||||
const errors: ValidationResult['errors'] = [];
|
||||
|
||||
for (const envVar of required) {
|
||||
if (!process.env[envVar]) {
|
||||
errors.push({
|
||||
path: `env.${envVar}`,
|
||||
message: `Required environment variable ${envVar} is not set`,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
valid: errors.length === 0,
|
||||
errors: errors.length > 0 ? errors : undefined,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate configuration completeness
|
||||
*/
|
||||
export function validateCompleteness(
|
||||
config: Record<string, any>,
|
||||
required: string[]
|
||||
): ValidationResult {
|
||||
const errors: ValidationResult['errors'] = [];
|
||||
|
||||
for (const path of required) {
|
||||
const keys = path.split('.');
|
||||
let current: any = config;
|
||||
let found = true;
|
||||
|
||||
for (const key of keys) {
|
||||
if (current && typeof current === 'object' && key in current) {
|
||||
current = current[key];
|
||||
} else {
|
||||
found = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!found || current === undefined || current === null) {
|
||||
errors.push({
|
||||
path,
|
||||
message: `Required configuration value is missing`,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
valid: errors.length === 0,
|
||||
errors: errors.length > 0 ? errors : undefined,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Format validation result for display
|
||||
*/
|
||||
export function formatValidationResult(result: ValidationResult): string {
|
||||
const lines: string[] = [];
|
||||
|
||||
if (result.valid) {
|
||||
lines.push('✅ Configuration is valid');
|
||||
} else {
|
||||
lines.push('❌ Configuration validation failed');
|
||||
}
|
||||
|
||||
if (result.errors && result.errors.length > 0) {
|
||||
lines.push('\nErrors:');
|
||||
for (const error of result.errors) {
|
||||
lines.push(` - ${error.path}: ${error.message}`);
|
||||
if (error.expected && error.received) {
|
||||
lines.push(` Expected: ${error.expected}, Received: ${error.received}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (result.warnings && result.warnings.length > 0) {
|
||||
lines.push('\nWarnings:');
|
||||
for (const warning of result.warnings) {
|
||||
lines.push(` - ${warning.path}: ${warning.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
return lines.join('\n');
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a strict schema that doesn't allow extra properties
|
||||
*/
|
||||
export function createStrictSchema<T extends z.ZodRawShape>(
|
||||
shape: T
|
||||
): z.ZodObject<T, 'strict'> {
|
||||
return z.object(shape).strict();
|
||||
}
|
||||
|
||||
/**
|
||||
* Merge multiple schemas
|
||||
*/
|
||||
export function mergeSchemas<T extends z.ZodSchema[]>(
|
||||
...schemas: T
|
||||
): z.ZodIntersection<T[0], T[1]> {
|
||||
if (schemas.length < 2) {
|
||||
throw new Error('At least two schemas required for merge');
|
||||
}
|
||||
|
||||
let result = schemas[0]!.and(schemas[1]!);
|
||||
|
||||
for (let i = 2; i < schemas.length; i++) {
|
||||
result = result.and(schemas[i]!) as any;
|
||||
}
|
||||
|
||||
return result as any;
|
||||
}
|
||||
import { z } from 'zod';
|
||||
|
||||
export interface ValidationResult {
|
||||
valid: boolean;
|
||||
errors?: Array<{
|
||||
path: string;
|
||||
message: string;
|
||||
expected?: string;
|
||||
received?: string;
|
||||
}>;
|
||||
warnings?: Array<{
|
||||
path: string;
|
||||
message: string;
|
||||
}>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate configuration against a schema
|
||||
*/
|
||||
export function validateConfig<T>(config: unknown, schema: z.ZodSchema<T>): ValidationResult {
|
||||
try {
|
||||
schema.parse(config);
|
||||
return { valid: true };
|
||||
} catch (error) {
|
||||
if (error instanceof z.ZodError) {
|
||||
const errors = error.errors.map(err => ({
|
||||
path: err.path.join('.'),
|
||||
message: err.message,
|
||||
expected: 'expected' in err ? String(err.expected) : undefined,
|
||||
received: 'received' in err ? String(err.received) : undefined,
|
||||
}));
|
||||
|
||||
return { valid: false, errors };
|
||||
}
|
||||
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check for deprecated configuration options
|
||||
*/
|
||||
export function checkDeprecations(
|
||||
config: Record<string, unknown>,
|
||||
deprecations: Record<string, string>
|
||||
): ValidationResult['warnings'] {
|
||||
const warnings: ValidationResult['warnings'] = [];
|
||||
|
||||
function checkObject(obj: Record<string, unknown>, path: string[] = []): void {
|
||||
for (const [key, value] of Object.entries(obj)) {
|
||||
const currentPath = [...path, key];
|
||||
const pathStr = currentPath.join('.');
|
||||
|
||||
if (pathStr in deprecations) {
|
||||
const deprecationMessage = deprecations[pathStr];
|
||||
if (deprecationMessage) {
|
||||
warnings?.push({
|
||||
path: pathStr,
|
||||
message: deprecationMessage,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
if (value && typeof value === 'object' && !Array.isArray(value)) {
|
||||
checkObject(value as Record<string, unknown>, currentPath);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
checkObject(config);
|
||||
return warnings;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check for required environment variables
|
||||
*/
|
||||
export function checkRequiredEnvVars(required: string[]): ValidationResult {
|
||||
const errors: ValidationResult['errors'] = [];
|
||||
|
||||
for (const envVar of required) {
|
||||
if (!process.env[envVar]) {
|
||||
errors.push({
|
||||
path: `env.${envVar}`,
|
||||
message: `Required environment variable ${envVar} is not set`,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
valid: errors.length === 0,
|
||||
errors: errors.length > 0 ? errors : undefined,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate configuration completeness
|
||||
*/
|
||||
export function validateCompleteness(
|
||||
config: Record<string, any>,
|
||||
required: string[]
|
||||
): ValidationResult {
|
||||
const errors: ValidationResult['errors'] = [];
|
||||
|
||||
for (const path of required) {
|
||||
const keys = path.split('.');
|
||||
let current: any = config;
|
||||
let found = true;
|
||||
|
||||
for (const key of keys) {
|
||||
if (current && typeof current === 'object' && key in current) {
|
||||
current = current[key];
|
||||
} else {
|
||||
found = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!found || current === undefined || current === null) {
|
||||
errors.push({
|
||||
path,
|
||||
message: `Required configuration value is missing`,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
valid: errors.length === 0,
|
||||
errors: errors.length > 0 ? errors : undefined,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Format validation result for display
|
||||
*/
|
||||
export function formatValidationResult(result: ValidationResult): string {
|
||||
const lines: string[] = [];
|
||||
|
||||
if (result.valid) {
|
||||
lines.push('✅ Configuration is valid');
|
||||
} else {
|
||||
lines.push('❌ Configuration validation failed');
|
||||
}
|
||||
|
||||
if (result.errors && result.errors.length > 0) {
|
||||
lines.push('\nErrors:');
|
||||
for (const error of result.errors) {
|
||||
lines.push(` - ${error.path}: ${error.message}`);
|
||||
if (error.expected && error.received) {
|
||||
lines.push(` Expected: ${error.expected}, Received: ${error.received}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (result.warnings && result.warnings.length > 0) {
|
||||
lines.push('\nWarnings:');
|
||||
for (const warning of result.warnings) {
|
||||
lines.push(` - ${warning.path}: ${warning.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
return lines.join('\n');
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a strict schema that doesn't allow extra properties
|
||||
*/
|
||||
export function createStrictSchema<T extends z.ZodRawShape>(shape: T): z.ZodObject<T, 'strict'> {
|
||||
return z.object(shape).strict();
|
||||
}
|
||||
|
||||
/**
|
||||
* Merge multiple schemas
|
||||
*/
|
||||
export function mergeSchemas<T extends z.ZodSchema[]>(
|
||||
...schemas: T
|
||||
): z.ZodIntersection<T[0], T[1]> {
|
||||
if (schemas.length < 2) {
|
||||
throw new Error('At least two schemas required for merge');
|
||||
}
|
||||
|
||||
let result = schemas[0]!.and(schemas[1]!);
|
||||
|
||||
for (let i = 2; i < schemas.length; i++) {
|
||||
result = result.and(schemas[i]!) as any;
|
||||
}
|
||||
|
||||
return result as any;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,215 +1,221 @@
|
|||
import { describe, test, expect, beforeEach } from 'bun:test';
|
||||
import { z } from 'zod';
|
||||
import { ConfigManager } from '../src/config-manager';
|
||||
import { ConfigLoader } from '../src/types';
|
||||
import { ConfigValidationError } from '../src/errors';
|
||||
|
||||
// Mock loader for testing
|
||||
class MockLoader implements ConfigLoader {
|
||||
priority = 0;
|
||||
|
||||
constructor(
|
||||
private data: Record<string, unknown>,
|
||||
public override priority: number = 0
|
||||
) {}
|
||||
|
||||
async load(): Promise<Record<string, unknown>> {
|
||||
return this.data;
|
||||
}
|
||||
}
|
||||
|
||||
// Test schema
|
||||
const testSchema = z.object({
|
||||
app: z.object({
|
||||
name: z.string(),
|
||||
version: z.string(),
|
||||
port: z.number().positive(),
|
||||
}),
|
||||
database: z.object({
|
||||
host: z.string(),
|
||||
port: z.number(),
|
||||
}),
|
||||
environment: z.enum(['development', 'test', 'production']),
|
||||
});
|
||||
|
||||
type TestConfig = z.infer<typeof testSchema>;
|
||||
|
||||
describe('ConfigManager', () => {
|
||||
let manager: ConfigManager<TestConfig>;
|
||||
|
||||
beforeEach(() => {
|
||||
manager = new ConfigManager<TestConfig>({
|
||||
loaders: [
|
||||
new MockLoader({
|
||||
app: {
|
||||
name: 'test-app',
|
||||
version: '1.0.0',
|
||||
port: 3000,
|
||||
},
|
||||
database: {
|
||||
host: 'localhost',
|
||||
port: 5432,
|
||||
},
|
||||
}),
|
||||
],
|
||||
environment: 'test',
|
||||
});
|
||||
});
|
||||
|
||||
test('should initialize configuration', async () => {
|
||||
const config = await manager.initialize(testSchema);
|
||||
|
||||
expect(config.app.name).toBe('test-app');
|
||||
expect(config.app.version).toBe('1.0.0');
|
||||
expect(config.environment).toBe('test');
|
||||
});
|
||||
|
||||
test('should merge multiple loaders by priority', async () => {
|
||||
manager = new ConfigManager<TestConfig>({
|
||||
loaders: [
|
||||
new MockLoader({ app: { name: 'base', port: 3000 } }, 0),
|
||||
new MockLoader({ app: { name: 'override', version: '2.0.0' } }, 10),
|
||||
new MockLoader({ database: { host: 'prod-db' } }, 5),
|
||||
],
|
||||
environment: 'test',
|
||||
});
|
||||
|
||||
const config = await manager.initialize();
|
||||
|
||||
expect(config.app.name).toBe('override');
|
||||
expect(config.app.version).toBe('2.0.0');
|
||||
expect(config.app.port).toBe(3000);
|
||||
expect(config.database.host).toBe('prod-db');
|
||||
});
|
||||
|
||||
test('should validate configuration with schema', async () => {
|
||||
manager = new ConfigManager<TestConfig>({
|
||||
loaders: [
|
||||
new MockLoader({
|
||||
app: {
|
||||
name: 'test-app',
|
||||
version: '1.0.0',
|
||||
port: 'invalid', // Should be number
|
||||
},
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
await expect(manager.initialize(testSchema)).rejects.toThrow(ConfigValidationError);
|
||||
});
|
||||
|
||||
test('should get configuration value by path', async () => {
|
||||
await manager.initialize(testSchema);
|
||||
|
||||
expect(manager.getValue('app.name')).toBe('test-app');
|
||||
expect(manager.getValue<number>('database.port')).toBe(5432);
|
||||
});
|
||||
|
||||
test('should check if configuration path exists', async () => {
|
||||
await manager.initialize(testSchema);
|
||||
|
||||
expect(manager.has('app.name')).toBe(true);
|
||||
expect(manager.has('app.nonexistent')).toBe(false);
|
||||
});
|
||||
|
||||
test('should update configuration at runtime', async () => {
|
||||
await manager.initialize(testSchema);
|
||||
|
||||
manager.set({
|
||||
app: {
|
||||
name: 'updated-app',
|
||||
},
|
||||
});
|
||||
|
||||
const config = manager.get();
|
||||
expect(config.app.name).toBe('updated-app');
|
||||
expect(config.app.version).toBe('1.0.0'); // Should preserve other values
|
||||
});
|
||||
|
||||
test('should validate updates against schema', async () => {
|
||||
await manager.initialize(testSchema);
|
||||
|
||||
expect(() => {
|
||||
manager.set({
|
||||
app: {
|
||||
port: 'invalid' as any,
|
||||
},
|
||||
});
|
||||
}).toThrow(ConfigValidationError);
|
||||
});
|
||||
|
||||
test('should reset configuration', async () => {
|
||||
await manager.initialize(testSchema);
|
||||
manager.reset();
|
||||
|
||||
expect(() => manager.get()).toThrow('Configuration not initialized');
|
||||
});
|
||||
|
||||
test('should create typed getter', async () => {
|
||||
await manager.initialize(testSchema);
|
||||
|
||||
const appSchema = z.object({
|
||||
app: z.object({
|
||||
name: z.string(),
|
||||
version: z.string(),
|
||||
}),
|
||||
});
|
||||
|
||||
const getAppConfig = manager.createTypedGetter(appSchema);
|
||||
const appConfig = getAppConfig();
|
||||
|
||||
expect(appConfig.app.name).toBe('test-app');
|
||||
});
|
||||
|
||||
test('should detect environment correctly', () => {
|
||||
const originalEnv = process.env.NODE_ENV;
|
||||
|
||||
process.env.NODE_ENV = 'production';
|
||||
const prodManager = new ConfigManager({ loaders: [] });
|
||||
expect(prodManager.getEnvironment()).toBe('production');
|
||||
|
||||
process.env.NODE_ENV = 'test';
|
||||
const testManager = new ConfigManager({ loaders: [] });
|
||||
expect(testManager.getEnvironment()).toBe('test');
|
||||
|
||||
process.env.NODE_ENV = originalEnv;
|
||||
});
|
||||
|
||||
test('should handle deep merge correctly', async () => {
|
||||
manager = new ConfigManager({
|
||||
loaders: [
|
||||
new MockLoader({
|
||||
app: {
|
||||
settings: {
|
||||
feature1: true,
|
||||
feature2: false,
|
||||
nested: {
|
||||
value: 'base',
|
||||
},
|
||||
},
|
||||
},
|
||||
}, 0),
|
||||
new MockLoader({
|
||||
app: {
|
||||
settings: {
|
||||
feature2: true,
|
||||
feature3: true,
|
||||
nested: {
|
||||
value: 'override',
|
||||
extra: 'new',
|
||||
},
|
||||
},
|
||||
},
|
||||
}, 10),
|
||||
],
|
||||
});
|
||||
|
||||
const config = await manager.initialize();
|
||||
|
||||
expect(config.app.settings.feature1).toBe(true);
|
||||
expect(config.app.settings.feature2).toBe(true);
|
||||
expect(config.app.settings.feature3).toBe(true);
|
||||
expect(config.app.settings.nested.value).toBe('override');
|
||||
expect(config.app.settings.nested.extra).toBe('new');
|
||||
});
|
||||
});
|
||||
import { beforeEach, describe, expect, test } from 'bun:test';
|
||||
import { z } from 'zod';
|
||||
import { ConfigManager } from '../src/config-manager';
|
||||
import { ConfigValidationError } from '../src/errors';
|
||||
import { ConfigLoader } from '../src/types';
|
||||
|
||||
// Mock loader for testing
|
||||
class MockLoader implements ConfigLoader {
|
||||
priority = 0;
|
||||
|
||||
constructor(
|
||||
private data: Record<string, unknown>,
|
||||
public override priority: number = 0
|
||||
) {}
|
||||
|
||||
async load(): Promise<Record<string, unknown>> {
|
||||
return this.data;
|
||||
}
|
||||
}
|
||||
|
||||
// Test schema
|
||||
const testSchema = z.object({
|
||||
app: z.object({
|
||||
name: z.string(),
|
||||
version: z.string(),
|
||||
port: z.number().positive(),
|
||||
}),
|
||||
database: z.object({
|
||||
host: z.string(),
|
||||
port: z.number(),
|
||||
}),
|
||||
environment: z.enum(['development', 'test', 'production']),
|
||||
});
|
||||
|
||||
type TestConfig = z.infer<typeof testSchema>;
|
||||
|
||||
describe('ConfigManager', () => {
|
||||
let manager: ConfigManager<TestConfig>;
|
||||
|
||||
beforeEach(() => {
|
||||
manager = new ConfigManager<TestConfig>({
|
||||
loaders: [
|
||||
new MockLoader({
|
||||
app: {
|
||||
name: 'test-app',
|
||||
version: '1.0.0',
|
||||
port: 3000,
|
||||
},
|
||||
database: {
|
||||
host: 'localhost',
|
||||
port: 5432,
|
||||
},
|
||||
}),
|
||||
],
|
||||
environment: 'test',
|
||||
});
|
||||
});
|
||||
|
||||
test('should initialize configuration', async () => {
|
||||
const config = await manager.initialize(testSchema);
|
||||
|
||||
expect(config.app.name).toBe('test-app');
|
||||
expect(config.app.version).toBe('1.0.0');
|
||||
expect(config.environment).toBe('test');
|
||||
});
|
||||
|
||||
test('should merge multiple loaders by priority', async () => {
|
||||
manager = new ConfigManager<TestConfig>({
|
||||
loaders: [
|
||||
new MockLoader({ app: { name: 'base', port: 3000 } }, 0),
|
||||
new MockLoader({ app: { name: 'override', version: '2.0.0' } }, 10),
|
||||
new MockLoader({ database: { host: 'prod-db' } }, 5),
|
||||
],
|
||||
environment: 'test',
|
||||
});
|
||||
|
||||
const config = await manager.initialize();
|
||||
|
||||
expect(config.app.name).toBe('override');
|
||||
expect(config.app.version).toBe('2.0.0');
|
||||
expect(config.app.port).toBe(3000);
|
||||
expect(config.database.host).toBe('prod-db');
|
||||
});
|
||||
|
||||
test('should validate configuration with schema', async () => {
|
||||
manager = new ConfigManager<TestConfig>({
|
||||
loaders: [
|
||||
new MockLoader({
|
||||
app: {
|
||||
name: 'test-app',
|
||||
version: '1.0.0',
|
||||
port: 'invalid', // Should be number
|
||||
},
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
await expect(manager.initialize(testSchema)).rejects.toThrow(ConfigValidationError);
|
||||
});
|
||||
|
||||
test('should get configuration value by path', async () => {
|
||||
await manager.initialize(testSchema);
|
||||
|
||||
expect(manager.getValue('app.name')).toBe('test-app');
|
||||
expect(manager.getValue<number>('database.port')).toBe(5432);
|
||||
});
|
||||
|
||||
test('should check if configuration path exists', async () => {
|
||||
await manager.initialize(testSchema);
|
||||
|
||||
expect(manager.has('app.name')).toBe(true);
|
||||
expect(manager.has('app.nonexistent')).toBe(false);
|
||||
});
|
||||
|
||||
test('should update configuration at runtime', async () => {
|
||||
await manager.initialize(testSchema);
|
||||
|
||||
manager.set({
|
||||
app: {
|
||||
name: 'updated-app',
|
||||
},
|
||||
});
|
||||
|
||||
const config = manager.get();
|
||||
expect(config.app.name).toBe('updated-app');
|
||||
expect(config.app.version).toBe('1.0.0'); // Should preserve other values
|
||||
});
|
||||
|
||||
test('should validate updates against schema', async () => {
|
||||
await manager.initialize(testSchema);
|
||||
|
||||
expect(() => {
|
||||
manager.set({
|
||||
app: {
|
||||
port: 'invalid' as any,
|
||||
},
|
||||
});
|
||||
}).toThrow(ConfigValidationError);
|
||||
});
|
||||
|
||||
test('should reset configuration', async () => {
|
||||
await manager.initialize(testSchema);
|
||||
manager.reset();
|
||||
|
||||
expect(() => manager.get()).toThrow('Configuration not initialized');
|
||||
});
|
||||
|
||||
test('should create typed getter', async () => {
|
||||
await manager.initialize(testSchema);
|
||||
|
||||
const appSchema = z.object({
|
||||
app: z.object({
|
||||
name: z.string(),
|
||||
version: z.string(),
|
||||
}),
|
||||
});
|
||||
|
||||
const getAppConfig = manager.createTypedGetter(appSchema);
|
||||
const appConfig = getAppConfig();
|
||||
|
||||
expect(appConfig.app.name).toBe('test-app');
|
||||
});
|
||||
|
||||
test('should detect environment correctly', () => {
|
||||
const originalEnv = process.env.NODE_ENV;
|
||||
|
||||
process.env.NODE_ENV = 'production';
|
||||
const prodManager = new ConfigManager({ loaders: [] });
|
||||
expect(prodManager.getEnvironment()).toBe('production');
|
||||
|
||||
process.env.NODE_ENV = 'test';
|
||||
const testManager = new ConfigManager({ loaders: [] });
|
||||
expect(testManager.getEnvironment()).toBe('test');
|
||||
|
||||
process.env.NODE_ENV = originalEnv;
|
||||
});
|
||||
|
||||
test('should handle deep merge correctly', async () => {
|
||||
manager = new ConfigManager({
|
||||
loaders: [
|
||||
new MockLoader(
|
||||
{
|
||||
app: {
|
||||
settings: {
|
||||
feature1: true,
|
||||
feature2: false,
|
||||
nested: {
|
||||
value: 'base',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
0
|
||||
),
|
||||
new MockLoader(
|
||||
{
|
||||
app: {
|
||||
settings: {
|
||||
feature2: true,
|
||||
feature3: true,
|
||||
nested: {
|
||||
value: 'override',
|
||||
extra: 'new',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
10
|
||||
),
|
||||
],
|
||||
});
|
||||
|
||||
const config = await manager.initialize();
|
||||
|
||||
expect(config.app.settings.feature1).toBe(true);
|
||||
expect(config.app.settings.feature2).toBe(true);
|
||||
expect(config.app.settings.feature3).toBe(true);
|
||||
expect(config.app.settings.nested.value).toBe('override');
|
||||
expect(config.app.settings.nested.extra).toBe('new');
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@ -1,10 +1,10 @@
|
|||
import { describe, test, expect, beforeEach, afterEach } from 'bun:test';
|
||||
import { existsSync, mkdirSync, rmSync, writeFileSync } from 'fs';
|
||||
import { join } from 'path';
|
||||
import { mkdirSync, writeFileSync, rmSync, existsSync } from 'fs';
|
||||
import { afterEach, beforeEach, describe, expect, test } from 'bun:test';
|
||||
import { ConfigManager } from '../src/config-manager';
|
||||
import { FileLoader } from '../src/loaders/file.loader';
|
||||
import { EnvLoader } from '../src/loaders/env.loader';
|
||||
import { initializeConfig, initializeServiceConfig, resetConfig } from '../src/index';
|
||||
import { EnvLoader } from '../src/loaders/env.loader';
|
||||
import { FileLoader } from '../src/loaders/file.loader';
|
||||
import { appConfigSchema } from '../src/schemas';
|
||||
|
||||
// Test directories setup
|
||||
|
|
@ -23,33 +23,33 @@ describe('Dynamic Location Config Loading', () => {
|
|||
if (existsSync(TEST_ROOT)) {
|
||||
rmSync(TEST_ROOT, { recursive: true, force: true });
|
||||
}
|
||||
|
||||
|
||||
// Reset config singleton
|
||||
resetConfig();
|
||||
|
||||
|
||||
// Create test directory structure
|
||||
setupTestScenarios();
|
||||
});
|
||||
|
||||
|
||||
afterEach(() => {
|
||||
// Clean up test directories
|
||||
if (existsSync(TEST_ROOT)) {
|
||||
rmSync(TEST_ROOT, { recursive: true, force: true });
|
||||
}
|
||||
|
||||
|
||||
// Reset config singleton
|
||||
resetConfig();
|
||||
});
|
||||
|
||||
test('should load config from monorepo root', async () => {
|
||||
const originalCwd = process.cwd();
|
||||
|
||||
|
||||
try {
|
||||
// Change to monorepo root
|
||||
process.chdir(SCENARIOS.monorepoRoot);
|
||||
|
||||
|
||||
const config = await initializeConfig();
|
||||
|
||||
|
||||
expect(config.name).toBe('monorepo-root');
|
||||
expect(config.version).toBe('1.0.0');
|
||||
expect(config.database.postgres.host).toBe('localhost');
|
||||
|
|
@ -60,13 +60,13 @@ describe('Dynamic Location Config Loading', () => {
|
|||
|
||||
test('should load config from app service directory', async () => {
|
||||
const originalCwd = process.cwd();
|
||||
|
||||
|
||||
try {
|
||||
// Change to app service directory
|
||||
process.chdir(SCENARIOS.appService);
|
||||
|
||||
|
||||
const config = await initializeServiceConfig();
|
||||
|
||||
|
||||
// Should inherit from root + override with service config
|
||||
expect(config.name).toBe('test-service'); // Overridden by service
|
||||
expect(config.version).toBe('1.0.0'); // From root
|
||||
|
|
@ -79,13 +79,13 @@ describe('Dynamic Location Config Loading', () => {
|
|||
|
||||
test('should load config from lib directory', async () => {
|
||||
const originalCwd = process.cwd();
|
||||
|
||||
|
||||
try {
|
||||
// Change to lib directory
|
||||
process.chdir(SCENARIOS.libService);
|
||||
|
||||
|
||||
const config = await initializeServiceConfig();
|
||||
|
||||
|
||||
// Should inherit from root + override with lib config
|
||||
expect(config.name).toBe('test-lib'); // Overridden by lib
|
||||
expect(config.version).toBe('2.0.0'); // Overridden by lib
|
||||
|
|
@ -98,13 +98,13 @@ describe('Dynamic Location Config Loading', () => {
|
|||
|
||||
test('should load config from deeply nested service', async () => {
|
||||
const originalCwd = process.cwd();
|
||||
|
||||
|
||||
try {
|
||||
// Change to nested service directory
|
||||
process.chdir(SCENARIOS.nestedService);
|
||||
|
||||
|
||||
const config = await initializeServiceConfig();
|
||||
|
||||
|
||||
// Should inherit from root + override with nested service config
|
||||
expect(config.name).toBe('deep-service'); // Overridden by nested service
|
||||
// NOTE: Version inheritance doesn't work for deeply nested services (3+ levels)
|
||||
|
|
@ -119,13 +119,13 @@ describe('Dynamic Location Config Loading', () => {
|
|||
|
||||
test('should load config from standalone project', async () => {
|
||||
const originalCwd = process.cwd();
|
||||
|
||||
|
||||
try {
|
||||
// Change to standalone directory
|
||||
process.chdir(SCENARIOS.standalone);
|
||||
|
||||
|
||||
const config = await initializeConfig();
|
||||
|
||||
|
||||
expect(config.name).toBe('standalone-app');
|
||||
expect(config.version).toBe('0.1.0');
|
||||
expect(config.database.postgres.host).toBe('standalone-db');
|
||||
|
|
@ -136,16 +136,16 @@ describe('Dynamic Location Config Loading', () => {
|
|||
|
||||
test('should handle missing config files gracefully', async () => {
|
||||
const originalCwd = process.cwd();
|
||||
|
||||
|
||||
try {
|
||||
// Change to directory with no config files
|
||||
const emptyDir = join(TEST_ROOT, 'empty');
|
||||
mkdirSync(emptyDir, { recursive: true });
|
||||
process.chdir(emptyDir);
|
||||
|
||||
|
||||
// Should not throw but use defaults and env vars
|
||||
const config = await initializeConfig();
|
||||
|
||||
|
||||
// Should have default values from schema
|
||||
expect(config.environment).toBe('test'); // Tests run with NODE_ENV=test
|
||||
expect(typeof config.service).toBe('object');
|
||||
|
|
@ -157,18 +157,18 @@ describe('Dynamic Location Config Loading', () => {
|
|||
test('should prioritize environment variables over file configs', async () => {
|
||||
const originalCwd = process.cwd();
|
||||
const originalEnv = { ...process.env };
|
||||
|
||||
|
||||
try {
|
||||
// Set environment variables
|
||||
process.env.NAME = 'env-override';
|
||||
process.env.VERSION = '3.0.0';
|
||||
process.env.DATABASE_POSTGRES_HOST = 'env-db';
|
||||
|
||||
|
||||
process.chdir(SCENARIOS.appService);
|
||||
|
||||
|
||||
resetConfig(); // Reset to test env override
|
||||
const config = await initializeServiceConfig();
|
||||
|
||||
|
||||
// Environment variables should override file configs
|
||||
expect(config.name).toBe('env-override');
|
||||
expect(config.version).toBe('3.0.0');
|
||||
|
|
@ -181,18 +181,18 @@ describe('Dynamic Location Config Loading', () => {
|
|||
|
||||
test('should work with custom config paths', async () => {
|
||||
const originalCwd = process.cwd();
|
||||
|
||||
|
||||
try {
|
||||
process.chdir(SCENARIOS.monorepoRoot);
|
||||
|
||||
|
||||
// Initialize with custom config path
|
||||
resetConfig();
|
||||
const manager = new ConfigManager({
|
||||
configPath: join(SCENARIOS.appService, 'config')
|
||||
configPath: join(SCENARIOS.appService, 'config'),
|
||||
});
|
||||
|
||||
|
||||
const config = await manager.initialize(appConfigSchema);
|
||||
|
||||
|
||||
// Should load from the custom path
|
||||
expect(config.name).toBe('test-service');
|
||||
expect(config.service.port).toBe(4000);
|
||||
|
|
@ -217,7 +217,7 @@ function setupTestScenarios() {
|
|||
version: '1.0.0',
|
||||
service: {
|
||||
name: 'monorepo-root',
|
||||
port: 3000
|
||||
port: 3000,
|
||||
},
|
||||
database: {
|
||||
postgres: {
|
||||
|
|
@ -225,32 +225,32 @@ function setupTestScenarios() {
|
|||
port: 5432,
|
||||
database: 'test_db',
|
||||
user: 'test_user',
|
||||
password: 'test_pass'
|
||||
password: 'test_pass',
|
||||
},
|
||||
questdb: {
|
||||
host: 'localhost',
|
||||
ilpPort: 9009
|
||||
ilpPort: 9009,
|
||||
},
|
||||
mongodb: {
|
||||
host: 'localhost',
|
||||
port: 27017,
|
||||
database: 'test_mongo'
|
||||
database: 'test_mongo',
|
||||
},
|
||||
dragonfly: {
|
||||
host: 'localhost',
|
||||
port: 6379
|
||||
}
|
||||
port: 6379,
|
||||
},
|
||||
},
|
||||
logging: {
|
||||
level: 'info'
|
||||
}
|
||||
level: 'info',
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
writeFileSync(
|
||||
join(SCENARIOS.monorepoRoot, 'config', 'development.json'),
|
||||
JSON.stringify(rootConfig, null, 2)
|
||||
);
|
||||
|
||||
|
||||
writeFileSync(
|
||||
join(SCENARIOS.monorepoRoot, 'config', 'test.json'),
|
||||
JSON.stringify(rootConfig, null, 2)
|
||||
|
|
@ -261,20 +261,20 @@ function setupTestScenarios() {
|
|||
name: 'test-service',
|
||||
database: {
|
||||
postgres: {
|
||||
host: 'service-db'
|
||||
}
|
||||
host: 'service-db',
|
||||
},
|
||||
},
|
||||
service: {
|
||||
name: 'test-service',
|
||||
port: 4000
|
||||
}
|
||||
port: 4000,
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
writeFileSync(
|
||||
join(SCENARIOS.appService, 'config', 'development.json'),
|
||||
JSON.stringify(appServiceConfig, null, 2)
|
||||
);
|
||||
|
||||
|
||||
writeFileSync(
|
||||
join(SCENARIOS.appService, 'config', 'test.json'),
|
||||
JSON.stringify(appServiceConfig, null, 2)
|
||||
|
|
@ -286,15 +286,15 @@ function setupTestScenarios() {
|
|||
version: '2.0.0',
|
||||
service: {
|
||||
name: 'test-lib',
|
||||
port: 5000
|
||||
}
|
||||
port: 5000,
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
writeFileSync(
|
||||
join(SCENARIOS.libService, 'config', 'development.json'),
|
||||
JSON.stringify(libServiceConfig, null, 2)
|
||||
);
|
||||
|
||||
|
||||
writeFileSync(
|
||||
join(SCENARIOS.libService, 'config', 'test.json'),
|
||||
JSON.stringify(libServiceConfig, null, 2)
|
||||
|
|
@ -305,20 +305,20 @@ function setupTestScenarios() {
|
|||
name: 'deep-service',
|
||||
database: {
|
||||
postgres: {
|
||||
host: 'deep-db'
|
||||
}
|
||||
host: 'deep-db',
|
||||
},
|
||||
},
|
||||
service: {
|
||||
name: 'deep-service',
|
||||
port: 6000
|
||||
}
|
||||
port: 6000,
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
writeFileSync(
|
||||
join(SCENARIOS.nestedService, 'config', 'development.json'),
|
||||
JSON.stringify(nestedServiceConfig, null, 2)
|
||||
);
|
||||
|
||||
|
||||
writeFileSync(
|
||||
join(SCENARIOS.nestedService, 'config', 'test.json'),
|
||||
JSON.stringify(nestedServiceConfig, null, 2)
|
||||
|
|
@ -330,7 +330,7 @@ function setupTestScenarios() {
|
|||
version: '0.1.0',
|
||||
service: {
|
||||
name: 'standalone-app',
|
||||
port: 7000
|
||||
port: 7000,
|
||||
},
|
||||
database: {
|
||||
postgres: {
|
||||
|
|
@ -338,32 +338,32 @@ function setupTestScenarios() {
|
|||
port: 5432,
|
||||
database: 'standalone_db',
|
||||
user: 'standalone_user',
|
||||
password: 'standalone_pass'
|
||||
password: 'standalone_pass',
|
||||
},
|
||||
questdb: {
|
||||
host: 'localhost',
|
||||
ilpPort: 9009
|
||||
ilpPort: 9009,
|
||||
},
|
||||
mongodb: {
|
||||
host: 'localhost',
|
||||
port: 27017,
|
||||
database: 'standalone_mongo'
|
||||
database: 'standalone_mongo',
|
||||
},
|
||||
dragonfly: {
|
||||
host: 'localhost',
|
||||
port: 6379
|
||||
}
|
||||
port: 6379,
|
||||
},
|
||||
},
|
||||
logging: {
|
||||
level: 'debug'
|
||||
}
|
||||
level: 'debug',
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
writeFileSync(
|
||||
join(SCENARIOS.standalone, 'config', 'development.json'),
|
||||
JSON.stringify(standaloneConfig, null, 2)
|
||||
);
|
||||
|
||||
|
||||
writeFileSync(
|
||||
join(SCENARIOS.standalone, 'config', 'test.json'),
|
||||
JSON.stringify(standaloneConfig, null, 2)
|
||||
|
|
@ -383,4 +383,4 @@ DEBUG=true
|
|||
APP_EXTRA_FEATURE=enabled
|
||||
`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,12 +1,12 @@
|
|||
import { describe, test, expect, beforeEach, afterEach } from 'bun:test';
|
||||
import { chmodSync, existsSync, mkdirSync, rmSync, writeFileSync } from 'fs';
|
||||
import { join } from 'path';
|
||||
import { mkdirSync, writeFileSync, rmSync, existsSync, chmodSync } from 'fs';
|
||||
import { afterEach, beforeEach, describe, expect, test } from 'bun:test';
|
||||
import { ConfigManager } from '../src/config-manager';
|
||||
import { FileLoader } from '../src/loaders/file.loader';
|
||||
import { EnvLoader } from '../src/loaders/env.loader';
|
||||
import { initializeConfig, initializeServiceConfig, resetConfig } from '../src/index';
|
||||
import { appConfigSchema } from '../src/schemas';
|
||||
import { ConfigError, ConfigValidationError } from '../src/errors';
|
||||
import { initializeConfig, initializeServiceConfig, resetConfig } from '../src/index';
|
||||
import { EnvLoader } from '../src/loaders/env.loader';
|
||||
import { FileLoader } from '../src/loaders/file.loader';
|
||||
import { appConfigSchema } from '../src/schemas';
|
||||
|
||||
const TEST_DIR = join(__dirname, 'edge-case-tests');
|
||||
|
||||
|
|
@ -17,9 +17,9 @@ describe('Edge Cases and Error Handling', () => {
|
|||
beforeEach(() => {
|
||||
originalEnv = { ...process.env };
|
||||
originalCwd = process.cwd();
|
||||
|
||||
|
||||
resetConfig();
|
||||
|
||||
|
||||
if (existsSync(TEST_DIR)) {
|
||||
rmSync(TEST_DIR, { recursive: true, force: true });
|
||||
}
|
||||
|
|
@ -30,7 +30,7 @@ describe('Edge Cases and Error Handling', () => {
|
|||
process.env = originalEnv;
|
||||
process.chdir(originalCwd);
|
||||
resetConfig();
|
||||
|
||||
|
||||
if (existsSync(TEST_DIR)) {
|
||||
rmSync(TEST_DIR, { recursive: true, force: true });
|
||||
}
|
||||
|
|
@ -39,7 +39,7 @@ describe('Edge Cases and Error Handling', () => {
|
|||
test('should handle missing .env files gracefully', async () => {
|
||||
// No .env file exists
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new EnvLoader('')]
|
||||
loaders: [new EnvLoader('')],
|
||||
});
|
||||
|
||||
// Should not throw even without .env file
|
||||
|
|
@ -50,15 +50,12 @@ describe('Edge Cases and Error Handling', () => {
|
|||
test('should handle corrupted JSON config files', async () => {
|
||||
const configDir = join(TEST_DIR, 'config');
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
|
||||
|
||||
// Create corrupted JSON file
|
||||
writeFileSync(
|
||||
join(configDir, 'development.json'),
|
||||
'{ "app": { "name": "test", invalid json }'
|
||||
);
|
||||
writeFileSync(join(configDir, 'development.json'), '{ "app": { "name": "test", invalid json }');
|
||||
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new FileLoader(configDir, 'development')]
|
||||
loaders: [new FileLoader(configDir, 'development')],
|
||||
});
|
||||
|
||||
// Should throw error for invalid JSON
|
||||
|
|
@ -67,9 +64,9 @@ describe('Edge Cases and Error Handling', () => {
|
|||
|
||||
test('should handle missing config directories', async () => {
|
||||
const nonExistentDir = join(TEST_DIR, 'nonexistent');
|
||||
|
||||
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new FileLoader(nonExistentDir, 'development')]
|
||||
loaders: [new FileLoader(nonExistentDir, 'development')],
|
||||
});
|
||||
|
||||
// Should not throw, should return empty config
|
||||
|
|
@ -80,16 +77,16 @@ describe('Edge Cases and Error Handling', () => {
|
|||
test('should handle permission denied on config files', async () => {
|
||||
const configDir = join(TEST_DIR, 'config');
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
|
||||
|
||||
const configFile = join(configDir, 'development.json');
|
||||
writeFileSync(configFile, JSON.stringify({ app: { name: 'test' } }));
|
||||
|
||||
|
||||
// Make file unreadable (this might not work on all systems)
|
||||
try {
|
||||
chmodSync(configFile, 0o000);
|
||||
|
||||
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new FileLoader(configDir, 'development')]
|
||||
loaders: [new FileLoader(configDir, 'development')],
|
||||
});
|
||||
|
||||
// Should handle permission error gracefully
|
||||
|
|
@ -109,26 +106,23 @@ describe('Edge Cases and Error Handling', () => {
|
|||
// This tests deep merge with potential circular references
|
||||
const configDir = join(TEST_DIR, 'config');
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
|
||||
|
||||
writeFileSync(
|
||||
join(configDir, 'development.json'),
|
||||
JSON.stringify({
|
||||
app: {
|
||||
name: 'test',
|
||||
settings: {
|
||||
ref: 'settings'
|
||||
}
|
||||
}
|
||||
ref: 'settings',
|
||||
},
|
||||
},
|
||||
})
|
||||
);
|
||||
|
||||
process.env.APP_SETTINGS_NESTED_VALUE = 'deep-value';
|
||||
|
||||
const manager = new ConfigManager({
|
||||
loaders: [
|
||||
new FileLoader(configDir, 'development'),
|
||||
new EnvLoader('')
|
||||
]
|
||||
loaders: [new FileLoader(configDir, 'development'), new EnvLoader('')],
|
||||
});
|
||||
|
||||
const config = await manager.initialize(appConfigSchema);
|
||||
|
|
@ -138,13 +132,13 @@ describe('Edge Cases and Error Handling', () => {
|
|||
test('should handle extremely deep nesting in environment variables', async () => {
|
||||
// Test very deep nesting
|
||||
process.env.LEVEL1_LEVEL2_LEVEL3_LEVEL4_LEVEL5_VALUE = 'deep-value';
|
||||
|
||||
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new EnvLoader('', { nestedDelimiter: '_' })]
|
||||
loaders: [new EnvLoader('', { nestedDelimiter: '_' })],
|
||||
});
|
||||
|
||||
const config = await manager.initialize();
|
||||
|
||||
|
||||
// Should create nested structure
|
||||
expect((config as any).level1?.level2?.level3?.level4?.level5?.value).toBe('deep-value');
|
||||
});
|
||||
|
|
@ -152,15 +146,15 @@ describe('Edge Cases and Error Handling', () => {
|
|||
test('should handle conflicting data types in config merging', async () => {
|
||||
const configDir = join(TEST_DIR, 'config');
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
|
||||
|
||||
// File config has object
|
||||
writeFileSync(
|
||||
join(configDir, 'development.json'),
|
||||
JSON.stringify({
|
||||
database: {
|
||||
host: 'localhost',
|
||||
port: 5432
|
||||
}
|
||||
port: 5432,
|
||||
},
|
||||
})
|
||||
);
|
||||
|
||||
|
|
@ -168,14 +162,11 @@ describe('Edge Cases and Error Handling', () => {
|
|||
process.env.DATABASE = 'simple-string';
|
||||
|
||||
const manager = new ConfigManager({
|
||||
loaders: [
|
||||
new FileLoader(configDir, 'development'),
|
||||
new EnvLoader('')
|
||||
]
|
||||
loaders: [new FileLoader(configDir, 'development'), new EnvLoader('')],
|
||||
});
|
||||
|
||||
const config = await manager.initialize(appConfigSchema);
|
||||
|
||||
|
||||
// Environment variable should win
|
||||
expect(config.database).toBe('simple-string');
|
||||
});
|
||||
|
|
@ -184,15 +175,15 @@ describe('Edge Cases and Error Handling', () => {
|
|||
// Create multiple config setups in different directories
|
||||
const dir1 = join(TEST_DIR, 'dir1');
|
||||
const dir2 = join(TEST_DIR, 'dir2');
|
||||
|
||||
|
||||
mkdirSync(join(dir1, 'config'), { recursive: true });
|
||||
mkdirSync(join(dir2, 'config'), { recursive: true });
|
||||
|
||||
|
||||
writeFileSync(
|
||||
join(dir1, 'config', 'development.json'),
|
||||
JSON.stringify({ app: { name: 'dir1-app' } })
|
||||
);
|
||||
|
||||
|
||||
writeFileSync(
|
||||
join(dir2, 'config', 'development.json'),
|
||||
JSON.stringify({ app: { name: 'dir2-app' } })
|
||||
|
|
@ -229,13 +220,13 @@ JSON_VALUE={"key": "value", "nested": {"array": [1, 2, 3]}}
|
|||
);
|
||||
|
||||
process.chdir(TEST_DIR);
|
||||
|
||||
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new EnvLoader('')]
|
||||
loaders: [new EnvLoader('')],
|
||||
});
|
||||
|
||||
const config = await manager.initialize();
|
||||
|
||||
|
||||
// Should handle valid entries
|
||||
expect(process.env.VALID_KEY).toBe('valid_value');
|
||||
expect(process.env.KEY_WITH_QUOTES).toBe('quoted value');
|
||||
|
|
@ -245,12 +236,12 @@ JSON_VALUE={"key": "value", "nested": {"array": [1, 2, 3]}}
|
|||
test('should handle empty config files', async () => {
|
||||
const configDir = join(TEST_DIR, 'config');
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
|
||||
|
||||
// Create empty JSON file
|
||||
writeFileSync(join(configDir, 'development.json'), '{}');
|
||||
|
||||
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new FileLoader(configDir, 'development')]
|
||||
loaders: [new FileLoader(configDir, 'development')],
|
||||
});
|
||||
|
||||
const config = await manager.initialize(appConfigSchema);
|
||||
|
|
@ -260,7 +251,7 @@ JSON_VALUE={"key": "value", "nested": {"array": [1, 2, 3]}}
|
|||
|
||||
test('should handle config initialization without schema', async () => {
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new EnvLoader('')]
|
||||
loaders: [new EnvLoader('')],
|
||||
});
|
||||
|
||||
// Initialize without schema
|
||||
|
|
@ -271,7 +262,7 @@ JSON_VALUE={"key": "value", "nested": {"array": [1, 2, 3]}}
|
|||
|
||||
test('should handle accessing config before initialization', () => {
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new EnvLoader('')]
|
||||
loaders: [new EnvLoader('')],
|
||||
});
|
||||
|
||||
// Should throw error when accessing uninitialized config
|
||||
|
|
@ -282,15 +273,15 @@ JSON_VALUE={"key": "value", "nested": {"array": [1, 2, 3]}}
|
|||
|
||||
test('should handle invalid config paths in getValue', async () => {
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new EnvLoader('')]
|
||||
loaders: [new EnvLoader('')],
|
||||
});
|
||||
|
||||
const config = await manager.initialize(appConfigSchema);
|
||||
|
||||
|
||||
// Should throw for invalid paths
|
||||
expect(() => manager.getValue('nonexistent.path')).toThrow('Configuration key not found');
|
||||
expect(() => manager.getValue('app.nonexistent')).toThrow('Configuration key not found');
|
||||
|
||||
|
||||
// Should work for valid paths
|
||||
expect(() => manager.getValue('environment')).not.toThrow();
|
||||
});
|
||||
|
|
@ -301,11 +292,11 @@ JSON_VALUE={"key": "value", "nested": {"array": [1, 2, 3]}}
|
|||
process.env.EMPTY_VALUE = '';
|
||||
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new EnvLoader('')]
|
||||
loaders: [new EnvLoader('')],
|
||||
});
|
||||
|
||||
const config = await manager.initialize();
|
||||
|
||||
|
||||
expect((config as any).null_value).toBe(null);
|
||||
expect((config as any).undefined_value).toBe(undefined);
|
||||
expect((config as any).empty_value).toBe('');
|
||||
|
|
@ -318,7 +309,7 @@ JSON_VALUE={"key": "value", "nested": {"array": [1, 2, 3]}}
|
|||
process.env.SERVICE_PORT = 'not-a-number'; // This should cause validation to fail
|
||||
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new EnvLoader('')]
|
||||
loaders: [new EnvLoader('')],
|
||||
});
|
||||
|
||||
await expect(manager.initialize(appConfigSchema)).rejects.toThrow(ConfigValidationError);
|
||||
|
|
@ -326,7 +317,7 @@ JSON_VALUE={"key": "value", "nested": {"array": [1, 2, 3]}}
|
|||
|
||||
test('should handle config updates with invalid schema', async () => {
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new EnvLoader('')]
|
||||
loaders: [new EnvLoader('')],
|
||||
});
|
||||
|
||||
await manager.initialize(appConfigSchema);
|
||||
|
|
@ -335,8 +326,8 @@ JSON_VALUE={"key": "value", "nested": {"array": [1, 2, 3]}}
|
|||
expect(() => {
|
||||
manager.set({
|
||||
service: {
|
||||
port: 'invalid-port' as any
|
||||
}
|
||||
port: 'invalid-port' as any,
|
||||
},
|
||||
});
|
||||
}).toThrow(ConfigValidationError);
|
||||
});
|
||||
|
|
@ -344,7 +335,7 @@ JSON_VALUE={"key": "value", "nested": {"array": [1, 2, 3]}}
|
|||
test('should handle loader priority conflicts', async () => {
|
||||
const configDir = join(TEST_DIR, 'config');
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
|
||||
|
||||
writeFileSync(
|
||||
join(configDir, 'development.json'),
|
||||
JSON.stringify({ app: { name: 'file-config' } })
|
||||
|
|
@ -356,12 +347,12 @@ JSON_VALUE={"key": "value", "nested": {"array": [1, 2, 3]}}
|
|||
const manager = new ConfigManager({
|
||||
loaders: [
|
||||
new FileLoader(configDir, 'development'), // priority 50
|
||||
new EnvLoader('') // priority 100
|
||||
]
|
||||
new EnvLoader(''), // priority 100
|
||||
],
|
||||
});
|
||||
|
||||
const config = await manager.initialize(appConfigSchema);
|
||||
|
||||
|
||||
// Environment should win due to higher priority
|
||||
expect(config.app.name).toBe('env-config');
|
||||
});
|
||||
|
|
@ -369,16 +360,16 @@ JSON_VALUE={"key": "value", "nested": {"array": [1, 2, 3]}}
|
|||
test('should handle readonly environment variables', async () => {
|
||||
// Some system environment variables might be readonly
|
||||
const originalPath = process.env.PATH;
|
||||
|
||||
|
||||
// This should not cause the loader to fail
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new EnvLoader('')]
|
||||
loaders: [new EnvLoader('')],
|
||||
});
|
||||
|
||||
const config = await manager.initialize();
|
||||
expect(config).toBeDefined();
|
||||
|
||||
|
||||
// PATH should not be modified
|
||||
expect(process.env.PATH).toBe(originalPath);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@ -1,208 +1,202 @@
|
|||
import { describe, test, expect, beforeEach, afterEach } from 'bun:test';
|
||||
import { writeFileSync, mkdirSync, rmSync } from 'fs';
|
||||
import { join } from 'path';
|
||||
import {
|
||||
initializeConfig,
|
||||
getConfig,
|
||||
getConfigManager,
|
||||
resetConfig,
|
||||
getDatabaseConfig,
|
||||
getServiceConfig,
|
||||
getLoggingConfig,
|
||||
getProviderConfig,
|
||||
isDevelopment,
|
||||
isProduction,
|
||||
isTest,
|
||||
} from '../src';
|
||||
|
||||
describe('Config Module', () => {
|
||||
const testConfigDir = join(process.cwd(), 'test-config-module');
|
||||
const originalEnv = { ...process.env };
|
||||
|
||||
beforeEach(() => {
|
||||
resetConfig();
|
||||
mkdirSync(testConfigDir, { recursive: true });
|
||||
|
||||
// Create test configuration files
|
||||
const config = {
|
||||
name: 'test-app',
|
||||
version: '1.0.0',
|
||||
service: {
|
||||
name: 'test-service',
|
||||
port: 3000,
|
||||
},
|
||||
database: {
|
||||
postgres: {
|
||||
host: 'localhost',
|
||||
port: 5432,
|
||||
database: 'testdb',
|
||||
user: 'testuser',
|
||||
password: 'testpass',
|
||||
},
|
||||
questdb: {
|
||||
host: 'localhost',
|
||||
httpPort: 9000,
|
||||
pgPort: 8812,
|
||||
},
|
||||
mongodb: {
|
||||
host: 'localhost',
|
||||
port: 27017,
|
||||
database: 'testdb',
|
||||
},
|
||||
dragonfly: {
|
||||
host: 'localhost',
|
||||
port: 6379,
|
||||
},
|
||||
},
|
||||
logging: {
|
||||
level: 'info',
|
||||
format: 'json',
|
||||
},
|
||||
providers: {
|
||||
yahoo: {
|
||||
enabled: true,
|
||||
rateLimit: 5,
|
||||
},
|
||||
qm: {
|
||||
enabled: false,
|
||||
apiKey: 'test-key',
|
||||
},
|
||||
},
|
||||
environment: 'test',
|
||||
};
|
||||
|
||||
writeFileSync(
|
||||
join(testConfigDir, 'default.json'),
|
||||
JSON.stringify(config, null, 2)
|
||||
);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
resetConfig();
|
||||
rmSync(testConfigDir, { recursive: true, force: true });
|
||||
process.env = { ...originalEnv };
|
||||
});
|
||||
|
||||
test('should initialize configuration', async () => {
|
||||
const config = await initializeConfig(testConfigDir);
|
||||
|
||||
expect(config.app.name).toBe('test-app');
|
||||
expect(config.service.port).toBe(3000);
|
||||
expect(config.environment).toBe('test');
|
||||
});
|
||||
|
||||
test('should get configuration after initialization', async () => {
|
||||
await initializeConfig(testConfigDir);
|
||||
const config = getConfig();
|
||||
|
||||
expect(config.app.name).toBe('test-app');
|
||||
expect(config.database.postgres.host).toBe('localhost');
|
||||
});
|
||||
|
||||
test('should throw if getting config before initialization', () => {
|
||||
expect(() => getConfig()).toThrow('Configuration not initialized');
|
||||
});
|
||||
|
||||
test('should get config manager instance', async () => {
|
||||
await initializeConfig(testConfigDir);
|
||||
const manager = getConfigManager();
|
||||
|
||||
expect(manager).toBeDefined();
|
||||
expect(manager.get().app.name).toBe('test-app');
|
||||
});
|
||||
|
||||
test('should get database configuration', async () => {
|
||||
await initializeConfig(testConfigDir);
|
||||
const dbConfig = getDatabaseConfig();
|
||||
|
||||
expect(dbConfig.postgres.host).toBe('localhost');
|
||||
expect(dbConfig.questdb.httpPort).toBe(9000);
|
||||
expect(dbConfig.mongodb.database).toBe('testdb');
|
||||
});
|
||||
|
||||
test('should get service configuration', async () => {
|
||||
await initializeConfig(testConfigDir);
|
||||
const serviceConfig = getServiceConfig();
|
||||
|
||||
expect(serviceConfig.name).toBe('test-service');
|
||||
expect(serviceConfig.port).toBe(3000);
|
||||
});
|
||||
|
||||
test('should get logging configuration', async () => {
|
||||
await initializeConfig(testConfigDir);
|
||||
const loggingConfig = getLoggingConfig();
|
||||
|
||||
expect(loggingConfig.level).toBe('info');
|
||||
expect(loggingConfig.format).toBe('json');
|
||||
});
|
||||
|
||||
test('should get provider configuration', async () => {
|
||||
await initializeConfig(testConfigDir);
|
||||
|
||||
const yahooConfig = getProviderConfig('yahoo');
|
||||
expect(yahooConfig.enabled).toBe(true);
|
||||
expect(yahooConfig.rateLimit).toBe(5);
|
||||
|
||||
const qmConfig = getProviderConfig('quoteMedia');
|
||||
expect(qmConfig.enabled).toBe(false);
|
||||
expect(qmConfig.apiKey).toBe('test-key');
|
||||
});
|
||||
|
||||
test('should throw for non-existent provider', async () => {
|
||||
await initializeConfig(testConfigDir);
|
||||
|
||||
expect(() => getProviderConfig('nonexistent')).toThrow(
|
||||
'Provider configuration not found: nonexistent'
|
||||
);
|
||||
});
|
||||
|
||||
test('should check environment correctly', async () => {
|
||||
await initializeConfig(testConfigDir);
|
||||
|
||||
expect(isTest()).toBe(true);
|
||||
expect(isDevelopment()).toBe(false);
|
||||
expect(isProduction()).toBe(false);
|
||||
});
|
||||
|
||||
test('should handle environment overrides', async () => {
|
||||
process.env.NODE_ENV = 'production';
|
||||
process.env.STOCKBOT_APP__NAME = 'env-override-app';
|
||||
process.env.STOCKBOT_DATABASE__POSTGRES__HOST = 'prod-db';
|
||||
|
||||
const prodConfig = {
|
||||
database: {
|
||||
postgres: {
|
||||
host: 'prod-host',
|
||||
port: 5432,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
writeFileSync(
|
||||
join(testConfigDir, 'production.json'),
|
||||
JSON.stringify(prodConfig, null, 2)
|
||||
);
|
||||
|
||||
const config = await initializeConfig(testConfigDir);
|
||||
|
||||
expect(config.environment).toBe('production');
|
||||
expect(config.app.name).toBe('env-override-app');
|
||||
expect(config.database.postgres.host).toBe('prod-db');
|
||||
expect(isProduction()).toBe(true);
|
||||
});
|
||||
|
||||
test('should reset configuration', async () => {
|
||||
await initializeConfig(testConfigDir);
|
||||
expect(() => getConfig()).not.toThrow();
|
||||
|
||||
resetConfig();
|
||||
expect(() => getConfig()).toThrow('Configuration not initialized');
|
||||
});
|
||||
|
||||
test('should maintain singleton instance', async () => {
|
||||
const config1 = await initializeConfig(testConfigDir);
|
||||
const config2 = await initializeConfig(testConfigDir);
|
||||
|
||||
expect(config1).toBe(config2);
|
||||
});
|
||||
});
|
||||
import { mkdirSync, rmSync, writeFileSync } from 'fs';
|
||||
import { join } from 'path';
|
||||
import { afterEach, beforeEach, describe, expect, test } from 'bun:test';
|
||||
import {
|
||||
getConfig,
|
||||
getConfigManager,
|
||||
getDatabaseConfig,
|
||||
getLoggingConfig,
|
||||
getProviderConfig,
|
||||
getServiceConfig,
|
||||
initializeConfig,
|
||||
isDevelopment,
|
||||
isProduction,
|
||||
isTest,
|
||||
resetConfig,
|
||||
} from '../src';
|
||||
|
||||
describe('Config Module', () => {
|
||||
const testConfigDir = join(process.cwd(), 'test-config-module');
|
||||
const originalEnv = { ...process.env };
|
||||
|
||||
beforeEach(() => {
|
||||
resetConfig();
|
||||
mkdirSync(testConfigDir, { recursive: true });
|
||||
|
||||
// Create test configuration files
|
||||
const config = {
|
||||
name: 'test-app',
|
||||
version: '1.0.0',
|
||||
service: {
|
||||
name: 'test-service',
|
||||
port: 3000,
|
||||
},
|
||||
database: {
|
||||
postgres: {
|
||||
host: 'localhost',
|
||||
port: 5432,
|
||||
database: 'testdb',
|
||||
user: 'testuser',
|
||||
password: 'testpass',
|
||||
},
|
||||
questdb: {
|
||||
host: 'localhost',
|
||||
httpPort: 9000,
|
||||
pgPort: 8812,
|
||||
},
|
||||
mongodb: {
|
||||
host: 'localhost',
|
||||
port: 27017,
|
||||
database: 'testdb',
|
||||
},
|
||||
dragonfly: {
|
||||
host: 'localhost',
|
||||
port: 6379,
|
||||
},
|
||||
},
|
||||
logging: {
|
||||
level: 'info',
|
||||
format: 'json',
|
||||
},
|
||||
providers: {
|
||||
yahoo: {
|
||||
enabled: true,
|
||||
rateLimit: 5,
|
||||
},
|
||||
qm: {
|
||||
enabled: false,
|
||||
apiKey: 'test-key',
|
||||
},
|
||||
},
|
||||
environment: 'test',
|
||||
};
|
||||
|
||||
writeFileSync(join(testConfigDir, 'default.json'), JSON.stringify(config, null, 2));
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
resetConfig();
|
||||
rmSync(testConfigDir, { recursive: true, force: true });
|
||||
process.env = { ...originalEnv };
|
||||
});
|
||||
|
||||
test('should initialize configuration', async () => {
|
||||
const config = await initializeConfig(testConfigDir);
|
||||
|
||||
expect(config.app.name).toBe('test-app');
|
||||
expect(config.service.port).toBe(3000);
|
||||
expect(config.environment).toBe('test');
|
||||
});
|
||||
|
||||
test('should get configuration after initialization', async () => {
|
||||
await initializeConfig(testConfigDir);
|
||||
const config = getConfig();
|
||||
|
||||
expect(config.app.name).toBe('test-app');
|
||||
expect(config.database.postgres.host).toBe('localhost');
|
||||
});
|
||||
|
||||
test('should throw if getting config before initialization', () => {
|
||||
expect(() => getConfig()).toThrow('Configuration not initialized');
|
||||
});
|
||||
|
||||
test('should get config manager instance', async () => {
|
||||
await initializeConfig(testConfigDir);
|
||||
const manager = getConfigManager();
|
||||
|
||||
expect(manager).toBeDefined();
|
||||
expect(manager.get().app.name).toBe('test-app');
|
||||
});
|
||||
|
||||
test('should get database configuration', async () => {
|
||||
await initializeConfig(testConfigDir);
|
||||
const dbConfig = getDatabaseConfig();
|
||||
|
||||
expect(dbConfig.postgres.host).toBe('localhost');
|
||||
expect(dbConfig.questdb.httpPort).toBe(9000);
|
||||
expect(dbConfig.mongodb.database).toBe('testdb');
|
||||
});
|
||||
|
||||
test('should get service configuration', async () => {
|
||||
await initializeConfig(testConfigDir);
|
||||
const serviceConfig = getServiceConfig();
|
||||
|
||||
expect(serviceConfig.name).toBe('test-service');
|
||||
expect(serviceConfig.port).toBe(3000);
|
||||
});
|
||||
|
||||
test('should get logging configuration', async () => {
|
||||
await initializeConfig(testConfigDir);
|
||||
const loggingConfig = getLoggingConfig();
|
||||
|
||||
expect(loggingConfig.level).toBe('info');
|
||||
expect(loggingConfig.format).toBe('json');
|
||||
});
|
||||
|
||||
test('should get provider configuration', async () => {
|
||||
await initializeConfig(testConfigDir);
|
||||
|
||||
const yahooConfig = getProviderConfig('yahoo');
|
||||
expect(yahooConfig.enabled).toBe(true);
|
||||
expect(yahooConfig.rateLimit).toBe(5);
|
||||
|
||||
const qmConfig = getProviderConfig('quoteMedia');
|
||||
expect(qmConfig.enabled).toBe(false);
|
||||
expect(qmConfig.apiKey).toBe('test-key');
|
||||
});
|
||||
|
||||
test('should throw for non-existent provider', async () => {
|
||||
await initializeConfig(testConfigDir);
|
||||
|
||||
expect(() => getProviderConfig('nonexistent')).toThrow(
|
||||
'Provider configuration not found: nonexistent'
|
||||
);
|
||||
});
|
||||
|
||||
test('should check environment correctly', async () => {
|
||||
await initializeConfig(testConfigDir);
|
||||
|
||||
expect(isTest()).toBe(true);
|
||||
expect(isDevelopment()).toBe(false);
|
||||
expect(isProduction()).toBe(false);
|
||||
});
|
||||
|
||||
test('should handle environment overrides', async () => {
|
||||
process.env.NODE_ENV = 'production';
|
||||
process.env.STOCKBOT_APP__NAME = 'env-override-app';
|
||||
process.env.STOCKBOT_DATABASE__POSTGRES__HOST = 'prod-db';
|
||||
|
||||
const prodConfig = {
|
||||
database: {
|
||||
postgres: {
|
||||
host: 'prod-host',
|
||||
port: 5432,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
writeFileSync(join(testConfigDir, 'production.json'), JSON.stringify(prodConfig, null, 2));
|
||||
|
||||
const config = await initializeConfig(testConfigDir);
|
||||
|
||||
expect(config.environment).toBe('production');
|
||||
expect(config.app.name).toBe('env-override-app');
|
||||
expect(config.database.postgres.host).toBe('prod-db');
|
||||
expect(isProduction()).toBe(true);
|
||||
});
|
||||
|
||||
test('should reset configuration', async () => {
|
||||
await initializeConfig(testConfigDir);
|
||||
expect(() => getConfig()).not.toThrow();
|
||||
|
||||
resetConfig();
|
||||
expect(() => getConfig()).toThrow('Configuration not initialized');
|
||||
});
|
||||
|
||||
test('should maintain singleton instance', async () => {
|
||||
const config1 = await initializeConfig(testConfigDir);
|
||||
const config2 = await initializeConfig(testConfigDir);
|
||||
|
||||
expect(config1).toBe(config2);
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@ -1,181 +1,166 @@
|
|||
import { describe, test, expect, beforeEach, afterEach } from 'bun:test';
|
||||
import { writeFileSync, mkdirSync, rmSync } from 'fs';
|
||||
import { join } from 'path';
|
||||
import { EnvLoader } from '../src/loaders/env.loader';
|
||||
import { FileLoader } from '../src/loaders/file.loader';
|
||||
|
||||
describe('EnvLoader', () => {
|
||||
const originalEnv = { ...process.env };
|
||||
|
||||
afterEach(() => {
|
||||
// Restore original environment
|
||||
process.env = { ...originalEnv };
|
||||
});
|
||||
|
||||
test('should load environment variables with prefix', async () => {
|
||||
process.env.TEST_APP_NAME = 'env-app';
|
||||
process.env.TEST_APP_VERSION = '1.0.0';
|
||||
process.env.TEST_DATABASE_HOST = 'env-host';
|
||||
process.env.TEST_DATABASE_PORT = '5432';
|
||||
process.env.OTHER_VAR = 'should-not-load';
|
||||
|
||||
const loader = new EnvLoader('TEST_', { convertCase: false, nestedDelimiter: null });
|
||||
const config = await loader.load();
|
||||
|
||||
expect(config.APP_NAME).toBe('env-app');
|
||||
expect(config.APP_VERSION).toBe('1.0.0');
|
||||
expect(config.DATABASE_HOST).toBe('env-host');
|
||||
expect(config.DATABASE_PORT).toBe(5432); // Should be parsed as number
|
||||
expect(config.OTHER_VAR).toBeUndefined();
|
||||
});
|
||||
|
||||
test('should convert snake_case to camelCase', async () => {
|
||||
process.env.TEST_DATABASE_CONNECTION_STRING = 'postgres://localhost';
|
||||
process.env.TEST_API_KEY_SECRET = 'secret123';
|
||||
|
||||
const loader = new EnvLoader('TEST_', { convertCase: true });
|
||||
const config = await loader.load();
|
||||
|
||||
expect(config.databaseConnectionString).toBe('postgres://localhost');
|
||||
expect(config.apiKeySecret).toBe('secret123');
|
||||
});
|
||||
|
||||
test('should parse JSON values', async () => {
|
||||
process.env.TEST_SETTINGS = '{"feature": true, "limit": 100}';
|
||||
process.env.TEST_NUMBERS = '[1, 2, 3]';
|
||||
|
||||
const loader = new EnvLoader('TEST_', { parseJson: true });
|
||||
const config = await loader.load();
|
||||
|
||||
expect(config.SETTINGS).toEqual({ feature: true, limit: 100 });
|
||||
expect(config.NUMBERS).toEqual([1, 2, 3]);
|
||||
});
|
||||
|
||||
test('should parse boolean and number values', async () => {
|
||||
process.env.TEST_ENABLED = 'true';
|
||||
process.env.TEST_DISABLED = 'false';
|
||||
process.env.TEST_PORT = '3000';
|
||||
process.env.TEST_RATIO = '0.75';
|
||||
|
||||
const loader = new EnvLoader('TEST_', { parseValues: true });
|
||||
const config = await loader.load();
|
||||
|
||||
expect(config.ENABLED).toBe(true);
|
||||
expect(config.DISABLED).toBe(false);
|
||||
expect(config.PORT).toBe(3000);
|
||||
expect(config.RATIO).toBe(0.75);
|
||||
});
|
||||
|
||||
test('should handle nested object structure', async () => {
|
||||
process.env.TEST_APP__NAME = 'nested-app';
|
||||
process.env.TEST_APP__SETTINGS__ENABLED = 'true';
|
||||
process.env.TEST_DATABASE__HOST = 'localhost';
|
||||
|
||||
const loader = new EnvLoader('TEST_', {
|
||||
parseValues: true,
|
||||
nestedDelimiter: '__'
|
||||
});
|
||||
const config = await loader.load();
|
||||
|
||||
expect(config.APP).toEqual({
|
||||
NAME: 'nested-app',
|
||||
SETTINGS: {
|
||||
ENABLED: true
|
||||
}
|
||||
});
|
||||
expect(config.DATABASE).toEqual({
|
||||
HOST: 'localhost'
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('FileLoader', () => {
|
||||
const testDir = join(process.cwd(), 'test-config');
|
||||
|
||||
beforeEach(() => {
|
||||
mkdirSync(testDir, { recursive: true });
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
rmSync(testDir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
test('should load JSON configuration file', async () => {
|
||||
const config = {
|
||||
app: { name: 'file-app', version: '1.0.0' },
|
||||
database: { host: 'localhost', port: 5432 }
|
||||
};
|
||||
|
||||
writeFileSync(
|
||||
join(testDir, 'default.json'),
|
||||
JSON.stringify(config, null, 2)
|
||||
);
|
||||
|
||||
const loader = new FileLoader(testDir);
|
||||
const loaded = await loader.load();
|
||||
|
||||
expect(loaded).toEqual(config);
|
||||
});
|
||||
|
||||
test('should load environment-specific configuration', async () => {
|
||||
const defaultConfig = {
|
||||
app: { name: 'app', port: 3000 },
|
||||
database: { host: 'localhost' }
|
||||
};
|
||||
|
||||
const prodConfig = {
|
||||
app: { port: 8080 },
|
||||
database: { host: 'prod-db' }
|
||||
};
|
||||
|
||||
writeFileSync(
|
||||
join(testDir, 'default.json'),
|
||||
JSON.stringify(defaultConfig, null, 2)
|
||||
);
|
||||
|
||||
writeFileSync(
|
||||
join(testDir, 'production.json'),
|
||||
JSON.stringify(prodConfig, null, 2)
|
||||
);
|
||||
|
||||
const loader = new FileLoader(testDir, 'production');
|
||||
const loaded = await loader.load();
|
||||
|
||||
expect(loaded).toEqual({
|
||||
app: { name: 'app', port: 8080 },
|
||||
database: { host: 'prod-db' }
|
||||
});
|
||||
});
|
||||
|
||||
test('should handle missing configuration files gracefully', async () => {
|
||||
const loader = new FileLoader(testDir);
|
||||
const loaded = await loader.load();
|
||||
|
||||
expect(loaded).toEqual({});
|
||||
});
|
||||
|
||||
test('should throw on invalid JSON', async () => {
|
||||
writeFileSync(
|
||||
join(testDir, 'default.json'),
|
||||
'invalid json content'
|
||||
);
|
||||
|
||||
const loader = new FileLoader(testDir);
|
||||
|
||||
await expect(loader.load()).rejects.toThrow();
|
||||
});
|
||||
|
||||
test('should support custom configuration', async () => {
|
||||
const config = { custom: 'value' };
|
||||
|
||||
writeFileSync(
|
||||
join(testDir, 'custom.json'),
|
||||
JSON.stringify(config, null, 2)
|
||||
);
|
||||
|
||||
const loader = new FileLoader(testDir);
|
||||
const loaded = await loader.loadFile('custom.json');
|
||||
|
||||
expect(loaded).toEqual(config);
|
||||
});
|
||||
});
|
||||
import { mkdirSync, rmSync, writeFileSync } from 'fs';
|
||||
import { join } from 'path';
|
||||
import { afterEach, beforeEach, describe, expect, test } from 'bun:test';
|
||||
import { EnvLoader } from '../src/loaders/env.loader';
|
||||
import { FileLoader } from '../src/loaders/file.loader';
|
||||
|
||||
describe('EnvLoader', () => {
|
||||
const originalEnv = { ...process.env };
|
||||
|
||||
afterEach(() => {
|
||||
// Restore original environment
|
||||
process.env = { ...originalEnv };
|
||||
});
|
||||
|
||||
test('should load environment variables with prefix', async () => {
|
||||
process.env.TEST_APP_NAME = 'env-app';
|
||||
process.env.TEST_APP_VERSION = '1.0.0';
|
||||
process.env.TEST_DATABASE_HOST = 'env-host';
|
||||
process.env.TEST_DATABASE_PORT = '5432';
|
||||
process.env.OTHER_VAR = 'should-not-load';
|
||||
|
||||
const loader = new EnvLoader('TEST_', { convertCase: false, nestedDelimiter: null });
|
||||
const config = await loader.load();
|
||||
|
||||
expect(config.APP_NAME).toBe('env-app');
|
||||
expect(config.APP_VERSION).toBe('1.0.0');
|
||||
expect(config.DATABASE_HOST).toBe('env-host');
|
||||
expect(config.DATABASE_PORT).toBe(5432); // Should be parsed as number
|
||||
expect(config.OTHER_VAR).toBeUndefined();
|
||||
});
|
||||
|
||||
test('should convert snake_case to camelCase', async () => {
|
||||
process.env.TEST_DATABASE_CONNECTION_STRING = 'postgres://localhost';
|
||||
process.env.TEST_API_KEY_SECRET = 'secret123';
|
||||
|
||||
const loader = new EnvLoader('TEST_', { convertCase: true });
|
||||
const config = await loader.load();
|
||||
|
||||
expect(config.databaseConnectionString).toBe('postgres://localhost');
|
||||
expect(config.apiKeySecret).toBe('secret123');
|
||||
});
|
||||
|
||||
test('should parse JSON values', async () => {
|
||||
process.env.TEST_SETTINGS = '{"feature": true, "limit": 100}';
|
||||
process.env.TEST_NUMBERS = '[1, 2, 3]';
|
||||
|
||||
const loader = new EnvLoader('TEST_', { parseJson: true });
|
||||
const config = await loader.load();
|
||||
|
||||
expect(config.SETTINGS).toEqual({ feature: true, limit: 100 });
|
||||
expect(config.NUMBERS).toEqual([1, 2, 3]);
|
||||
});
|
||||
|
||||
test('should parse boolean and number values', async () => {
|
||||
process.env.TEST_ENABLED = 'true';
|
||||
process.env.TEST_DISABLED = 'false';
|
||||
process.env.TEST_PORT = '3000';
|
||||
process.env.TEST_RATIO = '0.75';
|
||||
|
||||
const loader = new EnvLoader('TEST_', { parseValues: true });
|
||||
const config = await loader.load();
|
||||
|
||||
expect(config.ENABLED).toBe(true);
|
||||
expect(config.DISABLED).toBe(false);
|
||||
expect(config.PORT).toBe(3000);
|
||||
expect(config.RATIO).toBe(0.75);
|
||||
});
|
||||
|
||||
test('should handle nested object structure', async () => {
|
||||
process.env.TEST_APP__NAME = 'nested-app';
|
||||
process.env.TEST_APP__SETTINGS__ENABLED = 'true';
|
||||
process.env.TEST_DATABASE__HOST = 'localhost';
|
||||
|
||||
const loader = new EnvLoader('TEST_', {
|
||||
parseValues: true,
|
||||
nestedDelimiter: '__',
|
||||
});
|
||||
const config = await loader.load();
|
||||
|
||||
expect(config.APP).toEqual({
|
||||
NAME: 'nested-app',
|
||||
SETTINGS: {
|
||||
ENABLED: true,
|
||||
},
|
||||
});
|
||||
expect(config.DATABASE).toEqual({
|
||||
HOST: 'localhost',
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('FileLoader', () => {
|
||||
const testDir = join(process.cwd(), 'test-config');
|
||||
|
||||
beforeEach(() => {
|
||||
mkdirSync(testDir, { recursive: true });
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
rmSync(testDir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
test('should load JSON configuration file', async () => {
|
||||
const config = {
|
||||
app: { name: 'file-app', version: '1.0.0' },
|
||||
database: { host: 'localhost', port: 5432 },
|
||||
};
|
||||
|
||||
writeFileSync(join(testDir, 'default.json'), JSON.stringify(config, null, 2));
|
||||
|
||||
const loader = new FileLoader(testDir);
|
||||
const loaded = await loader.load();
|
||||
|
||||
expect(loaded).toEqual(config);
|
||||
});
|
||||
|
||||
test('should load environment-specific configuration', async () => {
|
||||
const defaultConfig = {
|
||||
app: { name: 'app', port: 3000 },
|
||||
database: { host: 'localhost' },
|
||||
};
|
||||
|
||||
const prodConfig = {
|
||||
app: { port: 8080 },
|
||||
database: { host: 'prod-db' },
|
||||
};
|
||||
|
||||
writeFileSync(join(testDir, 'default.json'), JSON.stringify(defaultConfig, null, 2));
|
||||
|
||||
writeFileSync(join(testDir, 'production.json'), JSON.stringify(prodConfig, null, 2));
|
||||
|
||||
const loader = new FileLoader(testDir, 'production');
|
||||
const loaded = await loader.load();
|
||||
|
||||
expect(loaded).toEqual({
|
||||
app: { name: 'app', port: 8080 },
|
||||
database: { host: 'prod-db' },
|
||||
});
|
||||
});
|
||||
|
||||
test('should handle missing configuration files gracefully', async () => {
|
||||
const loader = new FileLoader(testDir);
|
||||
const loaded = await loader.load();
|
||||
|
||||
expect(loaded).toEqual({});
|
||||
});
|
||||
|
||||
test('should throw on invalid JSON', async () => {
|
||||
writeFileSync(join(testDir, 'default.json'), 'invalid json content');
|
||||
|
||||
const loader = new FileLoader(testDir);
|
||||
|
||||
await expect(loader.load()).rejects.toThrow();
|
||||
});
|
||||
|
||||
test('should support custom configuration', async () => {
|
||||
const config = { custom: 'value' };
|
||||
|
||||
writeFileSync(join(testDir, 'custom.json'), JSON.stringify(config, null, 2));
|
||||
|
||||
const loader = new FileLoader(testDir);
|
||||
const loaded = await loader.loadFile('custom.json');
|
||||
|
||||
expect(loaded).toEqual(config);
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@ -1,11 +1,11 @@
|
|||
import { describe, test, expect, beforeEach, afterEach } from 'bun:test';
|
||||
import { existsSync, mkdirSync, rmSync, writeFileSync } from 'fs';
|
||||
import { join } from 'path';
|
||||
import { afterEach, beforeEach, describe, expect, test } from 'bun:test';
|
||||
import { ConfigManager } from '../src/config-manager';
|
||||
import { getProviderConfig, resetConfig } from '../src/index';
|
||||
import { EnvLoader } from '../src/loaders/env.loader';
|
||||
import { FileLoader } from '../src/loaders/file.loader';
|
||||
import { appConfigSchema } from '../src/schemas';
|
||||
import { resetConfig, getProviderConfig } from '../src/index';
|
||||
import { join } from 'path';
|
||||
import { mkdirSync, writeFileSync, rmSync, existsSync } from 'fs';
|
||||
|
||||
const TEST_DIR = join(__dirname, 'provider-tests');
|
||||
|
||||
|
|
@ -15,10 +15,10 @@ describe('Provider Configuration Tests', () => {
|
|||
beforeEach(() => {
|
||||
// Save original environment
|
||||
originalEnv = { ...process.env };
|
||||
|
||||
|
||||
// Reset config singleton
|
||||
resetConfig();
|
||||
|
||||
|
||||
// Clean up test directory
|
||||
if (existsSync(TEST_DIR)) {
|
||||
rmSync(TEST_DIR, { recursive: true, force: true });
|
||||
|
|
@ -29,7 +29,7 @@ describe('Provider Configuration Tests', () => {
|
|||
afterEach(() => {
|
||||
// Restore original environment
|
||||
process.env = originalEnv;
|
||||
|
||||
|
||||
// Clean up
|
||||
resetConfig();
|
||||
if (existsSync(TEST_DIR)) {
|
||||
|
|
@ -44,7 +44,7 @@ describe('Provider Configuration Tests', () => {
|
|||
process.env.WEBSHARE_ENABLED = 'true';
|
||||
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new EnvLoader('')]
|
||||
loaders: [new EnvLoader('')],
|
||||
});
|
||||
|
||||
const config = await manager.initialize(appConfigSchema);
|
||||
|
|
@ -64,7 +64,7 @@ describe('Provider Configuration Tests', () => {
|
|||
process.env.EOD_PRIORITY = '10';
|
||||
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new EnvLoader('')]
|
||||
loaders: [new EnvLoader('')],
|
||||
});
|
||||
|
||||
const config = await manager.initialize(appConfigSchema);
|
||||
|
|
@ -88,7 +88,7 @@ describe('Provider Configuration Tests', () => {
|
|||
process.env.IB_PRIORITY = '5';
|
||||
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new EnvLoader('')]
|
||||
loaders: [new EnvLoader('')],
|
||||
});
|
||||
|
||||
const config = await manager.initialize(appConfigSchema);
|
||||
|
|
@ -113,7 +113,7 @@ describe('Provider Configuration Tests', () => {
|
|||
process.env.QM_PRIORITY = '15';
|
||||
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new EnvLoader('')]
|
||||
loaders: [new EnvLoader('')],
|
||||
});
|
||||
|
||||
const config = await manager.initialize(appConfigSchema);
|
||||
|
|
@ -136,7 +136,7 @@ describe('Provider Configuration Tests', () => {
|
|||
process.env.YAHOO_PRIORITY = '20';
|
||||
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new EnvLoader('')]
|
||||
loaders: [new EnvLoader('')],
|
||||
});
|
||||
|
||||
const config = await manager.initialize(appConfigSchema);
|
||||
|
|
@ -153,27 +153,31 @@ describe('Provider Configuration Tests', () => {
|
|||
// Create a config file
|
||||
const configDir = join(TEST_DIR, 'config');
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
|
||||
|
||||
writeFileSync(
|
||||
join(configDir, 'development.json'),
|
||||
JSON.stringify({
|
||||
providers: {
|
||||
eod: {
|
||||
name: 'EOD Historical Data',
|
||||
apiKey: 'file-eod-key',
|
||||
baseUrl: 'https://file.eod.com/api',
|
||||
tier: 'free',
|
||||
enabled: false,
|
||||
priority: 1
|
||||
JSON.stringify(
|
||||
{
|
||||
providers: {
|
||||
eod: {
|
||||
name: 'EOD Historical Data',
|
||||
apiKey: 'file-eod-key',
|
||||
baseUrl: 'https://file.eod.com/api',
|
||||
tier: 'free',
|
||||
enabled: false,
|
||||
priority: 1,
|
||||
},
|
||||
yahoo: {
|
||||
name: 'Yahoo Finance',
|
||||
baseUrl: 'https://file.yahoo.com',
|
||||
enabled: true,
|
||||
priority: 2,
|
||||
},
|
||||
},
|
||||
yahoo: {
|
||||
name: 'Yahoo Finance',
|
||||
baseUrl: 'https://file.yahoo.com',
|
||||
enabled: true,
|
||||
priority: 2
|
||||
}
|
||||
}
|
||||
}, null, 2)
|
||||
},
|
||||
null,
|
||||
2
|
||||
)
|
||||
);
|
||||
|
||||
// Set environment variables that should override file config
|
||||
|
|
@ -183,10 +187,7 @@ describe('Provider Configuration Tests', () => {
|
|||
process.env.YAHOO_PRIORITY = '25';
|
||||
|
||||
const manager = new ConfigManager({
|
||||
loaders: [
|
||||
new FileLoader(configDir, 'development'),
|
||||
new EnvLoader('')
|
||||
]
|
||||
loaders: [new FileLoader(configDir, 'development'), new EnvLoader('')],
|
||||
});
|
||||
|
||||
const config = await manager.initialize(appConfigSchema);
|
||||
|
|
@ -211,7 +212,7 @@ describe('Provider Configuration Tests', () => {
|
|||
process.env.IB_GATEWAY_PORT = 'not-a-number'; // Should be a number
|
||||
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new EnvLoader('')]
|
||||
loaders: [new EnvLoader('')],
|
||||
});
|
||||
|
||||
// Should throw validation error
|
||||
|
|
@ -226,7 +227,7 @@ describe('Provider Configuration Tests', () => {
|
|||
process.env.WEBSHARE_ENABLED = 'true';
|
||||
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new EnvLoader('')]
|
||||
loaders: [new EnvLoader('')],
|
||||
});
|
||||
|
||||
await manager.initialize(appConfigSchema);
|
||||
|
|
@ -241,7 +242,9 @@ describe('Provider Configuration Tests', () => {
|
|||
expect((webshareConfig as any).apiKey).toBe('test-webshare-key');
|
||||
|
||||
// Test non-existent provider
|
||||
expect(() => getProviderConfig('nonexistent')).toThrow('Provider configuration not found: nonexistent');
|
||||
expect(() => getProviderConfig('nonexistent')).toThrow(
|
||||
'Provider configuration not found: nonexistent'
|
||||
);
|
||||
});
|
||||
|
||||
test('should handle boolean string parsing correctly', async () => {
|
||||
|
|
@ -253,7 +256,7 @@ describe('Provider Configuration Tests', () => {
|
|||
process.env.WEBSHARE_ENABLED = 'yes'; // Should be treated as string, not boolean
|
||||
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new EnvLoader('')]
|
||||
loaders: [new EnvLoader('')],
|
||||
});
|
||||
|
||||
const config = await manager.initialize(appConfigSchema);
|
||||
|
|
@ -272,7 +275,7 @@ describe('Provider Configuration Tests', () => {
|
|||
process.env.IB_GATEWAY_CLIENT_ID = '999';
|
||||
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new EnvLoader('')]
|
||||
loaders: [new EnvLoader('')],
|
||||
});
|
||||
|
||||
const config = await manager.initialize(appConfigSchema);
|
||||
|
|
@ -300,9 +303,9 @@ YAHOO_BASE_URL=https://env-file.yahoo.com
|
|||
const originalCwd = process.cwd();
|
||||
try {
|
||||
process.chdir(TEST_DIR);
|
||||
|
||||
|
||||
const manager = new ConfigManager({
|
||||
loaders: [new EnvLoader('')]
|
||||
loaders: [new EnvLoader('')],
|
||||
});
|
||||
|
||||
const config = await manager.initialize(appConfigSchema);
|
||||
|
|
@ -317,4 +320,4 @@ YAHOO_BASE_URL=https://env-file.yahoo.com
|
|||
process.chdir(originalCwd);
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
import { afterEach, beforeEach, describe, expect, test } from 'bun:test';
|
||||
import { existsSync, mkdirSync, rmSync, writeFileSync } from 'fs';
|
||||
import { join } from 'path';
|
||||
import { afterEach, beforeEach, describe, expect, test } from 'bun:test';
|
||||
import {
|
||||
getConfig,
|
||||
getDatabaseConfig,
|
||||
|
|
@ -11,7 +11,7 @@ import {
|
|||
isDevelopment,
|
||||
isProduction,
|
||||
isTest,
|
||||
resetConfig
|
||||
resetConfig,
|
||||
} from '../src/index';
|
||||
|
||||
const TEST_DIR = join(__dirname, 'real-usage-tests');
|
||||
|
|
@ -23,13 +23,13 @@ describe('Real Usage Scenarios', () => {
|
|||
beforeEach(() => {
|
||||
originalEnv = { ...process.env };
|
||||
originalCwd = process.cwd();
|
||||
|
||||
|
||||
resetConfig();
|
||||
|
||||
|
||||
if (existsSync(TEST_DIR)) {
|
||||
rmSync(TEST_DIR, { recursive: true, force: true });
|
||||
}
|
||||
|
||||
|
||||
setupRealUsageScenarios();
|
||||
});
|
||||
|
||||
|
|
@ -37,7 +37,7 @@ describe('Real Usage Scenarios', () => {
|
|||
process.env = originalEnv;
|
||||
process.chdir(originalCwd);
|
||||
resetConfig();
|
||||
|
||||
|
||||
if (existsSync(TEST_DIR)) {
|
||||
rmSync(TEST_DIR, { recursive: true, force: true });
|
||||
}
|
||||
|
|
@ -53,18 +53,18 @@ describe('Real Usage Scenarios', () => {
|
|||
// Test typical data-ingestion config access patterns
|
||||
expect(config.app.name).toBe('data-ingestion');
|
||||
expect(config.service.port).toBe(3001);
|
||||
|
||||
|
||||
// Test database config access
|
||||
const dbConfig = getDatabaseConfig();
|
||||
expect(dbConfig.postgres.host).toBe('localhost');
|
||||
expect(dbConfig.postgres.port).toBe(5432);
|
||||
expect(dbConfig.questdb.host).toBe('localhost');
|
||||
|
||||
|
||||
// Test provider access
|
||||
const yahooConfig = getProviderConfig('yahoo');
|
||||
expect(yahooConfig).toBeDefined();
|
||||
expect((yahooConfig as any).enabled).toBe(true);
|
||||
|
||||
|
||||
// Test environment helpers
|
||||
expect(isDevelopment()).toBe(true);
|
||||
expect(isProduction()).toBe(false);
|
||||
|
|
@ -78,11 +78,11 @@ describe('Real Usage Scenarios', () => {
|
|||
|
||||
expect(config.app.name).toBe('web-api');
|
||||
expect(config.service.port).toBe(4000);
|
||||
|
||||
|
||||
// Web API should have access to all the same configs
|
||||
const serviceConfig = getServiceConfig();
|
||||
expect(serviceConfig.name).toBe('web-api');
|
||||
|
||||
|
||||
const loggingConfig = getLoggingConfig();
|
||||
expect(loggingConfig.level).toBe('info');
|
||||
});
|
||||
|
|
@ -96,7 +96,7 @@ describe('Real Usage Scenarios', () => {
|
|||
// Libraries should inherit from root config
|
||||
expect(config.app.name).toBe('cache-lib');
|
||||
expect(config.app.version).toBe('1.0.0'); // From root
|
||||
|
||||
|
||||
// Should have access to cache config
|
||||
const dbConfig = getDatabaseConfig();
|
||||
expect(dbConfig.dragonfly).toBeDefined();
|
||||
|
|
@ -106,7 +106,7 @@ describe('Real Usage Scenarios', () => {
|
|||
|
||||
test('should handle production environment correctly', async () => {
|
||||
process.env.NODE_ENV = 'production';
|
||||
|
||||
|
||||
const dataServiceDir = join(TEST_DIR, 'apps', 'data-ingestion');
|
||||
process.chdir(dataServiceDir);
|
||||
|
||||
|
|
@ -115,14 +115,14 @@ describe('Real Usage Scenarios', () => {
|
|||
|
||||
expect(config.environment).toBe('production');
|
||||
expect(config.logging.level).toBe('warn'); // Production should use different log level
|
||||
|
||||
|
||||
expect(isProduction()).toBe(true);
|
||||
expect(isDevelopment()).toBe(false);
|
||||
});
|
||||
|
||||
test('should handle test environment correctly', async () => {
|
||||
process.env.NODE_ENV = 'test';
|
||||
|
||||
|
||||
const dataServiceDir = join(TEST_DIR, 'apps', 'data-ingestion');
|
||||
process.chdir(dataServiceDir);
|
||||
|
||||
|
|
@ -131,7 +131,7 @@ describe('Real Usage Scenarios', () => {
|
|||
|
||||
expect(config.environment).toBe('test');
|
||||
expect(config.logging.level).toBe('debug'); // Test should use debug level
|
||||
|
||||
|
||||
expect(isTest()).toBe(true);
|
||||
expect(isDevelopment()).toBe(false);
|
||||
});
|
||||
|
|
@ -153,10 +153,10 @@ describe('Real Usage Scenarios', () => {
|
|||
const dbConfig = getDatabaseConfig();
|
||||
expect(dbConfig.postgres.host).toBe('prod-db.example.com');
|
||||
expect(dbConfig.postgres.port).toBe(5433);
|
||||
|
||||
|
||||
const serviceConfig = getServiceConfig();
|
||||
expect(serviceConfig.port).toBe(8080);
|
||||
|
||||
|
||||
const eodConfig = getProviderConfig('eod');
|
||||
expect((eodConfig as any).apiKey).toBe('prod-eod-key');
|
||||
});
|
||||
|
|
@ -168,8 +168,10 @@ describe('Real Usage Scenarios', () => {
|
|||
const config = await initializeServiceConfig();
|
||||
|
||||
// Should throw for non-existent providers
|
||||
expect(() => getProviderConfig('nonexistent')).toThrow('Provider configuration not found: nonexistent');
|
||||
|
||||
expect(() => getProviderConfig('nonexistent')).toThrow(
|
||||
'Provider configuration not found: nonexistent'
|
||||
);
|
||||
|
||||
// Should work for providers that exist but might not be configured
|
||||
// (they should have defaults from schema)
|
||||
const yahooConfig = getProviderConfig('yahoo');
|
||||
|
|
@ -181,18 +183,18 @@ describe('Real Usage Scenarios', () => {
|
|||
process.chdir(dataServiceDir);
|
||||
|
||||
const config = await initializeServiceConfig();
|
||||
|
||||
|
||||
// Test various access patterns used in real applications
|
||||
const configManager = (await import('../src/index')).getConfigManager();
|
||||
|
||||
|
||||
// Direct path access
|
||||
expect(configManager.getValue('app.name')).toBe('data-ingestion');
|
||||
expect(configManager.getValue('service.port')).toBe(3001);
|
||||
|
||||
|
||||
// Check if paths exist
|
||||
expect(configManager.has('app.name')).toBe(true);
|
||||
expect(configManager.has('nonexistent.path')).toBe(false);
|
||||
|
||||
|
||||
// Typed access
|
||||
const port = configManager.getValue<number>('service.port');
|
||||
expect(typeof port).toBe('number');
|
||||
|
|
@ -205,39 +207,39 @@ describe('Real Usage Scenarios', () => {
|
|||
|
||||
await initializeServiceConfig();
|
||||
const configManager = (await import('../src/index')).getConfigManager();
|
||||
|
||||
|
||||
// Update config at runtime (useful for testing)
|
||||
configManager.set({
|
||||
service: {
|
||||
port: 9999
|
||||
}
|
||||
port: 9999,
|
||||
},
|
||||
});
|
||||
|
||||
|
||||
const updatedConfig = getConfig();
|
||||
expect(updatedConfig.service.port).toBe(9999);
|
||||
|
||||
|
||||
// Other values should be preserved
|
||||
expect(updatedConfig.app.name).toBe('data-ingestion');
|
||||
});
|
||||
|
||||
test('should work across multiple service initializations', async () => {
|
||||
// Simulate multiple services in the same process (like tests)
|
||||
|
||||
|
||||
// First service
|
||||
const dataServiceDir = join(TEST_DIR, 'apps', 'data-ingestion');
|
||||
process.chdir(dataServiceDir);
|
||||
|
||||
|
||||
let config = await initializeServiceConfig();
|
||||
expect(config.app.name).toBe('data-ingestion');
|
||||
|
||||
// Reset and switch to another service
|
||||
|
||||
// Reset and switch to another service
|
||||
resetConfig();
|
||||
const webApiDir = join(TEST_DIR, 'apps', 'web-api');
|
||||
process.chdir(webApiDir);
|
||||
|
||||
|
||||
config = await initializeServiceConfig();
|
||||
expect(config.app.name).toBe('web-api');
|
||||
|
||||
|
||||
// Each service should get its own config
|
||||
expect(config.service.port).toBe(4000); // web-api port
|
||||
});
|
||||
|
|
@ -263,7 +265,7 @@ function setupRealUsageScenarios() {
|
|||
development: {
|
||||
app: {
|
||||
name: 'stock-bot-monorepo',
|
||||
version: '1.0.0'
|
||||
version: '1.0.0',
|
||||
},
|
||||
database: {
|
||||
postgres: {
|
||||
|
|
@ -271,116 +273,125 @@ function setupRealUsageScenarios() {
|
|||
port: 5432,
|
||||
database: 'trading_bot',
|
||||
username: 'trading_user',
|
||||
password: 'trading_pass_dev'
|
||||
password: 'trading_pass_dev',
|
||||
},
|
||||
questdb: {
|
||||
host: 'localhost',
|
||||
port: 9009,
|
||||
database: 'questdb'
|
||||
database: 'questdb',
|
||||
},
|
||||
mongodb: {
|
||||
host: 'localhost',
|
||||
port: 27017,
|
||||
database: 'stock'
|
||||
database: 'stock',
|
||||
},
|
||||
dragonfly: {
|
||||
host: 'localhost',
|
||||
port: 6379
|
||||
}
|
||||
port: 6379,
|
||||
},
|
||||
},
|
||||
logging: {
|
||||
level: 'info',
|
||||
format: 'json'
|
||||
format: 'json',
|
||||
},
|
||||
providers: {
|
||||
yahoo: {
|
||||
name: 'Yahoo Finance',
|
||||
enabled: true,
|
||||
priority: 1,
|
||||
baseUrl: 'https://query1.finance.yahoo.com'
|
||||
baseUrl: 'https://query1.finance.yahoo.com',
|
||||
},
|
||||
eod: {
|
||||
name: 'EOD Historical Data',
|
||||
enabled: false,
|
||||
priority: 2,
|
||||
apiKey: 'demo-api-key',
|
||||
baseUrl: 'https://eodhistoricaldata.com/api'
|
||||
}
|
||||
}
|
||||
baseUrl: 'https://eodhistoricaldata.com/api',
|
||||
},
|
||||
},
|
||||
},
|
||||
production: {
|
||||
logging: {
|
||||
level: 'warn'
|
||||
level: 'warn',
|
||||
},
|
||||
database: {
|
||||
postgres: {
|
||||
host: 'prod-postgres.internal',
|
||||
port: 5432
|
||||
}
|
||||
}
|
||||
port: 5432,
|
||||
},
|
||||
},
|
||||
},
|
||||
test: {
|
||||
logging: {
|
||||
level: 'debug'
|
||||
level: 'debug',
|
||||
},
|
||||
database: {
|
||||
postgres: {
|
||||
database: 'trading_bot_test'
|
||||
}
|
||||
}
|
||||
}
|
||||
database: 'trading_bot_test',
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
Object.entries(rootConfigs).forEach(([env, config]) => {
|
||||
writeFileSync(
|
||||
join(scenarios.root, 'config', `${env}.json`),
|
||||
JSON.stringify(config, null, 2)
|
||||
);
|
||||
writeFileSync(join(scenarios.root, 'config', `${env}.json`), JSON.stringify(config, null, 2));
|
||||
});
|
||||
|
||||
// Data service config
|
||||
writeFileSync(
|
||||
join(scenarios.dataService, 'config', 'development.json'),
|
||||
JSON.stringify({
|
||||
app: {
|
||||
name: 'data-ingestion'
|
||||
JSON.stringify(
|
||||
{
|
||||
app: {
|
||||
name: 'data-ingestion',
|
||||
},
|
||||
service: {
|
||||
name: 'data-ingestion',
|
||||
port: 3001,
|
||||
workers: 2,
|
||||
},
|
||||
},
|
||||
service: {
|
||||
name: 'data-ingestion',
|
||||
port: 3001,
|
||||
workers: 2
|
||||
}
|
||||
}, null, 2)
|
||||
null,
|
||||
2
|
||||
)
|
||||
);
|
||||
|
||||
// Web API config
|
||||
writeFileSync(
|
||||
join(scenarios.webApi, 'config', 'development.json'),
|
||||
JSON.stringify({
|
||||
app: {
|
||||
name: 'web-api'
|
||||
JSON.stringify(
|
||||
{
|
||||
app: {
|
||||
name: 'web-api',
|
||||
},
|
||||
service: {
|
||||
name: 'web-api',
|
||||
port: 4000,
|
||||
cors: {
|
||||
origin: ['http://localhost:3000', 'http://localhost:4200'],
|
||||
},
|
||||
},
|
||||
},
|
||||
service: {
|
||||
name: 'web-api',
|
||||
port: 4000,
|
||||
cors: {
|
||||
origin: ['http://localhost:3000', 'http://localhost:4200']
|
||||
}
|
||||
}
|
||||
}, null, 2)
|
||||
null,
|
||||
2
|
||||
)
|
||||
);
|
||||
|
||||
// Cache lib config
|
||||
writeFileSync(
|
||||
join(scenarios.cacheLib, 'config', 'development.json'),
|
||||
JSON.stringify({
|
||||
app: {
|
||||
name: 'cache-lib'
|
||||
JSON.stringify(
|
||||
{
|
||||
app: {
|
||||
name: 'cache-lib',
|
||||
},
|
||||
service: {
|
||||
name: 'cache-lib',
|
||||
},
|
||||
},
|
||||
service: {
|
||||
name: 'cache-lib'
|
||||
}
|
||||
}, null, 2)
|
||||
null,
|
||||
2
|
||||
)
|
||||
);
|
||||
|
||||
// Root .env file
|
||||
|
|
@ -401,4 +412,4 @@ WEBSHARE_API_KEY=demo-webshare-key
|
|||
DATA_SERVICE_RATE_LIMIT=1000
|
||||
`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -6,6 +6,5 @@
|
|||
"composite": true
|
||||
},
|
||||
"include": ["src/**/*"],
|
||||
"references": [
|
||||
]
|
||||
}
|
||||
"references": []
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,17 +1,17 @@
|
|||
{
|
||||
"name": "@stock-bot/di",
|
||||
"version": "1.0.0",
|
||||
"main": "./src/index.ts",
|
||||
"types": "./src/index.ts",
|
||||
"scripts": {
|
||||
"build": "tsc",
|
||||
"clean": "rm -rf dist"
|
||||
},
|
||||
"dependencies": {
|
||||
"@stock-bot/config": "workspace:*",
|
||||
"@stock-bot/logger": "workspace:*"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/pg": "^8.10.7"
|
||||
}
|
||||
}
|
||||
{
|
||||
"name": "@stock-bot/di",
|
||||
"version": "1.0.0",
|
||||
"main": "./src/index.ts",
|
||||
"types": "./src/index.ts",
|
||||
"scripts": {
|
||||
"build": "tsc",
|
||||
"clean": "rm -rf dist"
|
||||
},
|
||||
"dependencies": {
|
||||
"@stock-bot/config": "workspace:*",
|
||||
"@stock-bot/logger": "workspace:*"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/pg": "^8.10.7"
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,294 +1,301 @@
|
|||
/**
|
||||
* Awilix DI Container Setup
|
||||
* Creates a decoupled, reusable dependency injection container
|
||||
*/
|
||||
|
||||
import { Browser } from '@stock-bot/browser';
|
||||
import { createCache, type CacheProvider } from '@stock-bot/cache';
|
||||
import type { IServiceContainer } from '@stock-bot/handlers';
|
||||
import { getLogger, type Logger } from '@stock-bot/logger';
|
||||
import { MongoDBClient } from '@stock-bot/mongodb';
|
||||
import { PostgreSQLClient } from '@stock-bot/postgres';
|
||||
import { ProxyManager } from '@stock-bot/proxy';
|
||||
import { QuestDBClient } from '@stock-bot/questdb';
|
||||
import { type QueueManager } from '@stock-bot/queue';
|
||||
import { asFunction, asValue, createContainer, InjectionMode, type AwilixContainer } from 'awilix';
|
||||
import { z } from 'zod';
|
||||
|
||||
// Configuration schema with validation
|
||||
const appConfigSchema = z.object({
|
||||
redis: z.object({
|
||||
enabled: z.boolean().optional(),
|
||||
host: z.string(),
|
||||
port: z.number(),
|
||||
password: z.string().optional(),
|
||||
username: z.string().optional(),
|
||||
db: z.number().optional(),
|
||||
}),
|
||||
mongodb: z.object({
|
||||
enabled: z.boolean().optional(),
|
||||
uri: z.string(),
|
||||
database: z.string(),
|
||||
}),
|
||||
postgres: z.object({
|
||||
enabled: z.boolean().optional(),
|
||||
host: z.string(),
|
||||
port: z.number(),
|
||||
database: z.string(),
|
||||
user: z.string(),
|
||||
password: z.string(),
|
||||
}),
|
||||
questdb: z.object({
|
||||
enabled: z.boolean().optional(),
|
||||
host: z.string(),
|
||||
httpPort: z.number().optional(),
|
||||
pgPort: z.number().optional(),
|
||||
influxPort: z.number().optional(),
|
||||
database: z.string().optional(),
|
||||
}).optional(),
|
||||
proxy: z.object({
|
||||
cachePrefix: z.string().optional(),
|
||||
ttl: z.number().optional(),
|
||||
}).optional(),
|
||||
browser: z.object({
|
||||
headless: z.boolean().optional(),
|
||||
timeout: z.number().optional(),
|
||||
}).optional(),
|
||||
});
|
||||
|
||||
export type AppConfig = z.infer<typeof appConfigSchema>;
|
||||
|
||||
/**
|
||||
* Service type definitions for type-safe resolution
|
||||
*/
|
||||
export interface ServiceDefinitions {
|
||||
// Configuration
|
||||
config: AppConfig;
|
||||
logger: Logger;
|
||||
|
||||
// Core services
|
||||
cache: CacheProvider | null;
|
||||
proxyManager: ProxyManager | null;
|
||||
browser: Browser;
|
||||
queueManager: QueueManager | null;
|
||||
|
||||
// Database clients
|
||||
mongoClient: MongoDBClient | null;
|
||||
postgresClient: PostgreSQLClient | null;
|
||||
questdbClient: QuestDBClient | null;
|
||||
|
||||
// Aggregate service container
|
||||
serviceContainer: IServiceContainer;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create and configure the DI container with type safety
|
||||
*/
|
||||
export function createServiceContainer(rawConfig: unknown): AwilixContainer<ServiceDefinitions> {
|
||||
// Validate configuration
|
||||
const config = appConfigSchema.parse(rawConfig);
|
||||
|
||||
const container = createContainer<ServiceDefinitions>({
|
||||
injectionMode: InjectionMode.PROXY,
|
||||
});
|
||||
|
||||
// Register configuration values
|
||||
const registrations: any = {
|
||||
// Configuration
|
||||
config: asValue(config),
|
||||
redisConfig: asValue(config.redis),
|
||||
mongoConfig: asValue(config.mongodb),
|
||||
postgresConfig: asValue(config.postgres),
|
||||
questdbConfig: asValue(config.questdb || { host: 'localhost', httpPort: 9000, pgPort: 8812, influxPort: 9009 }),
|
||||
|
||||
// Core services with dependency injection
|
||||
logger: asFunction(() => getLogger('app')).singleton(),
|
||||
};
|
||||
|
||||
// Conditionally register cache/dragonfly
|
||||
if (config.redis?.enabled !== false) {
|
||||
registrations.cache = asFunction(({ redisConfig, logger }) =>
|
||||
createCache({
|
||||
redisConfig,
|
||||
logger,
|
||||
keyPrefix: 'cache:',
|
||||
ttl: 3600,
|
||||
enableMetrics: true,
|
||||
})
|
||||
).singleton();
|
||||
} else {
|
||||
registrations.cache = asValue(null);
|
||||
}
|
||||
|
||||
// Proxy manager depends on cache
|
||||
registrations.proxyManager = asFunction(({ cache, config, logger }) => {
|
||||
if (!cache) {
|
||||
logger.warn('Cache is disabled, ProxyManager will have limited functionality');
|
||||
return null;
|
||||
}
|
||||
const manager = new ProxyManager(
|
||||
cache,
|
||||
config.proxy || {},
|
||||
logger
|
||||
);
|
||||
return manager;
|
||||
}).singleton();
|
||||
|
||||
// Conditionally register MongoDB client
|
||||
if (config.mongodb?.enabled !== false) {
|
||||
registrations.mongoClient = asFunction(({ mongoConfig, logger }) => {
|
||||
return new MongoDBClient(mongoConfig, logger);
|
||||
}).singleton();
|
||||
} else {
|
||||
registrations.mongoClient = asValue(null);
|
||||
}
|
||||
|
||||
// Conditionally register PostgreSQL client
|
||||
if (config.postgres?.enabled !== false) {
|
||||
registrations.postgresClient = asFunction(({ postgresConfig, logger }) => {
|
||||
return new PostgreSQLClient(
|
||||
{
|
||||
host: postgresConfig.host,
|
||||
port: postgresConfig.port,
|
||||
database: postgresConfig.database,
|
||||
username: postgresConfig.user,
|
||||
password: postgresConfig.password,
|
||||
},
|
||||
logger
|
||||
);
|
||||
}).singleton();
|
||||
} else {
|
||||
registrations.postgresClient = asValue(null);
|
||||
}
|
||||
|
||||
// Conditionally register QuestDB client
|
||||
if (config.questdb?.enabled !== false) {
|
||||
registrations.questdbClient = asFunction(({ questdbConfig, logger }) => {
|
||||
console.log('Creating QuestDB client with config:', questdbConfig);
|
||||
return new QuestDBClient(
|
||||
{
|
||||
host: questdbConfig.host,
|
||||
httpPort: questdbConfig.httpPort,
|
||||
pgPort: questdbConfig.pgPort,
|
||||
influxPort: questdbConfig.influxPort,
|
||||
database: questdbConfig.database,
|
||||
// QuestDB appears to require default credentials
|
||||
user: 'admin',
|
||||
password: 'quest',
|
||||
},
|
||||
logger
|
||||
);
|
||||
}).singleton();
|
||||
} else {
|
||||
registrations.questdbClient = asValue(null);
|
||||
}
|
||||
|
||||
// Queue manager - placeholder until decoupled from singleton
|
||||
registrations.queueManager = asFunction(({ redisConfig, cache, logger }) => {
|
||||
// Import dynamically to avoid circular dependency
|
||||
const { QueueManager } = require('@stock-bot/queue');
|
||||
|
||||
// Check if already initialized (singleton pattern)
|
||||
if (QueueManager.isInitialized()) {
|
||||
return QueueManager.getInstance();
|
||||
}
|
||||
|
||||
// Initialize if not already done
|
||||
return QueueManager.initialize({
|
||||
redis: { host: redisConfig.host, port: redisConfig.port, db: redisConfig.db },
|
||||
enableScheduledJobs: true,
|
||||
delayWorkerStart: true // We'll start workers manually
|
||||
});
|
||||
}).singleton();
|
||||
|
||||
// Browser automation
|
||||
registrations.browser = asFunction(({ config, logger }) => {
|
||||
return new Browser(logger, config.browser);
|
||||
}).singleton();
|
||||
|
||||
// Build the IServiceContainer for handlers
|
||||
registrations.serviceContainer = asFunction((cradle) => ({
|
||||
logger: cradle.logger,
|
||||
cache: cradle.cache,
|
||||
proxy: cradle.proxyManager,
|
||||
browser: cradle.browser,
|
||||
mongodb: cradle.mongoClient,
|
||||
postgres: cradle.postgresClient,
|
||||
questdb: cradle.questdbClient,
|
||||
queue: cradle.queueManager,
|
||||
} as IServiceContainer)).singleton();
|
||||
|
||||
container.register(registrations);
|
||||
return container;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize async services after container creation
|
||||
*/
|
||||
export async function initializeServices(container: AwilixContainer): Promise<void> {
|
||||
const logger = container.resolve('logger');
|
||||
const config = container.resolve('config');
|
||||
|
||||
try {
|
||||
// Wait for cache to be ready first (if enabled)
|
||||
const cache = container.resolve('cache');
|
||||
if (cache && typeof cache.waitForReady === 'function') {
|
||||
await cache.waitForReady(10000);
|
||||
logger.info('Cache is ready');
|
||||
} else if (config.redis?.enabled === false) {
|
||||
logger.info('Cache is disabled');
|
||||
}
|
||||
|
||||
// Initialize proxy manager (depends on cache)
|
||||
const proxyManager = container.resolve('proxyManager');
|
||||
if (proxyManager && typeof proxyManager.initialize === 'function') {
|
||||
await proxyManager.initialize();
|
||||
logger.info('Proxy manager initialized');
|
||||
} else {
|
||||
logger.info('Proxy manager is disabled (requires cache)');
|
||||
}
|
||||
|
||||
// Connect MongoDB client (if enabled)
|
||||
const mongoClient = container.resolve('mongoClient');
|
||||
if (mongoClient && typeof mongoClient.connect === 'function') {
|
||||
await mongoClient.connect();
|
||||
logger.info('MongoDB connected');
|
||||
} else if (config.mongodb?.enabled === false) {
|
||||
logger.info('MongoDB is disabled');
|
||||
}
|
||||
|
||||
// Connect PostgreSQL client (if enabled)
|
||||
const postgresClient = container.resolve('postgresClient');
|
||||
if (postgresClient && typeof postgresClient.connect === 'function') {
|
||||
await postgresClient.connect();
|
||||
logger.info('PostgreSQL connected');
|
||||
} else if (config.postgres?.enabled === false) {
|
||||
logger.info('PostgreSQL is disabled');
|
||||
}
|
||||
|
||||
// Connect QuestDB client (if enabled)
|
||||
const questdbClient = container.resolve('questdbClient');
|
||||
if (questdbClient && typeof questdbClient.connect === 'function') {
|
||||
await questdbClient.connect();
|
||||
logger.info('QuestDB connected');
|
||||
} else if (config.questdb?.enabled === false) {
|
||||
logger.info('QuestDB is disabled');
|
||||
}
|
||||
|
||||
// Initialize browser if configured
|
||||
const browser = container.resolve('browser');
|
||||
if (browser && typeof browser.initialize === 'function') {
|
||||
await browser.initialize();
|
||||
logger.info('Browser initialized');
|
||||
}
|
||||
|
||||
logger.info('All services initialized successfully');
|
||||
} catch (error) {
|
||||
logger.error('Failed to initialize services', { error });
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
// Export typed container
|
||||
export type ServiceContainer = AwilixContainer<ServiceDefinitions>;
|
||||
export type ServiceCradle = ServiceDefinitions;
|
||||
/**
|
||||
* Awilix DI Container Setup
|
||||
* Creates a decoupled, reusable dependency injection container
|
||||
*/
|
||||
|
||||
import { asFunction, asValue, createContainer, InjectionMode, type AwilixContainer } from 'awilix';
|
||||
import { z } from 'zod';
|
||||
import { Browser } from '@stock-bot/browser';
|
||||
import { createCache, type CacheProvider } from '@stock-bot/cache';
|
||||
import type { IServiceContainer } from '@stock-bot/handlers';
|
||||
import { getLogger, type Logger } from '@stock-bot/logger';
|
||||
import { MongoDBClient } from '@stock-bot/mongodb';
|
||||
import { PostgreSQLClient } from '@stock-bot/postgres';
|
||||
import { ProxyManager } from '@stock-bot/proxy';
|
||||
import { QuestDBClient } from '@stock-bot/questdb';
|
||||
import { type QueueManager } from '@stock-bot/queue';
|
||||
|
||||
// Configuration schema with validation
|
||||
const appConfigSchema = z.object({
|
||||
redis: z.object({
|
||||
enabled: z.boolean().optional(),
|
||||
host: z.string(),
|
||||
port: z.number(),
|
||||
password: z.string().optional(),
|
||||
username: z.string().optional(),
|
||||
db: z.number().optional(),
|
||||
}),
|
||||
mongodb: z.object({
|
||||
enabled: z.boolean().optional(),
|
||||
uri: z.string(),
|
||||
database: z.string(),
|
||||
}),
|
||||
postgres: z.object({
|
||||
enabled: z.boolean().optional(),
|
||||
host: z.string(),
|
||||
port: z.number(),
|
||||
database: z.string(),
|
||||
user: z.string(),
|
||||
password: z.string(),
|
||||
}),
|
||||
questdb: z
|
||||
.object({
|
||||
enabled: z.boolean().optional(),
|
||||
host: z.string(),
|
||||
httpPort: z.number().optional(),
|
||||
pgPort: z.number().optional(),
|
||||
influxPort: z.number().optional(),
|
||||
database: z.string().optional(),
|
||||
})
|
||||
.optional(),
|
||||
proxy: z
|
||||
.object({
|
||||
cachePrefix: z.string().optional(),
|
||||
ttl: z.number().optional(),
|
||||
})
|
||||
.optional(),
|
||||
browser: z
|
||||
.object({
|
||||
headless: z.boolean().optional(),
|
||||
timeout: z.number().optional(),
|
||||
})
|
||||
.optional(),
|
||||
});
|
||||
|
||||
export type AppConfig = z.infer<typeof appConfigSchema>;
|
||||
|
||||
/**
|
||||
* Service type definitions for type-safe resolution
|
||||
*/
|
||||
export interface ServiceDefinitions {
|
||||
// Configuration
|
||||
config: AppConfig;
|
||||
logger: Logger;
|
||||
|
||||
// Core services
|
||||
cache: CacheProvider | null;
|
||||
proxyManager: ProxyManager | null;
|
||||
browser: Browser;
|
||||
queueManager: QueueManager | null;
|
||||
|
||||
// Database clients
|
||||
mongoClient: MongoDBClient | null;
|
||||
postgresClient: PostgreSQLClient | null;
|
||||
questdbClient: QuestDBClient | null;
|
||||
|
||||
// Aggregate service container
|
||||
serviceContainer: IServiceContainer;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create and configure the DI container with type safety
|
||||
*/
|
||||
export function createServiceContainer(rawConfig: unknown): AwilixContainer<ServiceDefinitions> {
|
||||
// Validate configuration
|
||||
const config = appConfigSchema.parse(rawConfig);
|
||||
|
||||
const container = createContainer<ServiceDefinitions>({
|
||||
injectionMode: InjectionMode.PROXY,
|
||||
});
|
||||
|
||||
// Register configuration values
|
||||
const registrations: any = {
|
||||
// Configuration
|
||||
config: asValue(config),
|
||||
redisConfig: asValue(config.redis),
|
||||
mongoConfig: asValue(config.mongodb),
|
||||
postgresConfig: asValue(config.postgres),
|
||||
questdbConfig: asValue(
|
||||
config.questdb || { host: 'localhost', httpPort: 9000, pgPort: 8812, influxPort: 9009 }
|
||||
),
|
||||
|
||||
// Core services with dependency injection
|
||||
logger: asFunction(() => getLogger('app')).singleton(),
|
||||
};
|
||||
|
||||
// Conditionally register cache/dragonfly
|
||||
if (config.redis?.enabled !== false) {
|
||||
registrations.cache = asFunction(({ redisConfig, logger }) =>
|
||||
createCache({
|
||||
redisConfig,
|
||||
logger,
|
||||
keyPrefix: 'cache:',
|
||||
ttl: 3600,
|
||||
enableMetrics: true,
|
||||
})
|
||||
).singleton();
|
||||
} else {
|
||||
registrations.cache = asValue(null);
|
||||
}
|
||||
|
||||
// Proxy manager depends on cache
|
||||
registrations.proxyManager = asFunction(({ cache, config, logger }) => {
|
||||
if (!cache) {
|
||||
logger.warn('Cache is disabled, ProxyManager will have limited functionality');
|
||||
return null;
|
||||
}
|
||||
const manager = new ProxyManager(cache, config.proxy || {}, logger);
|
||||
return manager;
|
||||
}).singleton();
|
||||
|
||||
// Conditionally register MongoDB client
|
||||
if (config.mongodb?.enabled !== false) {
|
||||
registrations.mongoClient = asFunction(({ mongoConfig, logger }) => {
|
||||
return new MongoDBClient(mongoConfig, logger);
|
||||
}).singleton();
|
||||
} else {
|
||||
registrations.mongoClient = asValue(null);
|
||||
}
|
||||
|
||||
// Conditionally register PostgreSQL client
|
||||
if (config.postgres?.enabled !== false) {
|
||||
registrations.postgresClient = asFunction(({ postgresConfig, logger }) => {
|
||||
return new PostgreSQLClient(
|
||||
{
|
||||
host: postgresConfig.host,
|
||||
port: postgresConfig.port,
|
||||
database: postgresConfig.database,
|
||||
username: postgresConfig.user,
|
||||
password: postgresConfig.password,
|
||||
},
|
||||
logger
|
||||
);
|
||||
}).singleton();
|
||||
} else {
|
||||
registrations.postgresClient = asValue(null);
|
||||
}
|
||||
|
||||
// Conditionally register QuestDB client
|
||||
if (config.questdb?.enabled !== false) {
|
||||
registrations.questdbClient = asFunction(({ questdbConfig, logger }) => {
|
||||
console.log('Creating QuestDB client with config:', questdbConfig);
|
||||
return new QuestDBClient(
|
||||
{
|
||||
host: questdbConfig.host,
|
||||
httpPort: questdbConfig.httpPort,
|
||||
pgPort: questdbConfig.pgPort,
|
||||
influxPort: questdbConfig.influxPort,
|
||||
database: questdbConfig.database,
|
||||
// QuestDB appears to require default credentials
|
||||
user: 'admin',
|
||||
password: 'quest',
|
||||
},
|
||||
logger
|
||||
);
|
||||
}).singleton();
|
||||
} else {
|
||||
registrations.questdbClient = asValue(null);
|
||||
}
|
||||
|
||||
// Queue manager - placeholder until decoupled from singleton
|
||||
registrations.queueManager = asFunction(({ redisConfig, cache, logger }) => {
|
||||
// Import dynamically to avoid circular dependency
|
||||
const { QueueManager } = require('@stock-bot/queue');
|
||||
|
||||
// Check if already initialized (singleton pattern)
|
||||
if (QueueManager.isInitialized()) {
|
||||
return QueueManager.getInstance();
|
||||
}
|
||||
|
||||
// Initialize if not already done
|
||||
return QueueManager.initialize({
|
||||
redis: { host: redisConfig.host, port: redisConfig.port, db: redisConfig.db },
|
||||
enableScheduledJobs: true,
|
||||
delayWorkerStart: true, // We'll start workers manually
|
||||
});
|
||||
}).singleton();
|
||||
|
||||
// Browser automation
|
||||
registrations.browser = asFunction(({ config, logger }) => {
|
||||
return new Browser(logger, config.browser);
|
||||
}).singleton();
|
||||
|
||||
// Build the IServiceContainer for handlers
|
||||
registrations.serviceContainer = asFunction(
|
||||
cradle =>
|
||||
({
|
||||
logger: cradle.logger,
|
||||
cache: cradle.cache,
|
||||
proxy: cradle.proxyManager,
|
||||
browser: cradle.browser,
|
||||
mongodb: cradle.mongoClient,
|
||||
postgres: cradle.postgresClient,
|
||||
questdb: cradle.questdbClient,
|
||||
queue: cradle.queueManager,
|
||||
}) as IServiceContainer
|
||||
).singleton();
|
||||
|
||||
container.register(registrations);
|
||||
return container;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize async services after container creation
|
||||
*/
|
||||
export async function initializeServices(container: AwilixContainer): Promise<void> {
|
||||
const logger = container.resolve('logger');
|
||||
const config = container.resolve('config');
|
||||
|
||||
try {
|
||||
// Wait for cache to be ready first (if enabled)
|
||||
const cache = container.resolve('cache');
|
||||
if (cache && typeof cache.waitForReady === 'function') {
|
||||
await cache.waitForReady(10000);
|
||||
logger.info('Cache is ready');
|
||||
} else if (config.redis?.enabled === false) {
|
||||
logger.info('Cache is disabled');
|
||||
}
|
||||
|
||||
// Initialize proxy manager (depends on cache)
|
||||
const proxyManager = container.resolve('proxyManager');
|
||||
if (proxyManager && typeof proxyManager.initialize === 'function') {
|
||||
await proxyManager.initialize();
|
||||
logger.info('Proxy manager initialized');
|
||||
} else {
|
||||
logger.info('Proxy manager is disabled (requires cache)');
|
||||
}
|
||||
|
||||
// Connect MongoDB client (if enabled)
|
||||
const mongoClient = container.resolve('mongoClient');
|
||||
if (mongoClient && typeof mongoClient.connect === 'function') {
|
||||
await mongoClient.connect();
|
||||
logger.info('MongoDB connected');
|
||||
} else if (config.mongodb?.enabled === false) {
|
||||
logger.info('MongoDB is disabled');
|
||||
}
|
||||
|
||||
// Connect PostgreSQL client (if enabled)
|
||||
const postgresClient = container.resolve('postgresClient');
|
||||
if (postgresClient && typeof postgresClient.connect === 'function') {
|
||||
await postgresClient.connect();
|
||||
logger.info('PostgreSQL connected');
|
||||
} else if (config.postgres?.enabled === false) {
|
||||
logger.info('PostgreSQL is disabled');
|
||||
}
|
||||
|
||||
// Connect QuestDB client (if enabled)
|
||||
const questdbClient = container.resolve('questdbClient');
|
||||
if (questdbClient && typeof questdbClient.connect === 'function') {
|
||||
await questdbClient.connect();
|
||||
logger.info('QuestDB connected');
|
||||
} else if (config.questdb?.enabled === false) {
|
||||
logger.info('QuestDB is disabled');
|
||||
}
|
||||
|
||||
// Initialize browser if configured
|
||||
const browser = container.resolve('browser');
|
||||
if (browser && typeof browser.initialize === 'function') {
|
||||
await browser.initialize();
|
||||
logger.info('Browser initialized');
|
||||
}
|
||||
|
||||
logger.info('All services initialized successfully');
|
||||
} catch (error) {
|
||||
logger.error('Failed to initialize services', { error });
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
// Export typed container
|
||||
export type ServiceContainer = AwilixContainer<ServiceDefinitions>;
|
||||
export type ServiceCradle = ServiceDefinitions;
|
||||
|
|
|
|||
|
|
@ -1,13 +1,13 @@
|
|||
// Export all dependency injection components
|
||||
export * from './operation-context';
|
||||
export * from './pool-size-calculator';
|
||||
export * from './types';
|
||||
|
||||
// Awilix container exports
|
||||
export {
|
||||
createServiceContainer,
|
||||
initializeServices,
|
||||
type AppConfig,
|
||||
type ServiceCradle,
|
||||
type ServiceContainer
|
||||
} from './awilix-container';
|
||||
// Export all dependency injection components
|
||||
export * from './operation-context';
|
||||
export * from './pool-size-calculator';
|
||||
export * from './types';
|
||||
|
||||
// Awilix container exports
|
||||
export {
|
||||
createServiceContainer,
|
||||
initializeServices,
|
||||
type AppConfig,
|
||||
type ServiceCradle,
|
||||
type ServiceContainer,
|
||||
} from './awilix-container';
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@
|
|||
*/
|
||||
|
||||
import { getLogger, type Logger } from '@stock-bot/logger';
|
||||
|
||||
interface ServiceResolver {
|
||||
resolve<T>(serviceName: string): T;
|
||||
resolveAsync<T>(serviceName: string): Promise<T>;
|
||||
|
|
@ -23,17 +24,19 @@ export class OperationContext {
|
|||
public readonly metadata: Record<string, any>;
|
||||
private readonly container?: ServiceResolver;
|
||||
private readonly startTime: Date;
|
||||
|
||||
|
||||
constructor(options: OperationContextOptions) {
|
||||
this.container = options.container;
|
||||
this.metadata = options.metadata || {};
|
||||
this.traceId = options.traceId || this.generateTraceId();
|
||||
this.startTime = new Date();
|
||||
|
||||
this.logger = options.parentLogger || getLogger(`${options.handlerName}:${options.operationName}`, {
|
||||
traceId: this.traceId,
|
||||
metadata: this.metadata,
|
||||
});
|
||||
|
||||
this.logger =
|
||||
options.parentLogger ||
|
||||
getLogger(`${options.handlerName}:${options.operationName}`, {
|
||||
traceId: this.traceId,
|
||||
metadata: this.metadata,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -42,8 +45,8 @@ export class OperationContext {
|
|||
static create(
|
||||
handlerName: string,
|
||||
operationName: string,
|
||||
options: {
|
||||
container?: ServiceResolver;
|
||||
options: {
|
||||
container?: ServiceResolver;
|
||||
parentLogger?: Logger;
|
||||
metadata?: Record<string, any>;
|
||||
traceId?: string;
|
||||
|
|
@ -95,7 +98,7 @@ export class OperationContext {
|
|||
*/
|
||||
logCompletion(success: boolean, error?: Error): void {
|
||||
const executionTime = this.getExecutionTime();
|
||||
|
||||
|
||||
if (success) {
|
||||
this.logger.info('Operation completed successfully', {
|
||||
executionTime,
|
||||
|
|
@ -138,4 +141,4 @@ export class OperationContext {
|
|||
private generateTraceId(): string {
|
||||
return `${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,80 +1,82 @@
|
|||
import type { ConnectionPoolConfig } from './types';
|
||||
|
||||
export interface PoolSizeRecommendation {
|
||||
min: number;
|
||||
max: number;
|
||||
idle: number;
|
||||
}
|
||||
|
||||
export class PoolSizeCalculator {
|
||||
private static readonly DEFAULT_SIZES: Record<string, PoolSizeRecommendation> = {
|
||||
// Service-level defaults
|
||||
'data-ingestion': { min: 5, max: 50, idle: 10 },
|
||||
'data-pipeline': { min: 3, max: 30, idle: 5 },
|
||||
'processing-service': { min: 2, max: 20, idle: 3 },
|
||||
'web-api': { min: 2, max: 10, idle: 2 },
|
||||
'portfolio-service': { min: 2, max: 15, idle: 3 },
|
||||
'strategy-service': { min: 3, max: 25, idle: 5 },
|
||||
'execution-service': { min: 2, max: 10, idle: 2 },
|
||||
|
||||
// Handler-level defaults
|
||||
'batch-import': { min: 10, max: 100, idle: 20 },
|
||||
'real-time': { min: 2, max: 10, idle: 3 },
|
||||
'analytics': { min: 5, max: 30, idle: 10 },
|
||||
'reporting': { min: 3, max: 20, idle: 5 },
|
||||
};
|
||||
|
||||
static calculate(
|
||||
serviceName: string,
|
||||
handlerName?: string,
|
||||
customConfig?: Partial<ConnectionPoolConfig>
|
||||
): PoolSizeRecommendation {
|
||||
// Check for custom configuration first
|
||||
if (customConfig?.minConnections && customConfig?.maxConnections) {
|
||||
return {
|
||||
min: customConfig.minConnections,
|
||||
max: customConfig.maxConnections,
|
||||
idle: Math.floor((customConfig.minConnections + customConfig.maxConnections) / 4),
|
||||
};
|
||||
}
|
||||
|
||||
// Try handler-specific sizes first, then service-level
|
||||
const key = handlerName || serviceName;
|
||||
const recommendation = this.DEFAULT_SIZES[key] || this.DEFAULT_SIZES[serviceName];
|
||||
|
||||
if (recommendation) {
|
||||
return { ...recommendation };
|
||||
}
|
||||
|
||||
// Fall back to generic defaults
|
||||
return {
|
||||
min: 2,
|
||||
max: 10,
|
||||
idle: 3,
|
||||
};
|
||||
}
|
||||
|
||||
static getOptimalPoolSize(
|
||||
expectedConcurrency: number,
|
||||
averageQueryTimeMs: number,
|
||||
targetLatencyMs: number
|
||||
): number {
|
||||
// Little's Law: L = λ * W
|
||||
// L = number of connections needed
|
||||
// λ = arrival rate (requests per second)
|
||||
// W = average time in system (seconds)
|
||||
|
||||
const requestsPerSecond = expectedConcurrency;
|
||||
const averageTimeInSystem = averageQueryTimeMs / 1000;
|
||||
|
||||
const minConnections = Math.ceil(requestsPerSecond * averageTimeInSystem);
|
||||
|
||||
// Add buffer for burst traffic (20% overhead)
|
||||
const recommendedSize = Math.ceil(minConnections * 1.2);
|
||||
|
||||
// Ensure we meet target latency
|
||||
const latencyBasedSize = Math.ceil(expectedConcurrency * (averageQueryTimeMs / targetLatencyMs));
|
||||
|
||||
return Math.max(recommendedSize, latencyBasedSize, 2); // Minimum 2 connections
|
||||
}
|
||||
}
|
||||
import type { ConnectionPoolConfig } from './types';
|
||||
|
||||
export interface PoolSizeRecommendation {
|
||||
min: number;
|
||||
max: number;
|
||||
idle: number;
|
||||
}
|
||||
|
||||
export class PoolSizeCalculator {
|
||||
private static readonly DEFAULT_SIZES: Record<string, PoolSizeRecommendation> = {
|
||||
// Service-level defaults
|
||||
'data-ingestion': { min: 5, max: 50, idle: 10 },
|
||||
'data-pipeline': { min: 3, max: 30, idle: 5 },
|
||||
'processing-service': { min: 2, max: 20, idle: 3 },
|
||||
'web-api': { min: 2, max: 10, idle: 2 },
|
||||
'portfolio-service': { min: 2, max: 15, idle: 3 },
|
||||
'strategy-service': { min: 3, max: 25, idle: 5 },
|
||||
'execution-service': { min: 2, max: 10, idle: 2 },
|
||||
|
||||
// Handler-level defaults
|
||||
'batch-import': { min: 10, max: 100, idle: 20 },
|
||||
'real-time': { min: 2, max: 10, idle: 3 },
|
||||
analytics: { min: 5, max: 30, idle: 10 },
|
||||
reporting: { min: 3, max: 20, idle: 5 },
|
||||
};
|
||||
|
||||
static calculate(
|
||||
serviceName: string,
|
||||
handlerName?: string,
|
||||
customConfig?: Partial<ConnectionPoolConfig>
|
||||
): PoolSizeRecommendation {
|
||||
// Check for custom configuration first
|
||||
if (customConfig?.minConnections && customConfig?.maxConnections) {
|
||||
return {
|
||||
min: customConfig.minConnections,
|
||||
max: customConfig.maxConnections,
|
||||
idle: Math.floor((customConfig.minConnections + customConfig.maxConnections) / 4),
|
||||
};
|
||||
}
|
||||
|
||||
// Try handler-specific sizes first, then service-level
|
||||
const key = handlerName || serviceName;
|
||||
const recommendation = this.DEFAULT_SIZES[key] || this.DEFAULT_SIZES[serviceName];
|
||||
|
||||
if (recommendation) {
|
||||
return { ...recommendation };
|
||||
}
|
||||
|
||||
// Fall back to generic defaults
|
||||
return {
|
||||
min: 2,
|
||||
max: 10,
|
||||
idle: 3,
|
||||
};
|
||||
}
|
||||
|
||||
static getOptimalPoolSize(
|
||||
expectedConcurrency: number,
|
||||
averageQueryTimeMs: number,
|
||||
targetLatencyMs: number
|
||||
): number {
|
||||
// Little's Law: L = λ * W
|
||||
// L = number of connections needed
|
||||
// λ = arrival rate (requests per second)
|
||||
// W = average time in system (seconds)
|
||||
|
||||
const requestsPerSecond = expectedConcurrency;
|
||||
const averageTimeInSystem = averageQueryTimeMs / 1000;
|
||||
|
||||
const minConnections = Math.ceil(requestsPerSecond * averageTimeInSystem);
|
||||
|
||||
// Add buffer for burst traffic (20% overhead)
|
||||
const recommendedSize = Math.ceil(minConnections * 1.2);
|
||||
|
||||
// Ensure we meet target latency
|
||||
const latencyBasedSize = Math.ceil(
|
||||
expectedConcurrency * (averageQueryTimeMs / targetLatencyMs)
|
||||
);
|
||||
|
||||
return Math.max(recommendedSize, latencyBasedSize, 2); // Minimum 2 connections
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,68 +1,71 @@
|
|||
// Generic types to avoid circular dependencies
|
||||
export interface GenericClientConfig {
|
||||
[key: string]: any;
|
||||
}
|
||||
|
||||
export interface ConnectionPoolConfig {
|
||||
name: string;
|
||||
poolSize?: number;
|
||||
minConnections?: number;
|
||||
maxConnections?: number;
|
||||
idleTimeoutMillis?: number;
|
||||
connectionTimeoutMillis?: number;
|
||||
enableMetrics?: boolean;
|
||||
}
|
||||
|
||||
export interface MongoDBPoolConfig extends ConnectionPoolConfig {
|
||||
config: GenericClientConfig;
|
||||
}
|
||||
|
||||
export interface PostgreSQLPoolConfig extends ConnectionPoolConfig {
|
||||
config: GenericClientConfig;
|
||||
}
|
||||
|
||||
export interface CachePoolConfig extends ConnectionPoolConfig {
|
||||
config: GenericClientConfig;
|
||||
}
|
||||
|
||||
export interface QueuePoolConfig extends ConnectionPoolConfig {
|
||||
config: GenericClientConfig;
|
||||
}
|
||||
|
||||
export interface ConnectionFactoryConfig {
|
||||
service: string;
|
||||
environment: 'development' | 'production' | 'test';
|
||||
pools?: {
|
||||
mongodb?: Partial<MongoDBPoolConfig>;
|
||||
postgres?: Partial<PostgreSQLPoolConfig>;
|
||||
cache?: Partial<CachePoolConfig>;
|
||||
queue?: Partial<QueuePoolConfig>;
|
||||
};
|
||||
}
|
||||
|
||||
export interface ConnectionPool<T> {
|
||||
name: string;
|
||||
client: T;
|
||||
metrics: PoolMetrics;
|
||||
health(): Promise<boolean>;
|
||||
dispose(): Promise<void>;
|
||||
}
|
||||
|
||||
export interface PoolMetrics {
|
||||
created: Date;
|
||||
totalConnections: number;
|
||||
activeConnections: number;
|
||||
idleConnections: number;
|
||||
waitingRequests: number;
|
||||
errors: number;
|
||||
}
|
||||
|
||||
export interface ConnectionFactory {
|
||||
createMongoDB(config: MongoDBPoolConfig): Promise<ConnectionPool<any>>;
|
||||
createPostgreSQL(config: PostgreSQLPoolConfig): Promise<ConnectionPool<any>>;
|
||||
createCache(config: CachePoolConfig): Promise<ConnectionPool<any>>;
|
||||
createQueue(config: QueuePoolConfig): Promise<ConnectionPool<any>>;
|
||||
getPool(type: 'mongodb' | 'postgres' | 'cache' | 'queue', name: string): ConnectionPool<any> | undefined;
|
||||
listPools(): Array<{ type: string; name: string; metrics: PoolMetrics }>;
|
||||
disposeAll(): Promise<void>;
|
||||
}
|
||||
// Generic types to avoid circular dependencies
|
||||
export interface GenericClientConfig {
|
||||
[key: string]: any;
|
||||
}
|
||||
|
||||
export interface ConnectionPoolConfig {
|
||||
name: string;
|
||||
poolSize?: number;
|
||||
minConnections?: number;
|
||||
maxConnections?: number;
|
||||
idleTimeoutMillis?: number;
|
||||
connectionTimeoutMillis?: number;
|
||||
enableMetrics?: boolean;
|
||||
}
|
||||
|
||||
export interface MongoDBPoolConfig extends ConnectionPoolConfig {
|
||||
config: GenericClientConfig;
|
||||
}
|
||||
|
||||
export interface PostgreSQLPoolConfig extends ConnectionPoolConfig {
|
||||
config: GenericClientConfig;
|
||||
}
|
||||
|
||||
export interface CachePoolConfig extends ConnectionPoolConfig {
|
||||
config: GenericClientConfig;
|
||||
}
|
||||
|
||||
export interface QueuePoolConfig extends ConnectionPoolConfig {
|
||||
config: GenericClientConfig;
|
||||
}
|
||||
|
||||
export interface ConnectionFactoryConfig {
|
||||
service: string;
|
||||
environment: 'development' | 'production' | 'test';
|
||||
pools?: {
|
||||
mongodb?: Partial<MongoDBPoolConfig>;
|
||||
postgres?: Partial<PostgreSQLPoolConfig>;
|
||||
cache?: Partial<CachePoolConfig>;
|
||||
queue?: Partial<QueuePoolConfig>;
|
||||
};
|
||||
}
|
||||
|
||||
export interface ConnectionPool<T> {
|
||||
name: string;
|
||||
client: T;
|
||||
metrics: PoolMetrics;
|
||||
health(): Promise<boolean>;
|
||||
dispose(): Promise<void>;
|
||||
}
|
||||
|
||||
export interface PoolMetrics {
|
||||
created: Date;
|
||||
totalConnections: number;
|
||||
activeConnections: number;
|
||||
idleConnections: number;
|
||||
waitingRequests: number;
|
||||
errors: number;
|
||||
}
|
||||
|
||||
export interface ConnectionFactory {
|
||||
createMongoDB(config: MongoDBPoolConfig): Promise<ConnectionPool<any>>;
|
||||
createPostgreSQL(config: PostgreSQLPoolConfig): Promise<ConnectionPool<any>>;
|
||||
createCache(config: CachePoolConfig): Promise<ConnectionPool<any>>;
|
||||
createQueue(config: QueuePoolConfig): Promise<ConnectionPool<any>>;
|
||||
getPool(
|
||||
type: 'mongodb' | 'postgres' | 'cache' | 'queue',
|
||||
name: string
|
||||
): ConnectionPool<any> | undefined;
|
||||
listPools(): Array<{ type: string; name: string; metrics: PoolMetrics }>;
|
||||
disposeAll(): Promise<void>;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,178 +1,183 @@
|
|||
/**
|
||||
* Test DI library functionality
|
||||
*/
|
||||
import { test, expect, describe } from 'bun:test';
|
||||
import { ServiceContainer, ConnectionFactory, OperationContext, PoolSizeCalculator } from '../src/index';
|
||||
|
||||
describe('DI Library', () => {
|
||||
test('ServiceContainer - sync resolution', () => {
|
||||
const container = new ServiceContainer('test');
|
||||
|
||||
container.register({
|
||||
name: 'testService',
|
||||
factory: () => ({ value: 'test' }),
|
||||
singleton: true,
|
||||
});
|
||||
|
||||
const service = container.resolve<{ value: string }>('testService');
|
||||
expect(service.value).toBe('test');
|
||||
});
|
||||
|
||||
test('ServiceContainer - async resolution', async () => {
|
||||
const container = new ServiceContainer('test');
|
||||
|
||||
container.register({
|
||||
name: 'asyncService',
|
||||
factory: async () => ({ value: 'async-test' }),
|
||||
singleton: true,
|
||||
});
|
||||
|
||||
const service = await container.resolveAsync<{ value: string }>('asyncService');
|
||||
expect(service.value).toBe('async-test');
|
||||
});
|
||||
|
||||
test('ServiceContainer - scoped container', () => {
|
||||
const container = new ServiceContainer('test');
|
||||
|
||||
container.register({
|
||||
name: 'testService',
|
||||
factory: () => ({ value: 'test' }),
|
||||
singleton: true,
|
||||
});
|
||||
|
||||
const scopedContainer = container.createScope();
|
||||
const service = scopedContainer.resolve<{ value: string }>('testService');
|
||||
expect(service.value).toBe('test');
|
||||
});
|
||||
|
||||
test('ServiceContainer - error on unregistered service', () => {
|
||||
const container = new ServiceContainer('test');
|
||||
|
||||
expect(() => {
|
||||
container.resolve('nonexistent');
|
||||
}).toThrow('Service nonexistent not registered');
|
||||
});
|
||||
|
||||
test('ServiceContainer - async service throws error on sync resolve', () => {
|
||||
const container = new ServiceContainer('test');
|
||||
|
||||
container.register({
|
||||
name: 'asyncService',
|
||||
factory: async () => ({ value: 'async' }),
|
||||
singleton: true,
|
||||
});
|
||||
|
||||
expect(() => {
|
||||
container.resolve('asyncService');
|
||||
}).toThrow('Service asyncService is async. Use resolveAsync() instead.');
|
||||
});
|
||||
|
||||
test('ServiceContainer - disposal', async () => {
|
||||
const container = new ServiceContainer('test');
|
||||
let disposed = false;
|
||||
|
||||
container.register({
|
||||
name: 'disposableService',
|
||||
factory: () => ({ value: 'test' }),
|
||||
singleton: true,
|
||||
dispose: async () => {
|
||||
disposed = true;
|
||||
},
|
||||
});
|
||||
|
||||
// Create instance
|
||||
container.resolve('disposableService');
|
||||
|
||||
// Dispose container
|
||||
await container.dispose();
|
||||
expect(disposed).toBe(true);
|
||||
});
|
||||
|
||||
test('OperationContext - enhanced functionality', () => {
|
||||
const container = new ServiceContainer('test');
|
||||
const context = OperationContext.create('test-handler', 'test-operation', {
|
||||
container,
|
||||
metadata: { userId: '123' },
|
||||
});
|
||||
|
||||
expect(context).toBeDefined();
|
||||
expect(context.logger).toBeDefined();
|
||||
expect(context.traceId).toBeDefined();
|
||||
expect(context.metadata.userId).toBe('123');
|
||||
expect(context.getExecutionTime()).toBeGreaterThanOrEqual(0);
|
||||
});
|
||||
|
||||
test('OperationContext - service resolution', () => {
|
||||
const container = new ServiceContainer('test');
|
||||
|
||||
container.register({
|
||||
name: 'testService',
|
||||
factory: () => ({ value: 'resolved' }),
|
||||
singleton: true,
|
||||
});
|
||||
|
||||
const context = OperationContext.create('test-handler', 'test-operation', {
|
||||
container,
|
||||
});
|
||||
|
||||
const service = context.resolve<{ value: string }>('testService');
|
||||
expect(service.value).toBe('resolved');
|
||||
});
|
||||
|
||||
test('ConnectionFactory - creation', () => {
|
||||
const factory = new ConnectionFactory({
|
||||
service: 'test',
|
||||
environment: 'development',
|
||||
});
|
||||
|
||||
expect(factory).toBeDefined();
|
||||
expect(factory.listPools()).toEqual([]);
|
||||
});
|
||||
|
||||
test('OperationContext - creation', () => {
|
||||
const container = new ServiceContainer('test');
|
||||
const context = OperationContext.create('test-handler', 'test-operation', {
|
||||
container,
|
||||
});
|
||||
|
||||
expect(context).toBeDefined();
|
||||
expect(context.logger).toBeDefined();
|
||||
});
|
||||
|
||||
test('OperationContext - child context', () => {
|
||||
const context = OperationContext.create('test-handler', 'test-operation');
|
||||
const child = context.createChild('child-operation');
|
||||
|
||||
expect(child).toBeDefined();
|
||||
expect(child.logger).toBeDefined();
|
||||
});
|
||||
|
||||
test('PoolSizeCalculator - service defaults', () => {
|
||||
const poolSize = PoolSizeCalculator.calculate('data-ingestion');
|
||||
expect(poolSize).toEqual({ min: 5, max: 50, idle: 10 });
|
||||
});
|
||||
|
||||
test('PoolSizeCalculator - handler defaults', () => {
|
||||
const poolSize = PoolSizeCalculator.calculate('unknown-service', 'batch-import');
|
||||
expect(poolSize).toEqual({ min: 10, max: 100, idle: 20 });
|
||||
});
|
||||
|
||||
test('PoolSizeCalculator - fallback defaults', () => {
|
||||
const poolSize = PoolSizeCalculator.calculate('unknown-service', 'unknown-handler');
|
||||
expect(poolSize).toEqual({ min: 2, max: 10, idle: 3 });
|
||||
});
|
||||
|
||||
test('PoolSizeCalculator - custom config', () => {
|
||||
const poolSize = PoolSizeCalculator.calculate('test-service', undefined, {
|
||||
minConnections: 5,
|
||||
maxConnections: 15,
|
||||
});
|
||||
expect(poolSize).toEqual({ min: 5, max: 15, idle: 5 });
|
||||
});
|
||||
|
||||
test('PoolSizeCalculator - optimal size calculation', () => {
|
||||
const optimalSize = PoolSizeCalculator.getOptimalPoolSize(10, 100, 50);
|
||||
expect(optimalSize).toBeGreaterThan(0);
|
||||
expect(typeof optimalSize).toBe('number');
|
||||
});
|
||||
});
|
||||
/**
|
||||
* Test DI library functionality
|
||||
*/
|
||||
import { describe, expect, test } from 'bun:test';
|
||||
import {
|
||||
ConnectionFactory,
|
||||
OperationContext,
|
||||
PoolSizeCalculator,
|
||||
ServiceContainer,
|
||||
} from '../src/index';
|
||||
|
||||
describe('DI Library', () => {
|
||||
test('ServiceContainer - sync resolution', () => {
|
||||
const container = new ServiceContainer('test');
|
||||
|
||||
container.register({
|
||||
name: 'testService',
|
||||
factory: () => ({ value: 'test' }),
|
||||
singleton: true,
|
||||
});
|
||||
|
||||
const service = container.resolve<{ value: string }>('testService');
|
||||
expect(service.value).toBe('test');
|
||||
});
|
||||
|
||||
test('ServiceContainer - async resolution', async () => {
|
||||
const container = new ServiceContainer('test');
|
||||
|
||||
container.register({
|
||||
name: 'asyncService',
|
||||
factory: async () => ({ value: 'async-test' }),
|
||||
singleton: true,
|
||||
});
|
||||
|
||||
const service = await container.resolveAsync<{ value: string }>('asyncService');
|
||||
expect(service.value).toBe('async-test');
|
||||
});
|
||||
|
||||
test('ServiceContainer - scoped container', () => {
|
||||
const container = new ServiceContainer('test');
|
||||
|
||||
container.register({
|
||||
name: 'testService',
|
||||
factory: () => ({ value: 'test' }),
|
||||
singleton: true,
|
||||
});
|
||||
|
||||
const scopedContainer = container.createScope();
|
||||
const service = scopedContainer.resolve<{ value: string }>('testService');
|
||||
expect(service.value).toBe('test');
|
||||
});
|
||||
|
||||
test('ServiceContainer - error on unregistered service', () => {
|
||||
const container = new ServiceContainer('test');
|
||||
|
||||
expect(() => {
|
||||
container.resolve('nonexistent');
|
||||
}).toThrow('Service nonexistent not registered');
|
||||
});
|
||||
|
||||
test('ServiceContainer - async service throws error on sync resolve', () => {
|
||||
const container = new ServiceContainer('test');
|
||||
|
||||
container.register({
|
||||
name: 'asyncService',
|
||||
factory: async () => ({ value: 'async' }),
|
||||
singleton: true,
|
||||
});
|
||||
|
||||
expect(() => {
|
||||
container.resolve('asyncService');
|
||||
}).toThrow('Service asyncService is async. Use resolveAsync() instead.');
|
||||
});
|
||||
|
||||
test('ServiceContainer - disposal', async () => {
|
||||
const container = new ServiceContainer('test');
|
||||
let disposed = false;
|
||||
|
||||
container.register({
|
||||
name: 'disposableService',
|
||||
factory: () => ({ value: 'test' }),
|
||||
singleton: true,
|
||||
dispose: async () => {
|
||||
disposed = true;
|
||||
},
|
||||
});
|
||||
|
||||
// Create instance
|
||||
container.resolve('disposableService');
|
||||
|
||||
// Dispose container
|
||||
await container.dispose();
|
||||
expect(disposed).toBe(true);
|
||||
});
|
||||
|
||||
test('OperationContext - enhanced functionality', () => {
|
||||
const container = new ServiceContainer('test');
|
||||
const context = OperationContext.create('test-handler', 'test-operation', {
|
||||
container,
|
||||
metadata: { userId: '123' },
|
||||
});
|
||||
|
||||
expect(context).toBeDefined();
|
||||
expect(context.logger).toBeDefined();
|
||||
expect(context.traceId).toBeDefined();
|
||||
expect(context.metadata.userId).toBe('123');
|
||||
expect(context.getExecutionTime()).toBeGreaterThanOrEqual(0);
|
||||
});
|
||||
|
||||
test('OperationContext - service resolution', () => {
|
||||
const container = new ServiceContainer('test');
|
||||
|
||||
container.register({
|
||||
name: 'testService',
|
||||
factory: () => ({ value: 'resolved' }),
|
||||
singleton: true,
|
||||
});
|
||||
|
||||
const context = OperationContext.create('test-handler', 'test-operation', {
|
||||
container,
|
||||
});
|
||||
|
||||
const service = context.resolve<{ value: string }>('testService');
|
||||
expect(service.value).toBe('resolved');
|
||||
});
|
||||
|
||||
test('ConnectionFactory - creation', () => {
|
||||
const factory = new ConnectionFactory({
|
||||
service: 'test',
|
||||
environment: 'development',
|
||||
});
|
||||
|
||||
expect(factory).toBeDefined();
|
||||
expect(factory.listPools()).toEqual([]);
|
||||
});
|
||||
|
||||
test('OperationContext - creation', () => {
|
||||
const container = new ServiceContainer('test');
|
||||
const context = OperationContext.create('test-handler', 'test-operation', {
|
||||
container,
|
||||
});
|
||||
|
||||
expect(context).toBeDefined();
|
||||
expect(context.logger).toBeDefined();
|
||||
});
|
||||
|
||||
test('OperationContext - child context', () => {
|
||||
const context = OperationContext.create('test-handler', 'test-operation');
|
||||
const child = context.createChild('child-operation');
|
||||
|
||||
expect(child).toBeDefined();
|
||||
expect(child.logger).toBeDefined();
|
||||
});
|
||||
|
||||
test('PoolSizeCalculator - service defaults', () => {
|
||||
const poolSize = PoolSizeCalculator.calculate('data-ingestion');
|
||||
expect(poolSize).toEqual({ min: 5, max: 50, idle: 10 });
|
||||
});
|
||||
|
||||
test('PoolSizeCalculator - handler defaults', () => {
|
||||
const poolSize = PoolSizeCalculator.calculate('unknown-service', 'batch-import');
|
||||
expect(poolSize).toEqual({ min: 10, max: 100, idle: 20 });
|
||||
});
|
||||
|
||||
test('PoolSizeCalculator - fallback defaults', () => {
|
||||
const poolSize = PoolSizeCalculator.calculate('unknown-service', 'unknown-handler');
|
||||
expect(poolSize).toEqual({ min: 2, max: 10, idle: 3 });
|
||||
});
|
||||
|
||||
test('PoolSizeCalculator - custom config', () => {
|
||||
const poolSize = PoolSizeCalculator.calculate('test-service', undefined, {
|
||||
minConnections: 5,
|
||||
maxConnections: 15,
|
||||
});
|
||||
expect(poolSize).toEqual({ min: 5, max: 15, idle: 5 });
|
||||
});
|
||||
|
||||
test('PoolSizeCalculator - optimal size calculation', () => {
|
||||
const optimalSize = PoolSizeCalculator.getOptimalPoolSize(10, 100, 50);
|
||||
expect(optimalSize).toBeGreaterThan(0);
|
||||
expect(typeof optimalSize).toBe('number');
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@ -1,17 +1,14 @@
|
|||
{
  "extends": "../../../tsconfig.json",
  "compilerOptions": {
    "rootDir": "./src",
    "outDir": "./dist",
    "composite": true,
    "declaration": true,
    "declarationMap": true,
    "types": ["node", "bun-types"]
  },
  "include": ["src/**/*.ts"],
  "exclude": ["node_modules", "dist", "test"],
  "references": [
    { "path": "../config" },
    { "path": "../logger" }
  ]
}
|
||||
{
  "extends": "../../../tsconfig.json",
  "compilerOptions": {
    "rootDir": "./src",
    "outDir": "./dist",
    "composite": true,
    "declaration": true,
    "declarationMap": true,
    "types": ["node", "bun-types"]
  },
  "include": ["src/**/*.ts"],
  "exclude": ["node_modules", "dist", "test"],
  "references": [{ "path": "../config" }, { "path": "../logger" }]
}
|
||||
|
|
|
|||
|
|
@ -1,23 +1,23 @@
|
|||
{
  "name": "@stock-bot/handlers",
  "version": "1.0.0",
  "description": "Universal handler system for queue and event-driven operations",
  "main": "./src/index.ts",
  "types": "./src/index.ts",
  "scripts": {
    "build": "tsc",
    "clean": "rimraf dist",
    "test": "bun test"
  },
  "dependencies": {
    "@stock-bot/config": "workspace:*",
    "@stock-bot/logger": "workspace:*",
    "@stock-bot/types": "workspace:*",
    "@stock-bot/di": "workspace:*"
  },
  "devDependencies": {
    "@types/node": "^20.11.0",
    "typescript": "^5.3.0",
    "bun-types": "^1.2.15"
  }
}
|
||||
{
  "name": "@stock-bot/handlers",
  "version": "1.0.0",
  "description": "Universal handler system for queue and event-driven operations",
  "main": "./src/index.ts",
  "types": "./src/index.ts",
  "scripts": {
    "build": "tsc",
    "clean": "rimraf dist",
    "test": "bun test"
  },
  "dependencies": {
    "@stock-bot/config": "workspace:*",
    "@stock-bot/logger": "workspace:*",
    "@stock-bot/types": "workspace:*",
    "@stock-bot/di": "workspace:*"
  },
  "devDependencies": {
    "@types/node": "^20.11.0",
    "typescript": "^5.3.0",
    "bun-types": "^1.2.15"
  }
}
|
||||
|
|
|
|||
|
|
@ -1,297 +1,307 @@
|
|||
import { getLogger } from '@stock-bot/logger';
|
||||
import { createJobHandler, handlerRegistry, type HandlerConfigWithSchedule } from '@stock-bot/types';
|
||||
import { fetch } from '@stock-bot/utils';
|
||||
import type { Collection } from 'mongodb';
|
||||
import type { IServiceContainer } from '../types/service-container';
|
||||
import type { ExecutionContext, IHandler } from '../types/types';
|
||||
|
||||
/**
|
||||
* Abstract base class for all handlers with improved DI
|
||||
* Provides common functionality and structure for queue/event operations
|
||||
*/
|
||||
export abstract class BaseHandler implements IHandler {
|
||||
// Direct service properties - flattened for cleaner access
|
||||
readonly logger;
|
||||
readonly cache;
|
||||
readonly queue;
|
||||
readonly proxy;
|
||||
readonly browser;
|
||||
readonly mongodb;
|
||||
readonly postgres;
|
||||
readonly questdb;
|
||||
|
||||
private handlerName: string;
|
||||
|
||||
constructor(services: IServiceContainer, handlerName?: string) {
|
||||
// Flatten all services onto the handler instance
|
||||
this.logger = getLogger(this.constructor.name);
|
||||
this.cache = services.cache;
|
||||
this.queue = services.queue;
|
||||
this.proxy = services.proxy;
|
||||
this.browser = services.browser;
|
||||
this.mongodb = services.mongodb;
|
||||
this.postgres = services.postgres;
|
||||
this.questdb = services.questdb;
|
||||
|
||||
// Read handler name from decorator first, then fallback to parameter or class name
|
||||
const constructor = this.constructor as any;
|
||||
this.handlerName = constructor.__handlerName || handlerName || this.constructor.name.toLowerCase();
|
||||
}
|
||||
|
||||
/**
|
||||
* Main execution method - automatically routes to decorated methods
|
||||
* Works with queue (events commented for future)
|
||||
*/
|
||||
async execute(operation: string, input: unknown, context: ExecutionContext): Promise<unknown> {
|
||||
const constructor = this.constructor as any;
|
||||
const operations = constructor.__operations || [];
|
||||
|
||||
// Debug logging
|
||||
this.logger.debug('Handler execute called', {
|
||||
handler: this.handlerName,
|
||||
operation,
|
||||
availableOperations: operations.map((op: any) => ({ name: op.name, method: op.method }))
|
||||
});
|
||||
|
||||
// Find the operation metadata
|
||||
const operationMeta = operations.find((op: any) => op.name === operation);
|
||||
if (!operationMeta) {
|
||||
this.logger.error('Operation not found', {
|
||||
requestedOperation: operation,
|
||||
availableOperations: operations.map((op: any) => op.name)
|
||||
});
|
||||
throw new Error(`Unknown operation: ${operation}`);
|
||||
}
|
||||
|
||||
// Get the method from the instance and call it
|
||||
const method = (this as any)[operationMeta.method];
|
||||
if (typeof method !== 'function') {
|
||||
throw new Error(`Operation method '${operationMeta.method}' not found on handler`);
|
||||
}
|
||||
|
||||
this.logger.debug('Executing operation method', {
|
||||
operation,
|
||||
method: operationMeta.method
|
||||
});
|
||||
|
||||
return await method.call(this, input, context);
|
||||
}
|
||||
|
||||
async scheduleOperation(operation: string, payload: unknown, delay?: number): Promise<void> {
|
||||
if (!this.queue) {
|
||||
throw new Error('Queue service is not available');
|
||||
}
|
||||
const queue = this.queue.getQueue(this.handlerName);
|
||||
const jobData = {
|
||||
handler: this.handlerName,
|
||||
operation,
|
||||
payload
|
||||
};
|
||||
await queue.add(operation, jobData, { delay });
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Create execution context for operations
|
||||
*/
|
||||
protected createExecutionContext(type: 'http' | 'queue' | 'scheduled', metadata: Record<string, any> = {}): ExecutionContext {
|
||||
return {
|
||||
type,
|
||||
metadata: {
|
||||
...metadata,
|
||||
timestamp: Date.now(),
|
||||
traceId: `${this.constructor.name}-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper methods for common operations
|
||||
*/
|
||||
|
||||
/**
|
||||
* Get a MongoDB collection with type safety
|
||||
*/
|
||||
protected collection<T extends {} = any>(name: string): Collection<T> {
|
||||
if (!this.mongodb) {
|
||||
throw new Error('MongoDB service is not available');
|
||||
}
|
||||
return this.mongodb.collection(name);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set cache with handler-prefixed key
|
||||
*/
|
||||
protected async cacheSet(key: string, value: any, ttl?: number): Promise<void> {
|
||||
if (!this.cache) {
|
||||
return;
|
||||
}
|
||||
return this.cache.set(`${this.handlerName}:${key}`, value, ttl);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get cache with handler-prefixed key
|
||||
*/
|
||||
protected async cacheGet<T = any>(key: string): Promise<T | null> {
|
||||
if (!this.cache) {
|
||||
return null;
|
||||
}
|
||||
return this.cache.get(`${this.handlerName}:${key}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete cache with handler-prefixed key
|
||||
*/
|
||||
protected async cacheDel(key: string): Promise<void> {
|
||||
if (!this.cache) {
|
||||
return;
|
||||
}
|
||||
return this.cache.del(`${this.handlerName}:${key}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Schedule operation with delay in seconds
|
||||
*/
|
||||
protected async scheduleIn(operation: string, payload: unknown, delaySeconds: number): Promise<void> {
|
||||
return this.scheduleOperation(operation, payload, delaySeconds * 1000);
|
||||
}
|
||||
|
||||
/**
|
||||
* Log with handler context
|
||||
*/
|
||||
protected log(level: 'info' | 'warn' | 'error' | 'debug', message: string, meta?: any): void {
|
||||
this.logger[level](message, { handler: this.handlerName, ...meta });
|
||||
}
|
||||
|
||||
/**
|
||||
* HTTP client helper using fetch from utils
|
||||
*/
|
||||
protected get http() {
|
||||
return {
|
||||
get: (url: string, options?: any) =>
|
||||
fetch(url, { ...options, method: 'GET', logger: this.logger }),
|
||||
post: (url: string, data?: any, options?: any) =>
|
||||
fetch(url, {
|
||||
...options,
|
||||
method: 'POST',
|
||||
body: JSON.stringify(data),
|
||||
headers: { 'Content-Type': 'application/json', ...options?.headers },
|
||||
logger: this.logger
|
||||
}),
|
||||
put: (url: string, data?: any, options?: any) =>
|
||||
fetch(url, {
|
||||
...options,
|
||||
method: 'PUT',
|
||||
body: JSON.stringify(data),
|
||||
headers: { 'Content-Type': 'application/json', ...options?.headers },
|
||||
logger: this.logger
|
||||
}),
|
||||
delete: (url: string, options?: any) =>
|
||||
fetch(url, { ...options, method: 'DELETE', logger: this.logger }),
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a service is available
|
||||
*/
|
||||
protected hasService(name: keyof IServiceContainer): boolean {
|
||||
const service = this[name as keyof this];
|
||||
return service !== null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Event methods - commented for future
|
||||
*/
|
||||
// protected async publishEvent(eventName: string, payload: unknown): Promise<void> {
|
||||
// const eventBus = await this.container.resolveAsync('eventBus');
|
||||
// await eventBus.publish(eventName, payload);
|
||||
// }
|
||||
|
||||
/**
|
||||
* Register this handler using decorator metadata
|
||||
* Automatically reads @Handler, @Operation, and @QueueSchedule decorators
|
||||
*/
|
||||
register(): void {
|
||||
const constructor = this.constructor as any;
|
||||
const handlerName = constructor.__handlerName || this.handlerName;
|
||||
const operations = constructor.__operations || [];
|
||||
const schedules = constructor.__schedules || [];
|
||||
|
||||
// Create operation handlers from decorator metadata
|
||||
const operationHandlers: Record<string, any> = {};
|
||||
for (const op of operations) {
|
||||
operationHandlers[op.name] = createJobHandler(async (payload) => {
|
||||
const context: ExecutionContext = {
|
||||
type: 'queue',
|
||||
metadata: { source: 'queue', timestamp: Date.now() }
|
||||
};
|
||||
return await this.execute(op.name, payload, context);
|
||||
});
|
||||
}
|
||||
|
||||
// Create scheduled jobs from decorator metadata
|
||||
const scheduledJobs = schedules.map((schedule: any) => {
|
||||
// Find the operation name from the method name
|
||||
const operation = operations.find((op: any) => op.method === schedule.operation);
|
||||
return {
|
||||
type: `${handlerName}-${schedule.operation}`,
|
||||
operation: operation?.name || schedule.operation,
|
||||
cronPattern: schedule.cronPattern,
|
||||
priority: schedule.priority || 5,
|
||||
immediately: schedule.immediately || false,
|
||||
description: schedule.description || `${handlerName} ${schedule.operation}`,
|
||||
payload: this.getScheduledJobPayload?.(schedule.operation),
|
||||
};
|
||||
});
|
||||
|
||||
const config: HandlerConfigWithSchedule = {
|
||||
name: handlerName,
|
||||
operations: operationHandlers,
|
||||
scheduledJobs,
|
||||
};
|
||||
|
||||
handlerRegistry.registerWithSchedule(config);
|
||||
this.logger.info('Handler registered using decorator metadata', {
|
||||
handlerName,
|
||||
operations: operations.map((op: any) => ({ name: op.name, method: op.method })),
|
||||
scheduledJobs: scheduledJobs.map((job: any) => ({
|
||||
operation: job.operation,
|
||||
cronPattern: job.cronPattern,
|
||||
immediately: job.immediately
|
||||
}))
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Override this method to provide payloads for scheduled jobs
|
||||
* @param operation The operation name that needs a payload
|
||||
* @returns The payload for the scheduled job, or undefined
|
||||
*/
|
||||
protected getScheduledJobPayload?(operation: string): any;
|
||||
|
||||
/**
|
||||
* Lifecycle hooks - can be overridden by subclasses
|
||||
*/
|
||||
async onInit?(): Promise<void>;
|
||||
async onStart?(): Promise<void>;
|
||||
async onStop?(): Promise<void>;
|
||||
async onDispose?(): Promise<void>;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Specialized handler for operations that have scheduled jobs
|
||||
*/
|
||||
export abstract class ScheduledHandler extends BaseHandler {
|
||||
/**
|
||||
* Get scheduled job configurations for this handler
|
||||
* Override in subclasses to define schedules
|
||||
*/
|
||||
getScheduledJobs?(): Array<{
|
||||
operation: string;
|
||||
cronPattern: string;
|
||||
priority?: number;
|
||||
immediately?: boolean;
|
||||
description?: string;
|
||||
}>;
|
||||
}
|
||||
import type { Collection } from 'mongodb';
|
||||
import { getLogger } from '@stock-bot/logger';
|
||||
import {
|
||||
createJobHandler,
|
||||
handlerRegistry,
|
||||
type HandlerConfigWithSchedule,
|
||||
} from '@stock-bot/types';
|
||||
import { fetch } from '@stock-bot/utils';
|
||||
import type { IServiceContainer } from '../types/service-container';
|
||||
import type { ExecutionContext, IHandler } from '../types/types';
|
||||
|
||||
/**
|
||||
* Abstract base class for all handlers with improved DI
|
||||
* Provides common functionality and structure for queue/event operations
|
||||
*/
|
||||
export abstract class BaseHandler implements IHandler {
|
||||
// Direct service properties - flattened for cleaner access
|
||||
readonly logger;
|
||||
readonly cache;
|
||||
readonly queue;
|
||||
readonly proxy;
|
||||
readonly browser;
|
||||
readonly mongodb;
|
||||
readonly postgres;
|
||||
readonly questdb;
|
||||
|
||||
private handlerName: string;
|
||||
|
||||
constructor(services: IServiceContainer, handlerName?: string) {
|
||||
// Flatten all services onto the handler instance
|
||||
this.logger = getLogger(this.constructor.name);
|
||||
this.cache = services.cache;
|
||||
this.queue = services.queue;
|
||||
this.proxy = services.proxy;
|
||||
this.browser = services.browser;
|
||||
this.mongodb = services.mongodb;
|
||||
this.postgres = services.postgres;
|
||||
this.questdb = services.questdb;
|
||||
|
||||
// Read handler name from decorator first, then fallback to parameter or class name
|
||||
const constructor = this.constructor as any;
|
||||
this.handlerName =
|
||||
constructor.__handlerName || handlerName || this.constructor.name.toLowerCase();
|
||||
}
|
||||
|
||||
/**
|
||||
* Main execution method - automatically routes to decorated methods
|
||||
* Works with queue (events commented for future)
|
||||
*/
|
||||
async execute(operation: string, input: unknown, context: ExecutionContext): Promise<unknown> {
|
||||
const constructor = this.constructor as any;
|
||||
const operations = constructor.__operations || [];
|
||||
|
||||
// Debug logging
|
||||
this.logger.debug('Handler execute called', {
|
||||
handler: this.handlerName,
|
||||
operation,
|
||||
availableOperations: operations.map((op: any) => ({ name: op.name, method: op.method })),
|
||||
});
|
||||
|
||||
// Find the operation metadata
|
||||
const operationMeta = operations.find((op: any) => op.name === operation);
|
||||
if (!operationMeta) {
|
||||
this.logger.error('Operation not found', {
|
||||
requestedOperation: operation,
|
||||
availableOperations: operations.map((op: any) => op.name),
|
||||
});
|
||||
throw new Error(`Unknown operation: ${operation}`);
|
||||
}
|
||||
|
||||
// Get the method from the instance and call it
|
||||
const method = (this as any)[operationMeta.method];
|
||||
if (typeof method !== 'function') {
|
||||
throw new Error(`Operation method '${operationMeta.method}' not found on handler`);
|
||||
}
|
||||
|
||||
this.logger.debug('Executing operation method', {
|
||||
operation,
|
||||
method: operationMeta.method,
|
||||
});
|
||||
|
||||
return await method.call(this, input, context);
|
||||
}
|
||||
|
||||
async scheduleOperation(operation: string, payload: unknown, delay?: number): Promise<void> {
|
||||
if (!this.queue) {
|
||||
throw new Error('Queue service is not available');
|
||||
}
|
||||
const queue = this.queue.getQueue(this.handlerName);
|
||||
const jobData = {
|
||||
handler: this.handlerName,
|
||||
operation,
|
||||
payload,
|
||||
};
|
||||
await queue.add(operation, jobData, { delay });
|
||||
}
|
||||
|
||||
/**
|
||||
* Create execution context for operations
|
||||
*/
|
||||
protected createExecutionContext(
|
||||
type: 'http' | 'queue' | 'scheduled',
|
||||
metadata: Record<string, any> = {}
|
||||
): ExecutionContext {
|
||||
return {
|
||||
type,
|
||||
metadata: {
|
||||
...metadata,
|
||||
timestamp: Date.now(),
|
||||
traceId: `${this.constructor.name}-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper methods for common operations
|
||||
*/
|
||||
|
||||
/**
|
||||
* Get a MongoDB collection with type safety
|
||||
*/
|
||||
protected collection<T extends {} = any>(name: string): Collection<T> {
|
||||
if (!this.mongodb) {
|
||||
throw new Error('MongoDB service is not available');
|
||||
}
|
||||
return this.mongodb.collection(name);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set cache with handler-prefixed key
|
||||
*/
|
||||
protected async cacheSet(key: string, value: any, ttl?: number): Promise<void> {
|
||||
if (!this.cache) {
|
||||
return;
|
||||
}
|
||||
return this.cache.set(`${this.handlerName}:${key}`, value, ttl);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get cache with handler-prefixed key
|
||||
*/
|
||||
protected async cacheGet<T = any>(key: string): Promise<T | null> {
|
||||
if (!this.cache) {
|
||||
return null;
|
||||
}
|
||||
return this.cache.get(`${this.handlerName}:${key}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete cache with handler-prefixed key
|
||||
*/
|
||||
protected async cacheDel(key: string): Promise<void> {
|
||||
if (!this.cache) {
|
||||
return;
|
||||
}
|
||||
return this.cache.del(`${this.handlerName}:${key}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Schedule operation with delay in seconds
|
||||
*/
|
||||
protected async scheduleIn(
|
||||
operation: string,
|
||||
payload: unknown,
|
||||
delaySeconds: number
|
||||
): Promise<void> {
|
||||
return this.scheduleOperation(operation, payload, delaySeconds * 1000);
|
||||
}
|
||||
|
||||
/**
|
||||
* Log with handler context
|
||||
*/
|
||||
protected log(level: 'info' | 'warn' | 'error' | 'debug', message: string, meta?: any): void {
|
||||
this.logger[level](message, { handler: this.handlerName, ...meta });
|
||||
}
|
||||
|
||||
/**
|
||||
* HTTP client helper using fetch from utils
|
||||
*/
|
||||
protected get http() {
|
||||
return {
|
||||
get: (url: string, options?: any) =>
|
||||
fetch(url, { ...options, method: 'GET', logger: this.logger }),
|
||||
post: (url: string, data?: any, options?: any) =>
|
||||
fetch(url, {
|
||||
...options,
|
||||
method: 'POST',
|
||||
body: JSON.stringify(data),
|
||||
headers: { 'Content-Type': 'application/json', ...options?.headers },
|
||||
logger: this.logger,
|
||||
}),
|
||||
put: (url: string, data?: any, options?: any) =>
|
||||
fetch(url, {
|
||||
...options,
|
||||
method: 'PUT',
|
||||
body: JSON.stringify(data),
|
||||
headers: { 'Content-Type': 'application/json', ...options?.headers },
|
||||
logger: this.logger,
|
||||
}),
|
||||
delete: (url: string, options?: any) =>
|
||||
fetch(url, { ...options, method: 'DELETE', logger: this.logger }),
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a service is available
|
||||
*/
|
||||
protected hasService(name: keyof IServiceContainer): boolean {
|
||||
const service = this[name as keyof this];
|
||||
return service !== null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Event methods - commented for future
|
||||
*/
|
||||
// protected async publishEvent(eventName: string, payload: unknown): Promise<void> {
|
||||
// const eventBus = await this.container.resolveAsync('eventBus');
|
||||
// await eventBus.publish(eventName, payload);
|
||||
// }
|
||||
|
||||
/**
|
||||
* Register this handler using decorator metadata
|
||||
* Automatically reads @Handler, @Operation, and @QueueSchedule decorators
|
||||
*/
|
||||
register(): void {
|
||||
const constructor = this.constructor as any;
|
||||
const handlerName = constructor.__handlerName || this.handlerName;
|
||||
const operations = constructor.__operations || [];
|
||||
const schedules = constructor.__schedules || [];
|
||||
|
||||
// Create operation handlers from decorator metadata
|
||||
const operationHandlers: Record<string, any> = {};
|
||||
for (const op of operations) {
|
||||
operationHandlers[op.name] = createJobHandler(async payload => {
|
||||
const context: ExecutionContext = {
|
||||
type: 'queue',
|
||||
metadata: { source: 'queue', timestamp: Date.now() },
|
||||
};
|
||||
return await this.execute(op.name, payload, context);
|
||||
});
|
||||
}
|
||||
|
||||
// Create scheduled jobs from decorator metadata
|
||||
const scheduledJobs = schedules.map((schedule: any) => {
|
||||
// Find the operation name from the method name
|
||||
const operation = operations.find((op: any) => op.method === schedule.operation);
|
||||
return {
|
||||
type: `${handlerName}-${schedule.operation}`,
|
||||
operation: operation?.name || schedule.operation,
|
||||
cronPattern: schedule.cronPattern,
|
||||
priority: schedule.priority || 5,
|
||||
immediately: schedule.immediately || false,
|
||||
description: schedule.description || `${handlerName} ${schedule.operation}`,
|
||||
payload: this.getScheduledJobPayload?.(schedule.operation),
|
||||
};
|
||||
});
|
||||
|
||||
const config: HandlerConfigWithSchedule = {
|
||||
name: handlerName,
|
||||
operations: operationHandlers,
|
||||
scheduledJobs,
|
||||
};
|
||||
|
||||
handlerRegistry.registerWithSchedule(config);
|
||||
this.logger.info('Handler registered using decorator metadata', {
|
||||
handlerName,
|
||||
operations: operations.map((op: any) => ({ name: op.name, method: op.method })),
|
||||
scheduledJobs: scheduledJobs.map((job: any) => ({
|
||||
operation: job.operation,
|
||||
cronPattern: job.cronPattern,
|
||||
immediately: job.immediately,
|
||||
})),
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Override this method to provide payloads for scheduled jobs
|
||||
* @param operation The operation name that needs a payload
|
||||
* @returns The payload for the scheduled job, or undefined
|
||||
*/
|
||||
protected getScheduledJobPayload?(operation: string): any;
|
||||
|
||||
/**
|
||||
* Lifecycle hooks - can be overridden by subclasses
|
||||
*/
|
||||
async onInit?(): Promise<void>;
|
||||
async onStart?(): Promise<void>;
|
||||
async onStop?(): Promise<void>;
|
||||
async onDispose?(): Promise<void>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Specialized handler for operations that have scheduled jobs
|
||||
*/
|
||||
export abstract class ScheduledHandler extends BaseHandler {
|
||||
/**
|
||||
* Get scheduled job configurations for this handler
|
||||
* Override in subclasses to define schedules
|
||||
*/
|
||||
getScheduledJobs?(): Array<{
|
||||
operation: string;
|
||||
cronPattern: string;
|
||||
priority?: number;
|
||||
immediately?: boolean;
|
||||
description?: string;
|
||||
}>;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,148 +1,130 @@
|
|||
// Bun-compatible decorators (hybrid approach)
|
||||
|
||||
/**
|
||||
* Handler decorator - marks a class as a handler
|
||||
* @param name Handler name for registration
|
||||
*/
|
||||
export function Handler(name: string) {
|
||||
return function <T extends { new (...args: any[]): {} }>(
|
||||
target: T,
|
||||
_context?: any
|
||||
) {
|
||||
// Store handler name on the constructor
|
||||
(target as any).__handlerName = name;
|
||||
(target as any).__needsAutoRegistration = true;
|
||||
|
||||
return target;
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Operation decorator - marks a method as an operation
|
||||
* @param name Operation name
|
||||
*/
|
||||
export function Operation(name: string): any {
|
||||
return function (
|
||||
target: any,
|
||||
methodName: string,
|
||||
descriptor?: PropertyDescriptor
|
||||
): any {
|
||||
// Store metadata directly on the class constructor
|
||||
const constructor = target.constructor;
|
||||
|
||||
if (!constructor.__operations) {
|
||||
constructor.__operations = [];
|
||||
}
|
||||
constructor.__operations.push({
|
||||
name,
|
||||
method: methodName,
|
||||
});
|
||||
|
||||
return descriptor;
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Queue schedule decorator - marks an operation as scheduled
|
||||
* @param cronPattern Cron pattern for scheduling
|
||||
* @param options Additional scheduling options
|
||||
*/
|
||||
export function QueueSchedule(
|
||||
cronPattern: string,
|
||||
options?: {
|
||||
priority?: number;
|
||||
immediately?: boolean;
|
||||
description?: string;
|
||||
}
|
||||
): any {
|
||||
return function (
|
||||
target: any,
|
||||
methodName: string,
|
||||
descriptor?: PropertyDescriptor
|
||||
): any {
|
||||
// Store metadata directly on the class constructor
|
||||
const constructor = target.constructor;
|
||||
|
||||
if (!constructor.__schedules) {
|
||||
constructor.__schedules = [];
|
||||
}
|
||||
constructor.__schedules.push({
|
||||
operation: methodName,
|
||||
cronPattern,
|
||||
...options,
|
||||
});
|
||||
|
||||
return descriptor;
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Disabled decorator - marks a handler as disabled for auto-registration
|
||||
* Handlers marked with @Disabled() will be skipped during auto-registration
|
||||
*/
|
||||
export function Disabled() {
|
||||
return function <T extends { new (...args: any[]): {} }>(
|
||||
target: T,
|
||||
_context?: any
|
||||
) {
|
||||
// Store disabled flag on the constructor
|
||||
(target as any).__disabled = true;
|
||||
|
||||
return target;
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Combined decorator for scheduled operations
|
||||
* Automatically creates both an operation and a schedule
|
||||
* @param name Operation name
|
||||
* @param cronPattern Cron pattern for scheduling
|
||||
* @param options Schedule options
|
||||
*/
|
||||
export function ScheduledOperation(
|
||||
name: string,
|
||||
cronPattern: string,
|
||||
options?: {
|
||||
priority?: number;
|
||||
immediately?: boolean;
|
||||
description?: string;
|
||||
}
|
||||
): any {
|
||||
return function (
|
||||
target: any,
|
||||
methodName: string,
|
||||
descriptor?: PropertyDescriptor
|
||||
): any {
|
||||
// Apply both decorators
|
||||
Operation(name)(target, methodName, descriptor);
|
||||
QueueSchedule(cronPattern, options)(target, methodName, descriptor);
|
||||
return descriptor;
|
||||
};
|
||||
}
|
||||
|
||||
// Future event decorators - commented for now
|
||||
// export function EventListener(eventName: string) {
|
||||
// return function (target: any, propertyName: string, descriptor: PropertyDescriptor) {
|
||||
// if (!target.constructor.__eventListeners) {
|
||||
// target.constructor.__eventListeners = [];
|
||||
// }
|
||||
// target.constructor.__eventListeners.push({
|
||||
// eventName,
|
||||
// method: propertyName,
|
||||
// });
|
||||
// return descriptor;
|
||||
// };
|
||||
// }
|
||||
|
||||
// export function EventPublisher(eventName: string) {
|
||||
// return function (target: any, propertyName: string, descriptor: PropertyDescriptor) {
|
||||
// if (!target.constructor.__eventPublishers) {
|
||||
// target.constructor.__eventPublishers = [];
|
||||
// }
|
||||
// target.constructor.__eventPublishers.push({
|
||||
// eventName,
|
||||
// method: propertyName,
|
||||
// });
|
||||
// return descriptor;
|
||||
// };
|
||||
// }
|
||||
// Bun-compatible decorators (hybrid approach)
|
||||
|
||||
/**
|
||||
* Handler decorator - marks a class as a handler
|
||||
* @param name Handler name for registration
|
||||
*/
|
||||
export function Handler(name: string) {
|
||||
return function <T extends { new (...args: any[]): {} }>(target: T, _context?: any) {
|
||||
// Store handler name on the constructor
|
||||
(target as any).__handlerName = name;
|
||||
(target as any).__needsAutoRegistration = true;
|
||||
|
||||
return target;
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Operation decorator - marks a method as an operation
|
||||
* @param name Operation name
|
||||
*/
|
||||
export function Operation(name: string): any {
|
||||
return function (target: any, methodName: string, descriptor?: PropertyDescriptor): any {
|
||||
// Store metadata directly on the class constructor
|
||||
const constructor = target.constructor;
|
||||
|
||||
if (!constructor.__operations) {
|
||||
constructor.__operations = [];
|
||||
}
|
||||
constructor.__operations.push({
|
||||
name,
|
||||
method: methodName,
|
||||
});
|
||||
|
||||
return descriptor;
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Queue schedule decorator - marks an operation as scheduled
|
||||
* @param cronPattern Cron pattern for scheduling
|
||||
* @param options Additional scheduling options
|
||||
*/
|
||||
export function QueueSchedule(
|
||||
cronPattern: string,
|
||||
options?: {
|
||||
priority?: number;
|
||||
immediately?: boolean;
|
||||
description?: string;
|
||||
}
|
||||
): any {
|
||||
return function (target: any, methodName: string, descriptor?: PropertyDescriptor): any {
|
||||
// Store metadata directly on the class constructor
|
||||
const constructor = target.constructor;
|
||||
|
||||
if (!constructor.__schedules) {
|
||||
constructor.__schedules = [];
|
||||
}
|
||||
constructor.__schedules.push({
|
||||
operation: methodName,
|
||||
cronPattern,
|
||||
...options,
|
||||
});
|
||||
|
||||
return descriptor;
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Disabled decorator - marks a handler as disabled for auto-registration
|
||||
* Handlers marked with @Disabled() will be skipped during auto-registration
|
||||
*/
|
||||
export function Disabled() {
|
||||
return function <T extends { new (...args: any[]): {} }>(target: T, _context?: any) {
|
||||
// Store disabled flag on the constructor
|
||||
(target as any).__disabled = true;
|
||||
|
||||
return target;
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Combined decorator for scheduled operations
|
||||
* Automatically creates both an operation and a schedule
|
||||
* @param name Operation name
|
||||
* @param cronPattern Cron pattern for scheduling
|
||||
* @param options Schedule options
|
||||
*/
|
||||
export function ScheduledOperation(
|
||||
name: string,
|
||||
cronPattern: string,
|
||||
options?: {
|
||||
priority?: number;
|
||||
immediately?: boolean;
|
||||
description?: string;
|
||||
}
|
||||
): any {
|
||||
return function (target: any, methodName: string, descriptor?: PropertyDescriptor): any {
|
||||
// Apply both decorators
|
||||
Operation(name)(target, methodName, descriptor);
|
||||
QueueSchedule(cronPattern, options)(target, methodName, descriptor);
|
||||
return descriptor;
|
||||
};
|
||||
}
|
||||
|
||||
// Future event decorators - commented for now
|
||||
// export function EventListener(eventName: string) {
|
||||
// return function (target: any, propertyName: string, descriptor: PropertyDescriptor) {
|
||||
// if (!target.constructor.__eventListeners) {
|
||||
// target.constructor.__eventListeners = [];
|
||||
// }
|
||||
// target.constructor.__eventListeners.push({
|
||||
// eventName,
|
||||
// method: propertyName,
|
||||
// });
|
||||
// return descriptor;
|
||||
// };
|
||||
// }
|
||||
|
||||
// export function EventPublisher(eventName: string) {
|
||||
// return function (target: any, propertyName: string, descriptor: PropertyDescriptor) {
|
||||
// if (!target.constructor.__eventPublishers) {
|
||||
// target.constructor.__eventPublishers = [];
|
||||
// }
|
||||
// target.constructor.__eventPublishers.push({
|
||||
// eventName,
|
||||
// method: propertyName,
|
||||
// });
|
||||
// return descriptor;
|
||||
// };
|
||||
// }
|
||||
|
|
|
|||
|
|
@ -1,31 +1,37 @@
|
|||
// Base handler classes
|
||||
export { BaseHandler, ScheduledHandler } from './base/BaseHandler';
|
||||
|
||||
// Handler registry (re-exported from types to avoid circular deps)
|
||||
export { handlerRegistry } from '@stock-bot/types';
|
||||
|
||||
// Types
|
||||
export type {
|
||||
ExecutionContext,
|
||||
IHandler,
|
||||
JobHandler,
|
||||
ScheduledJob,
|
||||
HandlerConfig,
|
||||
HandlerConfigWithSchedule,
|
||||
TypedJobHandler,
|
||||
HandlerMetadata,
|
||||
OperationMetadata,
|
||||
} from './types/types';
|
||||
|
||||
export type { IServiceContainer } from './types/service-container';
|
||||
|
||||
export { createJobHandler } from './types/types';
|
||||
|
||||
// Decorators
|
||||
export { Handler, Operation, QueueSchedule, ScheduledOperation, Disabled } from './decorators/decorators';
|
||||
|
||||
// Auto-registration utilities
|
||||
export { autoRegisterHandlers, createAutoHandlerRegistry } from './registry/auto-register';
|
||||
|
||||
// Future exports - commented for now
|
||||
// export { EventListener, EventPublisher } from './decorators/decorators';
|
||||
// Base handler classes
|
||||
export { BaseHandler, ScheduledHandler } from './base/BaseHandler';
|
||||
|
||||
// Handler registry (re-exported from types to avoid circular deps)
|
||||
export { handlerRegistry } from '@stock-bot/types';
|
||||
|
||||
// Types
|
||||
export type {
|
||||
ExecutionContext,
|
||||
IHandler,
|
||||
JobHandler,
|
||||
ScheduledJob,
|
||||
HandlerConfig,
|
||||
HandlerConfigWithSchedule,
|
||||
TypedJobHandler,
|
||||
HandlerMetadata,
|
||||
OperationMetadata,
|
||||
} from './types/types';
|
||||
|
||||
export type { IServiceContainer } from './types/service-container';
|
||||
|
||||
export { createJobHandler } from './types/types';
|
||||
|
||||
// Decorators
|
||||
export {
|
||||
Handler,
|
||||
Operation,
|
||||
QueueSchedule,
|
||||
ScheduledOperation,
|
||||
Disabled,
|
||||
} from './decorators/decorators';
|
||||
|
||||
// Auto-registration utilities
|
||||
export { autoRegisterHandlers, createAutoHandlerRegistry } from './registry/auto-register';
|
||||
|
||||
// Future exports - commented for now
|
||||
// export { EventListener, EventPublisher } from './decorators/decorators';
|
||||
|
|
|
|||
|
|
@ -1,191 +1,193 @@
|
|||
import { getLogger } from '@stock-bot/logger';
|
||||
import type { JobHandler, HandlerConfig, HandlerConfigWithSchedule, ScheduledJob } from '../types/types';
|
||||
|
||||
const logger = getLogger('handler-registry');
|
||||
|
||||
class HandlerRegistry {
|
||||
private handlers = new Map<string, HandlerConfig>();
|
||||
private handlerSchedules = new Map<string, ScheduledJob[]>();
|
||||
|
||||
/**
|
||||
* Register a handler with its operations (simple config)
|
||||
*/
|
||||
register(handlerName: string, config: HandlerConfig): void {
|
||||
logger.info(`Registering handler: ${handlerName}`, {
|
||||
operations: Object.keys(config),
|
||||
});
|
||||
|
||||
this.handlers.set(handlerName, config);
|
||||
}
|
||||
|
||||
/**
|
||||
* Register a handler with operations and scheduled jobs (full config)
|
||||
*/
|
||||
registerWithSchedule(config: HandlerConfigWithSchedule): void {
|
||||
logger.info(`Registering handler with schedule: ${config.name}`, {
|
||||
operations: Object.keys(config.operations),
|
||||
scheduledJobs: config.scheduledJobs?.length || 0,
|
||||
});
|
||||
|
||||
this.handlers.set(config.name, config.operations);
|
||||
|
||||
if (config.scheduledJobs && config.scheduledJobs.length > 0) {
|
||||
this.handlerSchedules.set(config.name, config.scheduledJobs);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a handler for a specific handler and operation
|
||||
*/
|
||||
getHandler(handler: string, operation: string): JobHandler | null {
|
||||
const handlerConfig = this.handlers.get(handler);
|
||||
if (!handlerConfig) {
|
||||
logger.warn(`Handler not found: ${handler}`);
|
||||
return null;
|
||||
}
|
||||
|
||||
const jobHandler = handlerConfig[operation];
|
||||
if (!jobHandler) {
|
||||
logger.warn(`Operation not found: ${handler}:${operation}`, {
|
||||
availableOperations: Object.keys(handlerConfig),
|
||||
});
|
||||
return null;
|
||||
}
|
||||
|
||||
return jobHandler;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all scheduled jobs from all handlers
|
||||
*/
|
||||
getAllScheduledJobs(): Array<{ handler: string; job: ScheduledJob }> {
|
||||
const allJobs: Array<{ handler: string; job: ScheduledJob }> = [];
|
||||
|
||||
for (const [handlerName, jobs] of this.handlerSchedules) {
|
||||
for (const job of jobs) {
|
||||
allJobs.push({
|
||||
handler: handlerName,
|
||||
job,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return allJobs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get scheduled jobs for a specific handler
|
||||
*/
|
||||
getScheduledJobs(handler: string): ScheduledJob[] {
|
||||
return this.handlerSchedules.get(handler) || [];
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a handler has scheduled jobs
|
||||
*/
|
||||
hasScheduledJobs(handler: string): boolean {
|
||||
return this.handlerSchedules.has(handler);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all registered handlers with their configurations
|
||||
*/
|
||||
getHandlerConfigs(): Array<{ name: string; operations: string[]; scheduledJobs: number }> {
|
||||
return Array.from(this.handlers.keys()).map(name => ({
|
||||
name,
|
||||
operations: Object.keys(this.handlers.get(name) || {}),
|
||||
scheduledJobs: this.handlerSchedules.get(name)?.length || 0,
|
||||
}));
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all handlers with their full configurations for queue manager registration
|
||||
*/
|
||||
getAllHandlers(): Map<string, { operations: HandlerConfig; scheduledJobs?: ScheduledJob[] }> {
|
||||
const result = new Map<
|
||||
string,
|
||||
{ operations: HandlerConfig; scheduledJobs?: ScheduledJob[] }
|
||||
>();
|
||||
|
||||
for (const [name, operations] of this.handlers) {
|
||||
const scheduledJobs = this.handlerSchedules.get(name);
|
||||
result.set(name, {
|
||||
operations,
|
||||
scheduledJobs,
|
||||
});
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all registered handlers
|
||||
*/
|
||||
getHandlers(): string[] {
|
||||
return Array.from(this.handlers.keys());
|
||||
}
|
||||
|
||||
/**
|
||||
* Get operations for a specific handler
|
||||
*/
|
||||
getOperations(handler: string): string[] {
|
||||
const handlerConfig = this.handlers.get(handler);
|
||||
return handlerConfig ? Object.keys(handlerConfig) : [];
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a handler exists
|
||||
*/
|
||||
hasHandler(handler: string): boolean {
|
||||
return this.handlers.has(handler);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a handler has a specific operation
|
||||
*/
|
||||
hasOperation(handler: string, operation: string): boolean {
|
||||
const handlerConfig = this.handlers.get(handler);
|
||||
return handlerConfig ? operation in handlerConfig : false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove a handler
|
||||
*/
|
||||
unregister(handler: string): boolean {
|
||||
this.handlerSchedules.delete(handler);
|
||||
return this.handlers.delete(handler);
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear all handlers
|
||||
*/
|
||||
clear(): void {
|
||||
this.handlers.clear();
|
||||
this.handlerSchedules.clear();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get registry statistics
|
||||
*/
|
||||
getStats(): { handlers: number; totalOperations: number; totalScheduledJobs: number } {
|
||||
let totalOperations = 0;
|
||||
let totalScheduledJobs = 0;
|
||||
|
||||
for (const config of this.handlers.values()) {
|
||||
totalOperations += Object.keys(config).length;
|
||||
}
|
||||
|
||||
for (const jobs of this.handlerSchedules.values()) {
|
||||
totalScheduledJobs += jobs.length;
|
||||
}
|
||||
|
||||
return {
|
||||
handlers: this.handlers.size,
|
||||
totalOperations,
|
||||
totalScheduledJobs,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Export singleton instance
|
||||
export const handlerRegistry = new HandlerRegistry();
|
||||
import { getLogger } from '@stock-bot/logger';
|
||||
import type {
|
||||
HandlerConfig,
|
||||
HandlerConfigWithSchedule,
|
||||
JobHandler,
|
||||
ScheduledJob,
|
||||
} from '../types/types';
|
||||
|
||||
const logger = getLogger('handler-registry');
|
||||
|
||||
class HandlerRegistry {
|
||||
private handlers = new Map<string, HandlerConfig>();
|
||||
private handlerSchedules = new Map<string, ScheduledJob[]>();
|
||||
|
||||
/**
|
||||
* Register a handler with its operations (simple config)
|
||||
*/
|
||||
register(handlerName: string, config: HandlerConfig): void {
|
||||
logger.info(`Registering handler: ${handlerName}`, {
|
||||
operations: Object.keys(config),
|
||||
});
|
||||
|
||||
this.handlers.set(handlerName, config);
|
||||
}
|
||||
|
||||
/**
|
||||
* Register a handler with operations and scheduled jobs (full config)
|
||||
*/
|
||||
registerWithSchedule(config: HandlerConfigWithSchedule): void {
|
||||
logger.info(`Registering handler with schedule: ${config.name}`, {
|
||||
operations: Object.keys(config.operations),
|
||||
scheduledJobs: config.scheduledJobs?.length || 0,
|
||||
});
|
||||
|
||||
this.handlers.set(config.name, config.operations);
|
||||
|
||||
if (config.scheduledJobs && config.scheduledJobs.length > 0) {
|
||||
this.handlerSchedules.set(config.name, config.scheduledJobs);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a handler for a specific handler and operation
|
||||
*/
|
||||
getHandler(handler: string, operation: string): JobHandler | null {
|
||||
const handlerConfig = this.handlers.get(handler);
|
||||
if (!handlerConfig) {
|
||||
logger.warn(`Handler not found: ${handler}`);
|
||||
return null;
|
||||
}
|
||||
|
||||
const jobHandler = handlerConfig[operation];
|
||||
if (!jobHandler) {
|
||||
logger.warn(`Operation not found: ${handler}:${operation}`, {
|
||||
availableOperations: Object.keys(handlerConfig),
|
||||
});
|
||||
return null;
|
||||
}
|
||||
|
||||
return jobHandler;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all scheduled jobs from all handlers
|
||||
*/
|
||||
getAllScheduledJobs(): Array<{ handler: string; job: ScheduledJob }> {
|
||||
const allJobs: Array<{ handler: string; job: ScheduledJob }> = [];
|
||||
|
||||
for (const [handlerName, jobs] of this.handlerSchedules) {
|
||||
for (const job of jobs) {
|
||||
allJobs.push({
|
||||
handler: handlerName,
|
||||
job,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return allJobs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get scheduled jobs for a specific handler
|
||||
*/
|
||||
getScheduledJobs(handler: string): ScheduledJob[] {
|
||||
return this.handlerSchedules.get(handler) || [];
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a handler has scheduled jobs
|
||||
*/
|
||||
hasScheduledJobs(handler: string): boolean {
|
||||
return this.handlerSchedules.has(handler);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all registered handlers with their configurations
|
||||
*/
|
||||
getHandlerConfigs(): Array<{ name: string; operations: string[]; scheduledJobs: number }> {
|
||||
return Array.from(this.handlers.keys()).map(name => ({
|
||||
name,
|
||||
operations: Object.keys(this.handlers.get(name) || {}),
|
||||
scheduledJobs: this.handlerSchedules.get(name)?.length || 0,
|
||||
}));
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all handlers with their full configurations for queue manager registration
|
||||
*/
|
||||
getAllHandlers(): Map<string, { operations: HandlerConfig; scheduledJobs?: ScheduledJob[] }> {
|
||||
const result = new Map<string, { operations: HandlerConfig; scheduledJobs?: ScheduledJob[] }>();
|
||||
|
||||
for (const [name, operations] of this.handlers) {
|
||||
const scheduledJobs = this.handlerSchedules.get(name);
|
||||
result.set(name, {
|
||||
operations,
|
||||
scheduledJobs,
|
||||
});
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all registered handlers
|
||||
*/
|
||||
getHandlers(): string[] {
|
||||
return Array.from(this.handlers.keys());
|
||||
}
|
||||
|
||||
/**
|
||||
* Get operations for a specific handler
|
||||
*/
|
||||
getOperations(handler: string): string[] {
|
||||
const handlerConfig = this.handlers.get(handler);
|
||||
return handlerConfig ? Object.keys(handlerConfig) : [];
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a handler exists
|
||||
*/
|
||||
hasHandler(handler: string): boolean {
|
||||
return this.handlers.has(handler);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a handler has a specific operation
|
||||
*/
|
||||
hasOperation(handler: string, operation: string): boolean {
|
||||
const handlerConfig = this.handlers.get(handler);
|
||||
return handlerConfig ? operation in handlerConfig : false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove a handler
|
||||
*/
|
||||
unregister(handler: string): boolean {
|
||||
this.handlerSchedules.delete(handler);
|
||||
return this.handlers.delete(handler);
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear all handlers
|
||||
*/
|
||||
clear(): void {
|
||||
this.handlers.clear();
|
||||
this.handlerSchedules.clear();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get registry statistics
|
||||
*/
|
||||
getStats(): { handlers: number; totalOperations: number; totalScheduledJobs: number } {
|
||||
let totalOperations = 0;
|
||||
let totalScheduledJobs = 0;
|
||||
|
||||
for (const config of this.handlers.values()) {
|
||||
totalOperations += Object.keys(config).length;
|
||||
}
|
||||
|
||||
for (const jobs of this.handlerSchedules.values()) {
|
||||
totalScheduledJobs += jobs.length;
|
||||
}
|
||||
|
||||
return {
|
||||
handlers: this.handlers.size,
|
||||
totalOperations,
|
||||
totalScheduledJobs,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Export singleton instance
|
||||
export const handlerRegistry = new HandlerRegistry();
|
||||
|
|
|
|||
|
|
@ -1,180 +1,188 @@
|
|||
/**
|
||||
* Auto-registration utilities for handlers
|
||||
* Automatically discovers and registers handlers based on file patterns
|
||||
*/
|
||||
|
||||
import { getLogger } from '@stock-bot/logger';
|
||||
import type { IServiceContainer } from '../types/service-container';
|
||||
import { BaseHandler } from '../base/BaseHandler';
|
||||
import { readdirSync, statSync } from 'fs';
|
||||
import { join, relative } from 'path';
|
||||
|
||||
const logger = getLogger('handler-auto-register');
|
||||
|
||||
/**
|
||||
* Recursively find all handler files in a directory
|
||||
*/
|
||||
function findHandlerFiles(dir: string, pattern = '.handler.'): string[] {
|
||||
const files: string[] = [];
|
||||
|
||||
function scan(currentDir: string) {
|
||||
const entries = readdirSync(currentDir);
|
||||
|
||||
for (const entry of entries) {
|
||||
const fullPath = join(currentDir, entry);
|
||||
const stat = statSync(fullPath);
|
||||
|
||||
if (stat.isDirectory() && !entry.startsWith('.') && entry !== 'node_modules') {
|
||||
scan(fullPath);
|
||||
} else if (stat.isFile() && entry.includes(pattern) && entry.endsWith('.ts')) {
|
||||
files.push(fullPath);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
scan(dir);
|
||||
return files;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract handler classes from a module
|
||||
*/
|
||||
function extractHandlerClasses(module: any): Array<new (services: IServiceContainer) => BaseHandler> {
|
||||
const handlers: Array<new (services: IServiceContainer) => BaseHandler> = [];
|
||||
|
||||
for (const key of Object.keys(module)) {
|
||||
const exported = module[key];
|
||||
|
||||
// Check if it's a class that extends BaseHandler
|
||||
if (
|
||||
typeof exported === 'function' &&
|
||||
exported.prototype &&
|
||||
exported.prototype instanceof BaseHandler
|
||||
) {
|
||||
handlers.push(exported);
|
||||
}
|
||||
}
|
||||
|
||||
return handlers;
|
||||
}
|
||||
|
||||
/**
|
||||
* Auto-register all handlers in a directory
|
||||
* @param directory The directory to scan for handlers
|
||||
* @param services The service container to inject into handlers
|
||||
* @param options Configuration options
|
||||
*/
|
||||
export async function autoRegisterHandlers(
|
||||
directory: string,
|
||||
services: IServiceContainer,
|
||||
options: {
|
||||
pattern?: string;
|
||||
exclude?: string[];
|
||||
dryRun?: boolean;
|
||||
} = {}
|
||||
): Promise<{ registered: string[]; failed: string[] }> {
|
||||
const { pattern = '.handler.', exclude = [], dryRun = false } = options;
|
||||
const registered: string[] = [];
|
||||
const failed: string[] = [];
|
||||
|
||||
try {
|
||||
logger.info('Starting auto-registration of handlers', { directory, pattern });
|
||||
|
||||
// Find all handler files
|
||||
const handlerFiles = findHandlerFiles(directory, pattern);
|
||||
logger.debug(`Found ${handlerFiles.length} handler files`, { files: handlerFiles });
|
||||
|
||||
// Process each handler file
|
||||
for (const file of handlerFiles) {
|
||||
const relativePath = relative(directory, file);
|
||||
|
||||
// Skip excluded files
|
||||
if (exclude.some(ex => relativePath.includes(ex))) {
|
||||
logger.debug(`Skipping excluded file: ${relativePath}`);
|
||||
continue;
|
||||
}
|
||||
|
||||
try {
|
||||
// Import the module
|
||||
const module = await import(file);
|
||||
const handlerClasses = extractHandlerClasses(module);
|
||||
|
||||
if (handlerClasses.length === 0) {
|
||||
logger.warn(`No handler classes found in ${relativePath}`);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Register each handler class
|
||||
for (const HandlerClass of handlerClasses) {
|
||||
const handlerName = HandlerClass.name;
|
||||
|
||||
// Check if handler is disabled
|
||||
if ((HandlerClass as any).__disabled) {
|
||||
logger.info(`Skipping disabled handler: ${handlerName} from ${relativePath}`);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (dryRun) {
|
||||
logger.info(`[DRY RUN] Would register handler: ${handlerName} from ${relativePath}`);
|
||||
registered.push(handlerName);
|
||||
} else {
|
||||
logger.info(`Registering handler: ${handlerName} from ${relativePath}`);
|
||||
|
||||
// Create instance and register
|
||||
const handler = new HandlerClass(services);
|
||||
handler.register();
|
||||
|
||||
registered.push(handlerName);
|
||||
logger.info(`Successfully registered handler: ${handlerName}`);
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error(`Failed to process handler file: ${relativePath}`, { error });
|
||||
failed.push(relativePath);
|
||||
}
|
||||
}
|
||||
|
||||
logger.info('Auto-registration complete', {
|
||||
totalFiles: handlerFiles.length,
|
||||
registered: registered.length,
|
||||
failed: failed.length
|
||||
});
|
||||
|
||||
return { registered, failed };
|
||||
} catch (error) {
|
||||
logger.error('Auto-registration failed', { error });
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a handler registry that auto-discovers handlers
|
||||
*/
|
||||
export function createAutoHandlerRegistry(services: IServiceContainer) {
|
||||
return {
|
||||
/**
|
||||
* Register all handlers from a directory
|
||||
*/
|
||||
async registerDirectory(directory: string, options?: Parameters<typeof autoRegisterHandlers>[2]) {
|
||||
return autoRegisterHandlers(directory, services, options);
|
||||
},
|
||||
|
||||
/**
|
||||
* Register handlers from multiple directories
|
||||
*/
|
||||
async registerDirectories(directories: string[], options?: Parameters<typeof autoRegisterHandlers>[2]) {
|
||||
const results = {
|
||||
registered: [] as string[],
|
||||
failed: [] as string[]
|
||||
};
|
||||
|
||||
for (const dir of directories) {
|
||||
const result = await autoRegisterHandlers(dir, services, options);
|
||||
results.registered.push(...result.registered);
|
||||
results.failed.push(...result.failed);
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
};
|
||||
}
|
||||
/**
|
||||
* Auto-registration utilities for handlers
|
||||
* Automatically discovers and registers handlers based on file patterns
|
||||
*/
|
||||
|
||||
import { readdirSync, statSync } from 'fs';
|
||||
import { join, relative } from 'path';
|
||||
import { getLogger } from '@stock-bot/logger';
|
||||
import { BaseHandler } from '../base/BaseHandler';
|
||||
import type { IServiceContainer } from '../types/service-container';
|
||||
|
||||
const logger = getLogger('handler-auto-register');
|
||||
|
||||
/**
|
||||
* Recursively find all handler files in a directory
|
||||
*/
|
||||
function findHandlerFiles(dir: string, pattern = '.handler.'): string[] {
|
||||
const files: string[] = [];
|
||||
|
||||
function scan(currentDir: string) {
|
||||
const entries = readdirSync(currentDir);
|
||||
|
||||
for (const entry of entries) {
|
||||
const fullPath = join(currentDir, entry);
|
||||
const stat = statSync(fullPath);
|
||||
|
||||
if (stat.isDirectory() && !entry.startsWith('.') && entry !== 'node_modules') {
|
||||
scan(fullPath);
|
||||
} else if (stat.isFile() && entry.includes(pattern) && entry.endsWith('.ts')) {
|
||||
files.push(fullPath);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
scan(dir);
|
||||
return files;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract handler classes from a module
|
||||
*/
|
||||
function extractHandlerClasses(
|
||||
module: any
|
||||
): Array<new (services: IServiceContainer) => BaseHandler> {
|
||||
const handlers: Array<new (services: IServiceContainer) => BaseHandler> = [];
|
||||
|
||||
for (const key of Object.keys(module)) {
|
||||
const exported = module[key];
|
||||
|
||||
// Check if it's a class that extends BaseHandler
|
||||
if (
|
||||
typeof exported === 'function' &&
|
||||
exported.prototype &&
|
||||
exported.prototype instanceof BaseHandler
|
||||
) {
|
||||
handlers.push(exported);
|
||||
}
|
||||
}
|
||||
|
||||
return handlers;
|
||||
}
|
||||
|
||||
/**
|
||||
* Auto-register all handlers in a directory
|
||||
* @param directory The directory to scan for handlers
|
||||
* @param services The service container to inject into handlers
|
||||
* @param options Configuration options
|
||||
*/
|
||||
export async function autoRegisterHandlers(
|
||||
directory: string,
|
||||
services: IServiceContainer,
|
||||
options: {
|
||||
pattern?: string;
|
||||
exclude?: string[];
|
||||
dryRun?: boolean;
|
||||
} = {}
|
||||
): Promise<{ registered: string[]; failed: string[] }> {
|
||||
const { pattern = '.handler.', exclude = [], dryRun = false } = options;
|
||||
const registered: string[] = [];
|
||||
const failed: string[] = [];
|
||||
|
||||
try {
|
||||
logger.info('Starting auto-registration of handlers', { directory, pattern });
|
||||
|
||||
// Find all handler files
|
||||
const handlerFiles = findHandlerFiles(directory, pattern);
|
||||
logger.debug(`Found ${handlerFiles.length} handler files`, { files: handlerFiles });
|
||||
|
||||
// Process each handler file
|
||||
for (const file of handlerFiles) {
|
||||
const relativePath = relative(directory, file);
|
||||
|
||||
// Skip excluded files
|
||||
if (exclude.some(ex => relativePath.includes(ex))) {
|
||||
logger.debug(`Skipping excluded file: ${relativePath}`);
|
||||
continue;
|
||||
}
|
||||
|
||||
try {
|
||||
// Import the module
|
||||
const module = await import(file);
|
||||
const handlerClasses = extractHandlerClasses(module);
|
||||
|
||||
if (handlerClasses.length === 0) {
|
||||
logger.warn(`No handler classes found in ${relativePath}`);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Register each handler class
|
||||
for (const HandlerClass of handlerClasses) {
|
||||
const handlerName = HandlerClass.name;
|
||||
|
||||
// Check if handler is disabled
|
||||
if ((HandlerClass as any).__disabled) {
|
||||
logger.info(`Skipping disabled handler: ${handlerName} from ${relativePath}`);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (dryRun) {
|
||||
logger.info(`[DRY RUN] Would register handler: ${handlerName} from ${relativePath}`);
|
||||
registered.push(handlerName);
|
||||
} else {
|
||||
logger.info(`Registering handler: ${handlerName} from ${relativePath}`);
|
||||
|
||||
// Create instance and register
|
||||
const handler = new HandlerClass(services);
|
||||
handler.register();
|
||||
|
||||
registered.push(handlerName);
|
||||
logger.info(`Successfully registered handler: ${handlerName}`);
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error(`Failed to process handler file: ${relativePath}`, { error });
|
||||
failed.push(relativePath);
|
||||
}
|
||||
}
|
||||
|
||||
logger.info('Auto-registration complete', {
|
||||
totalFiles: handlerFiles.length,
|
||||
registered: registered.length,
|
||||
failed: failed.length,
|
||||
});
|
||||
|
||||
return { registered, failed };
|
||||
} catch (error) {
|
||||
logger.error('Auto-registration failed', { error });
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a handler registry that auto-discovers handlers
|
||||
*/
|
||||
export function createAutoHandlerRegistry(services: IServiceContainer) {
|
||||
return {
|
||||
/**
|
||||
* Register all handlers from a directory
|
||||
*/
|
||||
async registerDirectory(
|
||||
directory: string,
|
||||
options?: Parameters<typeof autoRegisterHandlers>[2]
|
||||
) {
|
||||
return autoRegisterHandlers(directory, services, options);
|
||||
},
|
||||
|
||||
/**
|
||||
* Register handlers from multiple directories
|
||||
*/
|
||||
async registerDirectories(
|
||||
directories: string[],
|
||||
options?: Parameters<typeof autoRegisterHandlers>[2]
|
||||
) {
|
||||
const results = {
|
||||
registered: [] as string[],
|
||||
failed: [] as string[],
|
||||
};
|
||||
|
||||
for (const dir of directories) {
|
||||
const result = await autoRegisterHandlers(dir, services, options);
|
||||
results.registered.push(...result.registered);
|
||||
results.failed.push(...result.failed);
|
||||
}
|
||||
|
||||
return results;
|
||||
},
|
||||
};
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,27 +1,27 @@
|
|||
/**
|
||||
* Universal Service Container for Handlers
|
||||
* Simple, comprehensive container with all services available
|
||||
*/
|
||||
|
||||
import type { ProxyManager } from '@stock-bot/proxy';
|
||||
|
||||
/**
|
||||
* Universal service container with all common services
|
||||
* Designed to work across different service contexts (data-ingestion, processing, etc.)
|
||||
*/
|
||||
export interface IServiceContainer {
|
||||
// Core infrastructure
|
||||
readonly logger: any; // Logger instance
|
||||
readonly cache?: any; // Cache provider (Redis/Dragonfly) - optional
|
||||
readonly queue?: any; // Queue manager (BullMQ) - optional
|
||||
readonly proxy?: ProxyManager; // Proxy manager service - optional (depends on cache)
|
||||
readonly browser?: any; // Browser automation (Playwright)
|
||||
|
||||
// Database clients - all optional to support selective enabling
|
||||
readonly mongodb?: any; // MongoDB client
|
||||
readonly postgres?: any; // PostgreSQL client
|
||||
readonly questdb?: any; // QuestDB client (time-series)
|
||||
|
||||
// Optional extensions for future use
|
||||
readonly custom?: Record<string, any>;
|
||||
}
|
||||
/**
|
||||
* Universal Service Container for Handlers
|
||||
* Simple, comprehensive container with all services available
|
||||
*/
|
||||
|
||||
import type { ProxyManager } from '@stock-bot/proxy';
|
||||
|
||||
/**
|
||||
* Universal service container with all common services
|
||||
* Designed to work across different service contexts (data-ingestion, processing, etc.)
|
||||
*/
|
||||
export interface IServiceContainer {
|
||||
// Core infrastructure
|
||||
readonly logger: any; // Logger instance
|
||||
readonly cache?: any; // Cache provider (Redis/Dragonfly) - optional
|
||||
readonly queue?: any; // Queue manager (BullMQ) - optional
|
||||
readonly proxy?: ProxyManager; // Proxy manager service - optional (depends on cache)
|
||||
readonly browser?: any; // Browser automation (Playwright)
|
||||
|
||||
// Database clients - all optional to support selective enabling
|
||||
readonly mongodb?: any; // MongoDB client
|
||||
readonly postgres?: any; // PostgreSQL client
|
||||
readonly questdb?: any; // QuestDB client (time-series)
|
||||
|
||||
// Optional extensions for future use
|
||||
readonly custom?: Record<string, any>;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,14 +1,14 @@
|
|||
// Re-export all handler types from the shared types package
|
||||
export type {
|
||||
ExecutionContext,
|
||||
HandlerConfig,
|
||||
HandlerConfigWithSchedule,
|
||||
HandlerMetadata,
|
||||
IHandler,
|
||||
JobHandler,
|
||||
OperationMetadata,
|
||||
ScheduledJob,
|
||||
TypedJobHandler,
|
||||
} from '@stock-bot/types';
|
||||
|
||||
export { createJobHandler } from '@stock-bot/types';
|
||||
// Re-export all handler types from the shared types package
|
||||
export type {
|
||||
ExecutionContext,
|
||||
HandlerConfig,
|
||||
HandlerConfigWithSchedule,
|
||||
HandlerMetadata,
|
||||
IHandler,
|
||||
JobHandler,
|
||||
OperationMetadata,
|
||||
ScheduledJob,
|
||||
TypedJobHandler,
|
||||
} from '@stock-bot/types';
|
||||
|
||||
export { createJobHandler } from '@stock-bot/types';
|
||||
|
|
|
|||
|
|
@ -1,15 +1,15 @@
|
|||
{
|
||||
"extends": "../../../tsconfig.json",
|
||||
"compilerOptions": {
|
||||
"outDir": "./dist",
|
||||
"rootDir": "./src",
|
||||
"composite": true
|
||||
},
|
||||
"include": ["src/**/*"],
|
||||
"references": [
|
||||
{ "path": "../config" },
|
||||
{ "path": "../logger" },
|
||||
{ "path": "../di" },
|
||||
{ "path": "../../utils" }
|
||||
]
|
||||
}
|
||||
{
|
||||
"extends": "../../../tsconfig.json",
|
||||
"compilerOptions": {
|
||||
"outDir": "./dist",
|
||||
"rootDir": "./src",
|
||||
"composite": true
|
||||
},
|
||||
"include": ["src/**/*"],
|
||||
"references": [
|
||||
{ "path": "../config" },
|
||||
{ "path": "../logger" },
|
||||
{ "path": "../di" },
|
||||
{ "path": "../../utils" }
|
||||
]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -58,12 +58,12 @@ function createDestination(
|
|||
// Console: In-process pretty stream for dev (fast shutdown)
|
||||
if (config.logConsole && config.environment !== 'production') {
|
||||
const prettyStream = pretty({
|
||||
sync: true, // IMPORTANT: Make async to prevent blocking the event loop
|
||||
sync: true, // IMPORTANT: Make async to prevent blocking the event loop
|
||||
colorize: true,
|
||||
translateTime: 'yyyy-mm-dd HH:MM:ss.l',
|
||||
messageFormat: '[{service}{childName}] {msg}',
|
||||
singleLine: false, // This was causing logs to be on one line
|
||||
hideObject: false, // Hide metadata objects
|
||||
singleLine: false, // This was causing logs to be on one line
|
||||
hideObject: false, // Hide metadata objects
|
||||
ignore: 'pid,hostname,service,environment,version,childName',
|
||||
errorLikeObjectKeys: ['err', 'error'],
|
||||
errorProps: 'message,stack,name,code',
|
||||
|
|
@ -193,7 +193,6 @@ export class Logger {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
// Simple log level methods
|
||||
trace(message: string | object, metadata?: LogMetadata): void {
|
||||
this.log('trace', message, metadata);
|
||||
|
|
|
|||
|
|
@ -6,6 +6,5 @@
|
|||
"composite": true
|
||||
},
|
||||
"include": ["src/**/*"],
|
||||
"references": [
|
||||
]
|
||||
"references": []
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3,9 +3,9 @@
|
|||
* Types for strategy backtesting and analysis
|
||||
*/
|
||||
|
||||
import type { TradeExecution, TradePerformance } from './trading';
|
||||
import type { PortfolioAnalysis } from './portfolio';
|
||||
import type { RiskMetrics, DrawdownAnalysis } from './risk-metrics';
|
||||
import type { DrawdownAnalysis, RiskMetrics } from './risk-metrics';
|
||||
import type { TradeExecution, TradePerformance } from './trading';
|
||||
|
||||
/**
|
||||
* Backtesting results
|
||||
|
|
@ -31,4 +31,4 @@ export interface BacktestResults {
|
|||
initialCapital: number;
|
||||
/** Final value */
|
||||
finalValue: number;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -13,7 +13,7 @@ export interface BalanceSheet {
|
|||
period: string;
|
||||
/** Currency */
|
||||
currency: string;
|
||||
|
||||
|
||||
// Assets
|
||||
/** Total current assets */
|
||||
totalCurrentAssets: number;
|
||||
|
|
@ -29,7 +29,7 @@ export interface BalanceSheet {
|
|||
prepaidExpenses?: number;
|
||||
/** Other current assets */
|
||||
otherCurrentAssets?: number;
|
||||
|
||||
|
||||
/** Total non-current assets */
|
||||
totalNonCurrentAssets: number;
|
||||
/** Property, plant & equipment (net) */
|
||||
|
|
@ -42,10 +42,10 @@ export interface BalanceSheet {
|
|||
longTermInvestments?: number;
|
||||
/** Other non-current assets */
|
||||
otherNonCurrentAssets?: number;
|
||||
|
||||
|
||||
/** Total assets */
|
||||
totalAssets: number;
|
||||
|
||||
|
||||
// Liabilities
|
||||
/** Total current liabilities */
|
||||
totalCurrentLiabilities: number;
|
||||
|
|
@ -57,7 +57,7 @@ export interface BalanceSheet {
|
|||
accruedLiabilities?: number;
|
||||
/** Other current liabilities */
|
||||
otherCurrentLiabilities?: number;
|
||||
|
||||
|
||||
/** Total non-current liabilities */
|
||||
totalNonCurrentLiabilities: number;
|
||||
/** Long-term debt */
|
||||
|
|
@ -66,10 +66,10 @@ export interface BalanceSheet {
|
|||
deferredTaxLiabilities?: number;
|
||||
/** Other non-current liabilities */
|
||||
otherNonCurrentLiabilities?: number;
|
||||
|
||||
|
||||
/** Total liabilities */
|
||||
totalLiabilities: number;
|
||||
|
||||
|
||||
// Equity
|
||||
/** Total stockholders' equity */
|
||||
totalStockholdersEquity: number;
|
||||
|
|
@ -95,14 +95,14 @@ export interface IncomeStatement {
|
|||
period: string;
|
||||
/** Currency */
|
||||
currency: string;
|
||||
|
||||
|
||||
/** Total revenue/net sales */
|
||||
totalRevenue: number;
|
||||
/** Cost of goods sold */
|
||||
costOfGoodsSold: number;
|
||||
/** Gross profit */
|
||||
grossProfit: number;
|
||||
|
||||
|
||||
/** Operating expenses */
|
||||
operatingExpenses: number;
|
||||
/** Research and development */
|
||||
|
|
@ -113,24 +113,24 @@ export interface IncomeStatement {
|
|||
depreciationAmortization?: number;
|
||||
/** Other operating expenses */
|
||||
otherOperatingExpenses?: number;
|
||||
|
||||
|
||||
/** Operating income */
|
||||
operatingIncome: number;
|
||||
|
||||
|
||||
/** Interest income */
|
||||
interestIncome?: number;
|
||||
/** Interest expense */
|
||||
interestExpense?: number;
|
||||
/** Other income/expense */
|
||||
otherIncomeExpense?: number;
|
||||
|
||||
|
||||
/** Income before taxes */
|
||||
incomeBeforeTaxes: number;
|
||||
/** Income tax expense */
|
||||
incomeTaxExpense: number;
|
||||
/** Net income */
|
||||
netIncome: number;
|
||||
|
||||
|
||||
/** Earnings per share (basic) */
|
||||
earningsPerShareBasic: number;
|
||||
/** Earnings per share (diluted) */
|
||||
|
|
@ -151,7 +151,7 @@ export interface CashFlowStatement {
|
|||
period: string;
|
||||
/** Currency */
|
||||
currency: string;
|
||||
|
||||
|
||||
// Operating Activities
|
||||
/** Net income */
|
||||
netIncome: number;
|
||||
|
|
@ -163,8 +163,8 @@ export interface CashFlowStatement {
|
|||
otherOperatingActivities?: number;
|
||||
/** Net cash from operating activities */
|
||||
netCashFromOperatingActivities: number;
|
||||
|
||||
// Investing Activities
|
||||
|
||||
// Investing Activities
|
||||
/** Capital expenditures */
|
||||
capitalExpenditures: number;
|
||||
/** Acquisitions */
|
||||
|
|
@ -175,7 +175,7 @@ export interface CashFlowStatement {
|
|||
otherInvestingActivities?: number;
|
||||
/** Net cash from investing activities */
|
||||
netCashFromInvestingActivities: number;
|
||||
|
||||
|
||||
// Financing Activities
|
||||
/** Debt issuance/repayment */
|
||||
debtIssuanceRepayment?: number;
|
||||
|
|
@ -187,11 +187,11 @@ export interface CashFlowStatement {
|
|||
otherFinancingActivities?: number;
|
||||
/** Net cash from financing activities */
|
||||
netCashFromFinancingActivities: number;
|
||||
|
||||
|
||||
/** Net change in cash */
|
||||
netChangeInCash: number;
|
||||
/** Cash at beginning of period */
|
||||
cashAtBeginningOfPeriod: number;
|
||||
/** Cash at end of period */
|
||||
cashAtEndOfPeriod: number;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,111 +1,119 @@
|
|||
/**
|
||||
* Handler Registry - Lightweight registry for queue handlers
|
||||
* Moved here to avoid circular dependencies between handlers and queue
|
||||
*/
|
||||
|
||||
import type { JobHandler, HandlerConfig, HandlerConfigWithSchedule, ScheduledJob } from './handlers';
|
||||
|
||||
class HandlerRegistry {
|
||||
private handlers = new Map<string, HandlerConfig>();
|
||||
private handlerSchedules = new Map<string, ScheduledJob[]>();
|
||||
|
||||
/**
|
||||
* Register a handler with its operations (simple config)
|
||||
*/
|
||||
register(handlerName: string, config: HandlerConfig): void {
|
||||
console.log(`Registering handler: ${handlerName}`, {
|
||||
operations: Object.keys(config),
|
||||
});
|
||||
|
||||
this.handlers.set(handlerName, config);
|
||||
}
|
||||
|
||||
/**
|
||||
* Register a handler with scheduled jobs (enhanced config)
|
||||
*/
|
||||
registerWithSchedule(config: HandlerConfigWithSchedule): void {
|
||||
console.log(`Registering handler with schedule: ${config.name}`, {
|
||||
operations: Object.keys(config.operations),
|
||||
scheduledJobs: config.scheduledJobs?.length || 0,
|
||||
});
|
||||
|
||||
this.handlers.set(config.name, config.operations);
|
||||
|
||||
if (config.scheduledJobs && config.scheduledJobs.length > 0) {
|
||||
this.handlerSchedules.set(config.name, config.scheduledJobs);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a specific handler's configuration
|
||||
*/
|
||||
getHandler(handlerName: string): HandlerConfig | undefined {
|
||||
return this.handlers.get(handlerName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all registered handlers
|
||||
*/
|
||||
getAllHandlers(): Map<string, HandlerConfig> {
|
||||
return new Map(this.handlers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get scheduled jobs for a handler
|
||||
*/
|
||||
getScheduledJobs(handlerName: string): ScheduledJob[] {
|
||||
return this.handlerSchedules.get(handlerName) || [];
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all handlers with their scheduled jobs
|
||||
*/
|
||||
getAllHandlersWithSchedule(): Map<string, { operations: HandlerConfig; scheduledJobs: ScheduledJob[] }> {
|
||||
const result = new Map<string, { operations: HandlerConfig; scheduledJobs: ScheduledJob[] }>();
|
||||
|
||||
for (const [name, operations] of this.handlers) {
|
||||
result.set(name, {
|
||||
operations,
|
||||
scheduledJobs: this.handlerSchedules.get(name) || []
|
||||
});
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a specific operation from a handler
|
||||
*/
|
||||
getOperation(handlerName: string, operationName: string): JobHandler | undefined {
|
||||
const handler = this.handlers.get(handlerName);
|
||||
if (!handler) {
|
||||
return undefined;
|
||||
}
|
||||
return handler[operationName];
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a handler is registered
|
||||
*/
|
||||
hasHandler(handlerName: string): boolean {
|
||||
return this.handlers.has(handlerName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get list of all registered handler names
|
||||
*/
|
||||
getHandlerNames(): string[] {
|
||||
return Array.from(this.handlers.keys());
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear all registrations (useful for testing)
|
||||
*/
|
||||
clear(): void {
|
||||
this.handlers.clear();
|
||||
this.handlerSchedules.clear();
|
||||
}
|
||||
}
|
||||
|
||||
// Export singleton instance
|
||||
export const handlerRegistry = new HandlerRegistry();
|
||||
/**
|
||||
* Handler Registry - Lightweight registry for queue handlers
|
||||
* Moved here to avoid circular dependencies between handlers and queue
|
||||
*/
|
||||
|
||||
import type {
|
||||
HandlerConfig,
|
||||
HandlerConfigWithSchedule,
|
||||
JobHandler,
|
||||
ScheduledJob,
|
||||
} from './handlers';
|
||||
|
||||
class HandlerRegistry {
|
||||
private handlers = new Map<string, HandlerConfig>();
|
||||
private handlerSchedules = new Map<string, ScheduledJob[]>();
|
||||
|
||||
/**
|
||||
* Register a handler with its operations (simple config)
|
||||
*/
|
||||
register(handlerName: string, config: HandlerConfig): void {
|
||||
console.log(`Registering handler: ${handlerName}`, {
|
||||
operations: Object.keys(config),
|
||||
});
|
||||
|
||||
this.handlers.set(handlerName, config);
|
||||
}
|
||||
|
||||
/**
|
||||
* Register a handler with scheduled jobs (enhanced config)
|
||||
*/
|
||||
registerWithSchedule(config: HandlerConfigWithSchedule): void {
|
||||
console.log(`Registering handler with schedule: ${config.name}`, {
|
||||
operations: Object.keys(config.operations),
|
||||
scheduledJobs: config.scheduledJobs?.length || 0,
|
||||
});
|
||||
|
||||
this.handlers.set(config.name, config.operations);
|
||||
|
||||
if (config.scheduledJobs && config.scheduledJobs.length > 0) {
|
||||
this.handlerSchedules.set(config.name, config.scheduledJobs);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a specific handler's configuration
|
||||
*/
|
||||
getHandler(handlerName: string): HandlerConfig | undefined {
|
||||
return this.handlers.get(handlerName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all registered handlers
|
||||
*/
|
||||
getAllHandlers(): Map<string, HandlerConfig> {
|
||||
return new Map(this.handlers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get scheduled jobs for a handler
|
||||
*/
|
||||
getScheduledJobs(handlerName: string): ScheduledJob[] {
|
||||
return this.handlerSchedules.get(handlerName) || [];
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all handlers with their scheduled jobs
|
||||
*/
|
||||
getAllHandlersWithSchedule(): Map<
|
||||
string,
|
||||
{ operations: HandlerConfig; scheduledJobs: ScheduledJob[] }
|
||||
> {
|
||||
const result = new Map<string, { operations: HandlerConfig; scheduledJobs: ScheduledJob[] }>();
|
||||
|
||||
for (const [name, operations] of this.handlers) {
|
||||
result.set(name, {
|
||||
operations,
|
||||
scheduledJobs: this.handlerSchedules.get(name) || [],
|
||||
});
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a specific operation from a handler
|
||||
*/
|
||||
getOperation(handlerName: string, operationName: string): JobHandler | undefined {
|
||||
const handler = this.handlers.get(handlerName);
|
||||
if (!handler) {
|
||||
return undefined;
|
||||
}
|
||||
return handler[operationName];
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a handler is registered
|
||||
*/
|
||||
hasHandler(handlerName: string): boolean {
|
||||
return this.handlers.has(handlerName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get list of all registered handler names
|
||||
*/
|
||||
getHandlerNames(): string[] {
|
||||
return Array.from(this.handlers.keys());
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear all registrations (useful for testing)
|
||||
*/
|
||||
clear(): void {
|
||||
this.handlers.clear();
|
||||
this.handlerSchedules.clear();
|
||||
}
|
||||
}
|
||||
|
||||
// Export singleton instance
|
||||
export const handlerRegistry = new HandlerRegistry();
|
||||
|
|
|
|||
|
|
@ -1,83 +1,83 @@
|
|||
/**
|
||||
* Handler and Queue Types
|
||||
* Shared types for handler system and queue operations
|
||||
*/
|
||||
|
||||
// Generic execution context - decoupled from service implementations
|
||||
export interface ExecutionContext {
|
||||
type: 'http' | 'queue' | 'scheduled' | 'event';
|
||||
metadata: {
|
||||
source?: string;
|
||||
jobId?: string;
|
||||
attempts?: number;
|
||||
timestamp?: number;
|
||||
traceId?: string;
|
||||
[key: string]: unknown;
|
||||
};
|
||||
}
|
||||
|
||||
// Simple handler interface
|
||||
export interface IHandler {
|
||||
execute(operation: string, input: unknown, context: ExecutionContext): Promise<unknown>;
|
||||
}
|
||||
|
||||
// Job handler type for queue operations
|
||||
export interface JobHandler<TPayload = unknown, TResult = unknown> {
|
||||
(payload: TPayload): Promise<TResult>;
|
||||
}
|
||||
|
||||
// Type-safe wrapper for creating job handlers
|
||||
export type TypedJobHandler<TPayload, TResult = unknown> = (payload: TPayload) => Promise<TResult>;
|
||||
|
||||
// Scheduled job configuration
|
||||
export interface ScheduledJob<T = unknown> {
|
||||
type: string;
|
||||
operation: string;
|
||||
payload?: T;
|
||||
cronPattern: string;
|
||||
priority?: number;
|
||||
description?: string;
|
||||
immediately?: boolean;
|
||||
delay?: number;
|
||||
}
|
||||
|
||||
// Handler configuration
|
||||
export interface HandlerConfig {
|
||||
[operation: string]: JobHandler;
|
||||
}
|
||||
|
||||
// Handler configuration with schedule
|
||||
export interface HandlerConfigWithSchedule {
|
||||
name: string;
|
||||
operations: Record<string, JobHandler>;
|
||||
scheduledJobs?: ScheduledJob[];
|
||||
}
|
||||
|
||||
// Handler metadata for registry
|
||||
export interface HandlerMetadata {
|
||||
name: string;
|
||||
version?: string;
|
||||
description?: string;
|
||||
operations: string[];
|
||||
scheduledJobs?: ScheduledJob[];
|
||||
}
|
||||
|
||||
// Operation metadata for decorators
|
||||
export interface OperationMetadata {
|
||||
name: string;
|
||||
schedules?: string[];
|
||||
operation?: string;
|
||||
description?: string;
|
||||
validation?: (input: unknown) => boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a typed job handler with validation
|
||||
*/
|
||||
export function createJobHandler<TPayload = unknown, TResult = unknown>(
|
||||
handler: TypedJobHandler<TPayload, TResult>
|
||||
): JobHandler<unknown, TResult> {
|
||||
return async (payload: unknown): Promise<TResult> => {
|
||||
return handler(payload as TPayload);
|
||||
};
|
||||
}
|
||||
/**
|
||||
* Handler and Queue Types
|
||||
* Shared types for handler system and queue operations
|
||||
*/
|
||||
|
||||
// Generic execution context - decoupled from service implementations
|
||||
export interface ExecutionContext {
|
||||
type: 'http' | 'queue' | 'scheduled' | 'event';
|
||||
metadata: {
|
||||
source?: string;
|
||||
jobId?: string;
|
||||
attempts?: number;
|
||||
timestamp?: number;
|
||||
traceId?: string;
|
||||
[key: string]: unknown;
|
||||
};
|
||||
}
|
||||
|
||||
// Simple handler interface
|
||||
export interface IHandler {
|
||||
execute(operation: string, input: unknown, context: ExecutionContext): Promise<unknown>;
|
||||
}
|
||||
|
||||
// Job handler type for queue operations
|
||||
export interface JobHandler<TPayload = unknown, TResult = unknown> {
|
||||
(payload: TPayload): Promise<TResult>;
|
||||
}
|
||||
|
||||
// Type-safe wrapper for creating job handlers
|
||||
export type TypedJobHandler<TPayload, TResult = unknown> = (payload: TPayload) => Promise<TResult>;
|
||||
|
||||
// Scheduled job configuration
|
||||
export interface ScheduledJob<T = unknown> {
|
||||
type: string;
|
||||
operation: string;
|
||||
payload?: T;
|
||||
cronPattern: string;
|
||||
priority?: number;
|
||||
description?: string;
|
||||
immediately?: boolean;
|
||||
delay?: number;
|
||||
}
|
||||
|
||||
// Handler configuration
|
||||
export interface HandlerConfig {
|
||||
[operation: string]: JobHandler;
|
||||
}
|
||||
|
||||
// Handler configuration with schedule
|
||||
export interface HandlerConfigWithSchedule {
|
||||
name: string;
|
||||
operations: Record<string, JobHandler>;
|
||||
scheduledJobs?: ScheduledJob[];
|
||||
}
|
||||
|
||||
// Handler metadata for registry
|
||||
export interface HandlerMetadata {
|
||||
name: string;
|
||||
version?: string;
|
||||
description?: string;
|
||||
operations: string[];
|
||||
scheduledJobs?: ScheduledJob[];
|
||||
}
|
||||
|
||||
// Operation metadata for decorators
|
||||
export interface OperationMetadata {
|
||||
name: string;
|
||||
schedules?: string[];
|
||||
operation?: string;
|
||||
description?: string;
|
||||
validation?: (input: unknown) => boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a typed job handler with validation
|
||||
*/
|
||||
export function createJobHandler<TPayload = unknown, TResult = unknown>(
|
||||
handler: TypedJobHandler<TPayload, TResult>
|
||||
): JobHandler<unknown, TResult> {
|
||||
return async (payload: unknown): Promise<TResult> => {
|
||||
return handler(payload as TPayload);
|
||||
};
|
||||
}
|
||||
|
|
|
|||
|
|
@ -33,4 +33,4 @@ export interface HasVolume {
|
|||
*/
|
||||
export interface HasTimestamp {
|
||||
timestamp: number;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -104,4 +104,4 @@ export interface MarketRegime {
|
|||
trendDirection?: 'up' | 'down';
|
||||
/** Volatility level */
|
||||
volatilityLevel: 'low' | 'medium' | 'high';
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -55,4 +55,4 @@ export interface GreeksCalculation {
|
|||
vega: number;
|
||||
/** Rho - interest rate sensitivity */
|
||||
rho: number;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -105,4 +105,4 @@ export interface KellyParams {
|
|||
averageLoss: number;
|
||||
/** Risk-free rate */
|
||||
riskFreeRate?: number;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -83,4 +83,4 @@ export interface ReturnAnalysis {
|
|||
averagePositiveReturn: number;
|
||||
/** Average negative return */
|
||||
averageNegativeReturn: number;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -14,23 +14,23 @@ export interface TechnicalIndicators {
|
|||
/** Relative Strength Index */
|
||||
rsi: number[];
|
||||
/** MACD indicator */
|
||||
macd: {
|
||||
macd: number[];
|
||||
signal: number[];
|
||||
histogram: number[];
|
||||
macd: {
|
||||
macd: number[];
|
||||
signal: number[];
|
||||
histogram: number[];
|
||||
};
|
||||
/** Bollinger Bands */
|
||||
bollinger: {
|
||||
upper: number[];
|
||||
middle: number[];
|
||||
lower: number[];
|
||||
bollinger: {
|
||||
upper: number[];
|
||||
middle: number[];
|
||||
lower: number[];
|
||||
};
|
||||
/** Average True Range */
|
||||
atr: number[];
|
||||
/** Stochastic Oscillator */
|
||||
stochastic: {
|
||||
k: number[];
|
||||
d: number[];
|
||||
stochastic: {
|
||||
k: number[];
|
||||
d: number[];
|
||||
};
|
||||
/** Williams %R */
|
||||
williams_r: number[];
|
||||
|
|
@ -106,4 +106,4 @@ export interface GARCHParameters {
|
|||
aic: number;
|
||||
/** BIC (Bayesian Information Criterion) */
|
||||
bic: number;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -59,4 +59,4 @@ export interface TradePerformance {
|
|||
grossLoss: number;
|
||||
/** Net profit */
|
||||
netProfit: number;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -6,6 +6,5 @@
|
|||
"composite": true
|
||||
},
|
||||
"include": ["src/**/*"],
|
||||
"references": [
|
||||
]
|
||||
"references": []
|
||||
}
|
||||
|
|
|
|||
8
libs/data/cache/src/index.ts
vendored
8
libs/data/cache/src/index.ts
vendored
|
|
@ -39,7 +39,13 @@ export function createCache(options: CacheOptions): CacheProvider {
|
|||
|
||||
// Export types and classes
|
||||
export type {
|
||||
CacheConfig, CacheKey, CacheOptions, CacheProvider, CacheStats, RedisConfig, SerializationOptions
|
||||
CacheConfig,
|
||||
CacheKey,
|
||||
CacheOptions,
|
||||
CacheProvider,
|
||||
CacheStats,
|
||||
RedisConfig,
|
||||
SerializationOptions,
|
||||
} from './types';
|
||||
|
||||
export { RedisConnectionManager } from './connection-manager';
|
||||
|
|
|
|||
4
libs/data/cache/tsconfig.json
vendored
4
libs/data/cache/tsconfig.json
vendored
|
|
@ -6,7 +6,5 @@
|
|||
"composite": true
|
||||
},
|
||||
"include": ["src/**/*"],
|
||||
"references": [
|
||||
{ "path": "../../core/logger" }
|
||||
]
|
||||
"references": [{ "path": "../../core/logger" }]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,7 +1,13 @@
|
|||
import type { Logger } from '@stock-bot/core/logger';
|
||||
import type { OptionalUnlessRequiredId } from 'mongodb';
|
||||
import { Collection, Db, MongoClient } from 'mongodb';
|
||||
import type { ConnectionEvents, DocumentBase, DynamicPoolConfig, MongoDBClientConfig, PoolMetrics } from './types';
|
||||
import type { Logger } from '@stock-bot/core/logger';
|
||||
import type {
|
||||
ConnectionEvents,
|
||||
DocumentBase,
|
||||
DynamicPoolConfig,
|
||||
MongoDBClientConfig,
|
||||
PoolMetrics,
|
||||
} from './types';
|
||||
|
||||
/**
|
||||
* MongoDB Client for Stock Bot Data Service
|
||||
|
|
@ -71,7 +77,7 @@ export class MongoDBClient {
|
|||
if (this.events?.onConnect) {
|
||||
await Promise.resolve(this.events.onConnect());
|
||||
}
|
||||
|
||||
|
||||
// Fire pool created event
|
||||
if (this.events?.onPoolCreated) {
|
||||
await Promise.resolve(this.events.onPoolCreated());
|
||||
|
|
@ -89,12 +95,12 @@ export class MongoDBClient {
|
|||
} catch (error) {
|
||||
this.metrics.errors++;
|
||||
this.metrics.lastError = error instanceof Error ? error.message : 'Unknown error';
|
||||
|
||||
|
||||
// Fire error event
|
||||
if (this.events?.onError) {
|
||||
await Promise.resolve(this.events.onError(error as Error));
|
||||
}
|
||||
|
||||
|
||||
this.logger.error('MongoDB connection failed:', error);
|
||||
if (this.client) {
|
||||
await this.client.close();
|
||||
|
|
@ -123,12 +129,12 @@ export class MongoDBClient {
|
|||
this.isConnected = false;
|
||||
this.client = null;
|
||||
this.db = null;
|
||||
|
||||
|
||||
// Fire disconnect event
|
||||
if (this.events?.onDisconnect) {
|
||||
await Promise.resolve(this.events.onDisconnect());
|
||||
}
|
||||
|
||||
|
||||
this.logger.info('Disconnected from MongoDB');
|
||||
} catch (error) {
|
||||
this.logger.error('Error disconnecting from MongoDB:', error);
|
||||
|
|
@ -206,13 +212,16 @@ export class MongoDBClient {
|
|||
let totalUpdated = 0;
|
||||
const errors: unknown[] = [];
|
||||
|
||||
this.logger.info(`Starting batch upsert operation [${collectionName}-${documents.length}][${operationId}]`, {
|
||||
database: dbName,
|
||||
collection: collectionName,
|
||||
totalDocuments: documents.length,
|
||||
uniqueKeys: keyFields,
|
||||
chunkSize,
|
||||
});
|
||||
this.logger.info(
|
||||
`Starting batch upsert operation [${collectionName}-${documents.length}][${operationId}]`,
|
||||
{
|
||||
database: dbName,
|
||||
collection: collectionName,
|
||||
totalDocuments: documents.length,
|
||||
uniqueKeys: keyFields,
|
||||
chunkSize,
|
||||
}
|
||||
);
|
||||
|
||||
// Process documents in chunks to avoid memory issues
|
||||
for (let i = 0; i < documents.length; i += chunkSize) {
|
||||
|
|
@ -422,7 +431,7 @@ export class MongoDBClient {
|
|||
getPoolMetrics(): PoolMetrics {
|
||||
// Update last used timestamp
|
||||
this.metrics.lastUsed = new Date();
|
||||
|
||||
|
||||
// Note: MongoDB driver doesn't expose detailed pool metrics
|
||||
// These are estimates based on configuration
|
||||
return { ...this.metrics };
|
||||
|
|
@ -433,7 +442,7 @@ export class MongoDBClient {
|
|||
*/
|
||||
setDynamicPoolConfig(config: DynamicPoolConfig): void {
|
||||
this.dynamicPoolConfig = config;
|
||||
|
||||
|
||||
if (config.enabled && this.isConnected && !this.poolMonitorInterval) {
|
||||
this.startPoolMonitoring();
|
||||
} else if (!config.enabled && this.poolMonitorInterval) {
|
||||
|
|
@ -465,7 +474,7 @@ export class MongoDBClient {
|
|||
|
||||
const { minSize, maxSize, scaleUpThreshold, scaleDownThreshold } = this.dynamicPoolConfig;
|
||||
const currentSize = this.metrics.totalConnections;
|
||||
const utilization = ((this.metrics.activeConnections / currentSize) * 100);
|
||||
const utilization = (this.metrics.activeConnections / currentSize) * 100;
|
||||
|
||||
this.logger.debug('Pool utilization', {
|
||||
utilization: `${utilization.toFixed(1)}%`,
|
||||
|
|
@ -477,13 +486,21 @@ export class MongoDBClient {
|
|||
if (utilization > scaleUpThreshold && currentSize < maxSize) {
|
||||
const newSize = Math.min(currentSize + this.dynamicPoolConfig.scaleUpIncrement, maxSize);
|
||||
await this.resizePool(newSize);
|
||||
this.logger.info('Scaling up connection pool', { from: currentSize, to: newSize, utilization });
|
||||
this.logger.info('Scaling up connection pool', {
|
||||
from: currentSize,
|
||||
to: newSize,
|
||||
utilization,
|
||||
});
|
||||
}
|
||||
// Scale down if utilization is low
|
||||
else if (utilization < scaleDownThreshold && currentSize > minSize) {
|
||||
const newSize = Math.max(currentSize - this.dynamicPoolConfig.scaleDownIncrement, minSize);
|
||||
await this.resizePool(newSize);
|
||||
this.logger.info('Scaling down connection pool', { from: currentSize, to: newSize, utilization });
|
||||
this.logger.info('Scaling down connection pool', {
|
||||
from: currentSize,
|
||||
to: newSize,
|
||||
utilization,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -494,8 +511,10 @@ export class MongoDBClient {
|
|||
private async resizePool(newSize: number): Promise<void> {
|
||||
// MongoDB doesn't support dynamic pool resizing
|
||||
// This is a placeholder for future implementation
|
||||
this.logger.warn('Dynamic pool resizing not yet implemented for MongoDB', { requestedSize: newSize });
|
||||
|
||||
this.logger.warn('Dynamic pool resizing not yet implemented for MongoDB', {
|
||||
requestedSize: newSize,
|
||||
});
|
||||
|
||||
// Update metrics to reflect desired state
|
||||
this.metrics.totalConnections = newSize;
|
||||
}
|
||||
|
|
@ -514,7 +533,10 @@ export class MongoDBClient {
|
|||
// Create minimum connections by running parallel pings
|
||||
for (let i = 0; i < minSize; i++) {
|
||||
promises.push(
|
||||
this.client.db(this.defaultDatabase).admin().ping()
|
||||
this.client
|
||||
.db(this.defaultDatabase)
|
||||
.admin()
|
||||
.ping()
|
||||
.then(() => {
|
||||
this.logger.debug(`Warmed up connection ${i + 1}/${minSize}`);
|
||||
})
|
||||
|
|
|
|||
|
|
@ -9,14 +9,20 @@ export { MongoDBClient } from './client';
|
|||
|
||||
// Types
|
||||
export type {
|
||||
AnalystReport, ConnectionEvents, DocumentBase, DynamicPoolConfig, EarningsTranscript,
|
||||
ExchangeSourceMapping,
|
||||
MasterExchange,
|
||||
MongoDBClientConfig,
|
||||
MongoDBConnectionOptions,
|
||||
NewsArticle, PoolMetrics, RawDocument,
|
||||
SecFiling,
|
||||
SentimentData
|
||||
AnalystReport,
|
||||
ConnectionEvents,
|
||||
DocumentBase,
|
||||
DynamicPoolConfig,
|
||||
EarningsTranscript,
|
||||
ExchangeSourceMapping,
|
||||
MasterExchange,
|
||||
MongoDBClientConfig,
|
||||
MongoDBConnectionOptions,
|
||||
NewsArticle,
|
||||
PoolMetrics,
|
||||
RawDocument,
|
||||
SecFiling,
|
||||
SentimentData,
|
||||
} from './types';
|
||||
|
||||
// Note: Factory functions removed - use Awilix DI container instead
|
||||
|
|
|
|||
|
|
@ -6,8 +6,5 @@
|
|||
"composite": true
|
||||
},
|
||||
"include": ["src/**/*"],
|
||||
"references": [
|
||||
{ "path": "../../core/logger" },
|
||||
{ "path": "../../core/types" }
|
||||
]
|
||||
"references": [{ "path": "../../core/logger" }, { "path": "../../core/types" }]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,13 +4,13 @@ import { PostgreSQLHealthMonitor } from './health';
|
|||
import { PostgreSQLQueryBuilder } from './query-builder';
|
||||
import { PostgreSQLTransactionManager } from './transactions';
|
||||
import type {
|
||||
ConnectionEvents,
|
||||
DynamicPoolConfig,
|
||||
PoolMetrics,
|
||||
PostgreSQLClientConfig,
|
||||
PostgreSQLConnectionOptions,
|
||||
QueryResult,
|
||||
TransactionCallback,
|
||||
PoolMetrics,
|
||||
ConnectionEvents,
|
||||
DynamicPoolConfig,
|
||||
} from './types';
|
||||
|
||||
/**
|
||||
|
|
@ -32,7 +32,12 @@ export class PostgreSQLClient {
|
|||
private dynamicPoolConfig?: DynamicPoolConfig;
|
||||
private poolMonitorInterval?: NodeJS.Timeout;
|
||||
|
||||
constructor(config: PostgreSQLClientConfig, logger?: any, options?: PostgreSQLConnectionOptions, events?: ConnectionEvents) {
|
||||
constructor(
|
||||
config: PostgreSQLClientConfig,
|
||||
logger?: any,
|
||||
options?: PostgreSQLConnectionOptions,
|
||||
events?: ConnectionEvents
|
||||
) {
|
||||
this.config = config;
|
||||
this.options = {
|
||||
retryAttempts: 3,
|
||||
|
|
@ -45,7 +50,7 @@ export class PostgreSQLClient {
|
|||
this.logger = logger || console;
|
||||
this.healthMonitor = new PostgreSQLHealthMonitor(this);
|
||||
this.transactionManager = new PostgreSQLTransactionManager(this);
|
||||
|
||||
|
||||
this.metrics = {
|
||||
totalConnections: 0,
|
||||
activeConnections: 0,
|
||||
|
|
@ -80,22 +85,22 @@ export class PostgreSQLClient {
|
|||
client.release();
|
||||
|
||||
this.isConnected = true;
|
||||
|
||||
|
||||
// Update metrics
|
||||
const poolConfig = this.config.poolSettings;
|
||||
this.metrics.totalConnections = poolConfig?.max || 10;
|
||||
this.metrics.idleConnections = poolConfig?.min || 2;
|
||||
|
||||
|
||||
// Fire connection event
|
||||
if (this.events?.onConnect) {
|
||||
await Promise.resolve(this.events.onConnect());
|
||||
}
|
||||
|
||||
|
||||
// Fire pool created event
|
||||
if (this.events?.onPoolCreated) {
|
||||
await Promise.resolve(this.events.onPoolCreated());
|
||||
}
|
||||
|
||||
|
||||
this.logger.info('Successfully connected to PostgreSQL', {
|
||||
poolSize: this.metrics.totalConnections,
|
||||
});
|
||||
|
|
@ -105,10 +110,10 @@ export class PostgreSQLClient {
|
|||
|
||||
// Setup error handlers
|
||||
this.setupErrorHandlers();
|
||||
|
||||
|
||||
// Setup pool event listeners for metrics
|
||||
this.setupPoolMetrics();
|
||||
|
||||
|
||||
// Start dynamic pool monitoring if enabled
|
||||
if (this.dynamicPoolConfig?.enabled) {
|
||||
this.startPoolMonitoring();
|
||||
|
|
@ -119,12 +124,12 @@ export class PostgreSQLClient {
|
|||
lastError = error as Error;
|
||||
this.metrics.errors++;
|
||||
this.metrics.lastError = lastError.message;
|
||||
|
||||
|
||||
// Fire error event
|
||||
if (this.events?.onError) {
|
||||
await Promise.resolve(this.events.onError(lastError));
|
||||
}
|
||||
|
||||
|
||||
this.logger.error(`PostgreSQL connection attempt ${attempt} failed:`, error);
|
||||
|
||||
if (this.pool) {
|
||||
|
|
@ -157,17 +162,17 @@ export class PostgreSQLClient {
|
|||
clearInterval(this.poolMonitorInterval);
|
||||
this.poolMonitorInterval = undefined;
|
||||
}
|
||||
|
||||
|
||||
this.healthMonitor.stop();
|
||||
await this.pool.end();
|
||||
this.isConnected = false;
|
||||
this.pool = null;
|
||||
|
||||
|
||||
// Fire disconnect event
|
||||
if (this.events?.onDisconnect) {
|
||||
await Promise.resolve(this.events.onDisconnect());
|
||||
}
|
||||
|
||||
|
||||
this.logger.info('Disconnected from PostgreSQL');
|
||||
} catch (error) {
|
||||
this.logger.error('Error disconnecting from PostgreSQL:', error);
|
||||
|
|
@ -429,7 +434,6 @@ export class PostgreSQLClient {
|
|||
return this.pool;
|
||||
}
|
||||
|
||||
|
||||
private buildPoolConfig(): any {
|
||||
return {
|
||||
host: this.config.host,
|
||||
|
|
@ -481,7 +485,7 @@ export class PostgreSQLClient {
|
|||
getPoolMetrics(): PoolMetrics {
|
||||
// Update last used timestamp
|
||||
this.metrics.lastUsed = new Date();
|
||||
|
||||
|
||||
// Update metrics from pool if available
|
||||
if (this.pool) {
|
||||
this.metrics.totalConnections = this.pool.totalCount;
|
||||
|
|
@ -489,7 +493,7 @@ export class PostgreSQLClient {
|
|||
this.metrics.waitingRequests = this.pool.waitingCount;
|
||||
this.metrics.activeConnections = this.metrics.totalConnections - this.metrics.idleConnections;
|
||||
}
|
||||
|
||||
|
||||
return { ...this.metrics };
|
||||
}
|
||||
|
||||
|
|
@ -498,7 +502,7 @@ export class PostgreSQLClient {
|
|||
*/
|
||||
setDynamicPoolConfig(config: DynamicPoolConfig): void {
|
||||
this.dynamicPoolConfig = config;
|
||||
|
||||
|
||||
if (config.enabled && this.isConnected && !this.poolMonitorInterval) {
|
||||
this.startPoolMonitoring();
|
||||
} else if (!config.enabled && this.poolMonitorInterval) {
|
||||
|
|
@ -552,7 +556,7 @@ export class PostgreSQLClient {
|
|||
const metrics = this.getPoolMetrics();
|
||||
const { minSize, maxSize, scaleUpThreshold, scaleDownThreshold } = this.dynamicPoolConfig;
|
||||
const currentSize = metrics.totalConnections;
|
||||
const utilization = currentSize > 0 ? ((metrics.activeConnections / currentSize) * 100) : 0;
|
||||
const utilization = currentSize > 0 ? (metrics.activeConnections / currentSize) * 100 : 0;
|
||||
|
||||
this.logger.debug('Pool utilization', {
|
||||
utilization: `${utilization.toFixed(1)}%`,
|
||||
|
|
@ -564,13 +568,21 @@ export class PostgreSQLClient {
|
|||
// Scale up if utilization is high or there are waiting requests
|
||||
if ((utilization > scaleUpThreshold || metrics.waitingRequests > 0) && currentSize < maxSize) {
|
||||
const newSize = Math.min(currentSize + this.dynamicPoolConfig.scaleUpIncrement, maxSize);
|
||||
this.logger.info('Would scale up connection pool', { from: currentSize, to: newSize, utilization });
|
||||
this.logger.info('Would scale up connection pool', {
|
||||
from: currentSize,
|
||||
to: newSize,
|
||||
utilization,
|
||||
});
|
||||
// Note: pg module doesn't support dynamic resizing, would need reconnection
|
||||
}
|
||||
// Scale down if utilization is low
|
||||
else if (utilization < scaleDownThreshold && currentSize > minSize) {
|
||||
const newSize = Math.max(currentSize - this.dynamicPoolConfig.scaleDownIncrement, minSize);
|
||||
this.logger.info('Would scale down connection pool', { from: currentSize, to: newSize, utilization });
|
||||
this.logger.info('Would scale down connection pool', {
|
||||
from: currentSize,
|
||||
to: newSize,
|
||||
utilization,
|
||||
});
|
||||
// Note: pg module doesn't support dynamic resizing, would need reconnection
|
||||
}
|
||||
}
|
||||
|
|
@ -589,7 +601,8 @@ export class PostgreSQLClient {
|
|||
// Create minimum connections by running parallel queries
|
||||
for (let i = 0; i < minSize; i++) {
|
||||
promises.push(
|
||||
this.pool.query('SELECT 1')
|
||||
this.pool
|
||||
.query('SELECT 1')
|
||||
.then(() => {
|
||||
this.logger.debug(`Warmed up connection ${i + 1}/${minSize}`);
|
||||
})
|
||||
|
|
|
|||
|
|
@ -6,8 +6,5 @@
|
|||
"composite": true
|
||||
},
|
||||
"include": ["src/**/*"],
|
||||
"references": [
|
||||
{ "path": "../../core/logger" },
|
||||
{ "path": "../../core/types" }
|
||||
]
|
||||
"references": [{ "path": "../../core/logger" }, { "path": "../../core/types" }]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -41,12 +41,12 @@ export class QuestDBClient {
|
|||
console.log('DEBUG: QuestDB client constructor called with config:', {
|
||||
...config,
|
||||
user: config.user || '[NOT PROVIDED]',
|
||||
password: config.password ? '[PROVIDED]' : '[NOT PROVIDED]'
|
||||
password: config.password ? '[PROVIDED]' : '[NOT PROVIDED]',
|
||||
});
|
||||
this.logger.debug('QuestDB client created with config:', {
|
||||
...config,
|
||||
user: config.user || '[NOT PROVIDED]',
|
||||
password: config.password ? '[PROVIDED]' : '[NOT PROVIDED]'
|
||||
password: config.password ? '[PROVIDED]' : '[NOT PROVIDED]',
|
||||
});
|
||||
|
||||
this.healthMonitor = new QuestDBHealthMonitor(this);
|
||||
|
|
@ -417,7 +417,6 @@ export class QuestDBClient {
|
|||
return { ...this.config };
|
||||
}
|
||||
|
||||
|
||||
private buildPgPoolConfig(): any {
|
||||
const config: any = {
|
||||
host: this.config.host,
|
||||
|
|
@ -443,7 +442,7 @@ export class QuestDBClient {
|
|||
console.log('DEBUG: No user provided for QuestDB connection');
|
||||
this.logger.debug('No user provided for QuestDB connection');
|
||||
}
|
||||
|
||||
|
||||
if (this.config.password) {
|
||||
console.log('DEBUG: Adding password to QuestDB pool config');
|
||||
this.logger.debug('Adding password to QuestDB pool config');
|
||||
|
|
@ -453,8 +452,14 @@ export class QuestDBClient {
|
|||
this.logger.debug('No password provided for QuestDB connection');
|
||||
}
|
||||
|
||||
console.log('DEBUG: Final QuestDB pool config:', { ...config, password: config.password ? '[REDACTED]' : undefined });
|
||||
this.logger.debug('Final QuestDB pool config:', { ...config, password: config.password ? '[REDACTED]' : undefined });
|
||||
console.log('DEBUG: Final QuestDB pool config:', {
|
||||
...config,
|
||||
password: config.password ? '[REDACTED]' : undefined,
|
||||
});
|
||||
this.logger.debug('Final QuestDB pool config:', {
|
||||
...config,
|
||||
password: config.password ? '[REDACTED]' : undefined,
|
||||
});
|
||||
return config;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1,9 +1,5 @@
|
|||
import { getLogger } from '@stock-bot/logger';
|
||||
import type {
|
||||
QueryResult,
|
||||
TableNames,
|
||||
TimeRange,
|
||||
} from './types';
|
||||
import type { QueryResult, TableNames, TimeRange } from './types';
|
||||
|
||||
// Interface to avoid circular dependency
|
||||
interface QuestDBClientInterface {
|
||||
|
|
|
|||
|
|
@ -337,7 +337,6 @@ export class QuestDBSchemaManager {
|
|||
return sql;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Validate schema definition
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -6,8 +6,5 @@
|
|||
"composite": true
|
||||
},
|
||||
"include": ["src/**/*"],
|
||||
"references": [
|
||||
{ "path": "../../core/logger" },
|
||||
{ "path": "../../core/types" }
|
||||
]
|
||||
"references": [{ "path": "../../core/logger" }, { "path": "../../core/types" }]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -174,9 +174,11 @@ export class Browser {
|
|||
if (proxy) {
|
||||
const [protocol, rest] = proxy.split('://');
|
||||
if (!rest) {
|
||||
throw new Error('Invalid proxy format. Expected protocol://host:port or protocol://user:pass@host:port');
|
||||
throw new Error(
|
||||
'Invalid proxy format. Expected protocol://host:port or protocol://user:pass@host:port'
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
const [auth, hostPort] = rest.includes('@') ? rest.split('@') : [null, rest];
|
||||
const finalHostPort = hostPort || rest;
|
||||
const [host, port] = finalHostPort.split(':');
|
||||
|
|
|
|||
|
|
@ -6,7 +6,5 @@
|
|||
"composite": true
|
||||
},
|
||||
"include": ["src/**/*"],
|
||||
"references": [
|
||||
{ "path": "../../core/logger" }
|
||||
]
|
||||
"references": [{ "path": "../../core/logger" }]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,12 +1,7 @@
|
|||
import { EventEmitter } from 'eventemitter3';
|
||||
import Redis from 'ioredis';
|
||||
import { getLogger } from '@stock-bot/logger';
|
||||
import type {
|
||||
EventBusConfig,
|
||||
EventBusMessage,
|
||||
EventHandler,
|
||||
EventSubscription,
|
||||
} from './types';
|
||||
import type { EventBusConfig, EventBusMessage, EventHandler, EventSubscription } from './types';
|
||||
|
||||
/**
|
||||
* Lightweight Event Bus for inter-service communication
|
||||
|
|
@ -52,7 +47,7 @@ export class EventBus extends EventEmitter {
|
|||
this.isConnected = true;
|
||||
});
|
||||
|
||||
this.publisher.on('error', (error) => {
|
||||
this.publisher.on('error', error => {
|
||||
this.logger.error('Publisher Redis error:', error);
|
||||
});
|
||||
|
||||
|
|
@ -63,7 +58,7 @@ export class EventBus extends EventEmitter {
|
|||
this.resubscribeAll();
|
||||
});
|
||||
|
||||
this.subscriber.on('error', (error) => {
|
||||
this.subscriber.on('error', error => {
|
||||
this.logger.error('Subscriber Redis error:', error);
|
||||
});
|
||||
|
||||
|
|
@ -89,7 +84,7 @@ export class EventBus extends EventEmitter {
|
|||
// Call registered handler if exists
|
||||
const subscription = this.subscriptions.get(eventType);
|
||||
if (subscription?.handler) {
|
||||
Promise.resolve(subscription.handler(eventMessage)).catch((error) => {
|
||||
Promise.resolve(subscription.handler(eventMessage)).catch(error => {
|
||||
this.logger.error(`Handler error for event ${eventType}:`, error);
|
||||
});
|
||||
}
|
||||
|
|
@ -103,11 +98,7 @@ export class EventBus extends EventEmitter {
|
|||
/**
|
||||
* Publish an event
|
||||
*/
|
||||
async publish<T = any>(
|
||||
type: string,
|
||||
data: T,
|
||||
metadata?: Record<string, any>
|
||||
): Promise<void> {
|
||||
async publish<T = any>(type: string, data: T, metadata?: Record<string, any>): Promise<void> {
|
||||
const message: EventBusMessage<T> = {
|
||||
id: this.generateId(),
|
||||
type,
|
||||
|
|
@ -199,11 +190,11 @@ export class EventBus extends EventEmitter {
|
|||
*/
|
||||
async waitForConnection(timeout: number = 5000): Promise<void> {
|
||||
const startTime = Date.now();
|
||||
|
||||
|
||||
while (!this.isConnected && Date.now() - startTime < timeout) {
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
}
|
||||
|
||||
|
||||
if (!this.isConnected) {
|
||||
throw new Error(`Failed to connect to Redis within ${timeout}ms`);
|
||||
}
|
||||
|
|
@ -220,10 +211,7 @@ export class EventBus extends EventEmitter {
|
|||
this.removeAllListeners();
|
||||
|
||||
// Close Redis connections
|
||||
await Promise.all([
|
||||
this.publisher.quit(),
|
||||
this.subscriber.quit(),
|
||||
]);
|
||||
await Promise.all([this.publisher.quit(), this.subscriber.quit()]);
|
||||
|
||||
this.logger.info('Event bus closed');
|
||||
}
|
||||
|
|
@ -248,4 +236,4 @@ export class EventBus extends EventEmitter {
|
|||
get service(): string {
|
||||
return this.serviceName;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -10,4 +10,4 @@ export function createEventBus(config: EventBusConfig): EventBus {
|
|||
|
||||
// Re-export everything
|
||||
export { EventBus } from './event-bus';
|
||||
export * from './types';
|
||||
export * from './types';
|
||||
|
|
|
|||
|
|
@ -33,27 +33,27 @@ export enum TradingEventType {
|
|||
PRICE_UPDATE = 'market.price.update',
|
||||
ORDERBOOK_UPDATE = 'market.orderbook.update',
|
||||
TRADE_EXECUTED = 'market.trade.executed',
|
||||
|
||||
|
||||
// Order events
|
||||
ORDER_CREATED = 'order.created',
|
||||
ORDER_FILLED = 'order.filled',
|
||||
ORDER_CANCELLED = 'order.cancelled',
|
||||
ORDER_REJECTED = 'order.rejected',
|
||||
|
||||
|
||||
// Position events
|
||||
POSITION_OPENED = 'position.opened',
|
||||
POSITION_CLOSED = 'position.closed',
|
||||
POSITION_UPDATED = 'position.updated',
|
||||
|
||||
|
||||
// Strategy events
|
||||
STRATEGY_SIGNAL = 'strategy.signal',
|
||||
STRATEGY_STARTED = 'strategy.started',
|
||||
STRATEGY_STOPPED = 'strategy.stopped',
|
||||
|
||||
|
||||
// Risk events
|
||||
RISK_LIMIT_BREACH = 'risk.limit.breach',
|
||||
RISK_WARNING = 'risk.warning',
|
||||
|
||||
|
||||
// System events
|
||||
SERVICE_STARTED = 'system.service.started',
|
||||
SERVICE_STOPPED = 'system.service.stopped',
|
||||
|
|
@ -108,4 +108,4 @@ export interface RiskEvent {
|
|||
portfolioId?: string;
|
||||
strategyId?: string;
|
||||
message: string;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -6,7 +6,5 @@
|
|||
"composite": true
|
||||
},
|
||||
"include": ["src/**/*"],
|
||||
"references": [
|
||||
{ "path": "../../core/logger" }
|
||||
]
|
||||
"references": [{ "path": "../../core/logger" }]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,25 +1,25 @@
|
|||
{
|
||||
"name": "@stock-bot/proxy",
|
||||
"version": "0.1.0",
|
||||
"description": "Proxy management and synchronization services",
|
||||
"main": "dist/index.js",
|
||||
"types": "dist/index.d.ts",
|
||||
"scripts": {
|
||||
"build": "tsc",
|
||||
"dev": "tsc --watch",
|
||||
"clean": "rm -rf dist"
|
||||
},
|
||||
"dependencies": {
|
||||
"@stock-bot/logger": "workspace:*",
|
||||
"@stock-bot/cache": "workspace:*"
|
||||
},
|
||||
"devDependencies": {
|
||||
"typescript": "^5.0.0"
|
||||
},
|
||||
"exports": {
|
||||
".": {
|
||||
"types": "./dist/index.d.ts",
|
||||
"default": "./dist/index.js"
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
"name": "@stock-bot/proxy",
|
||||
"version": "0.1.0",
|
||||
"description": "Proxy management and synchronization services",
|
||||
"main": "dist/index.js",
|
||||
"types": "dist/index.d.ts",
|
||||
"scripts": {
|
||||
"build": "tsc",
|
||||
"dev": "tsc --watch",
|
||||
"clean": "rm -rf dist"
|
||||
},
|
||||
"dependencies": {
|
||||
"@stock-bot/logger": "workspace:*",
|
||||
"@stock-bot/cache": "workspace:*"
|
||||
},
|
||||
"devDependencies": {
|
||||
"typescript": "^5.0.0"
|
||||
},
|
||||
"exports": {
|
||||
".": {
|
||||
"types": "./dist/index.d.ts",
|
||||
"default": "./dist/index.js"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,19 +1,16 @@
|
|||
/**
|
||||
* Proxy Service Library
|
||||
* Centralized proxy management and synchronization
|
||||
*/
|
||||
|
||||
// Main classes
|
||||
export { ProxyManager } from './proxy-manager';
|
||||
|
||||
// Types
|
||||
export type {
|
||||
ProxyInfo,
|
||||
ProxyManagerConfig, ProxyStats, ProxySyncConfig
|
||||
} from './types';
|
||||
|
||||
// Note: Convenience functions removed as ProxyManager is no longer a singleton
|
||||
// Create an instance and use its methods directly
|
||||
|
||||
// Default export
|
||||
export { ProxyManager as default } from './proxy-manager';
|
||||
/**
|
||||
* Proxy Service Library
|
||||
* Centralized proxy management and synchronization
|
||||
*/
|
||||
|
||||
// Main classes
|
||||
export { ProxyManager } from './proxy-manager';
|
||||
|
||||
// Types
|
||||
export type { ProxyInfo, ProxyManagerConfig, ProxyStats, ProxySyncConfig } from './types';
|
||||
|
||||
// Note: Convenience functions removed as ProxyManager is no longer a singleton
|
||||
// Create an instance and use its methods directly
|
||||
|
||||
// Default export
|
||||
export { ProxyManager as default } from './proxy-manager';
|
||||
|
|
|
|||
|
|
@ -1,284 +1,287 @@
|
|||
/**
|
||||
* Centralized Proxy Manager - Handles proxy storage, retrieval, and caching
|
||||
*/
|
||||
import type { CacheProvider } from '@stock-bot/cache';
|
||||
import type { ProxyInfo, ProxyManagerConfig, ProxyStats } from './types';
|
||||
|
||||
export class ProxyManager {
|
||||
private cache: CacheProvider;
|
||||
private proxies: ProxyInfo[] = [];
|
||||
private proxyIndex: number = 0;
|
||||
private lastUpdate: Date | null = null;
|
||||
private isInitialized = false;
|
||||
private logger: any;
|
||||
|
||||
constructor(cache: CacheProvider, _config: ProxyManagerConfig = {}, logger?: any) {
|
||||
this.cache = cache;
|
||||
this.logger = logger || console;
|
||||
// Config can be used in the future for customization
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal initialization - loads existing proxies from cache
|
||||
*/
|
||||
private async initializeInternal(): Promise<void> {
|
||||
if (this.isInitialized) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
this.logger.info('Initializing proxy manager...');
|
||||
|
||||
// Wait for cache to be ready
|
||||
await this.cache.waitForReady(10000); // Wait up to 10 seconds
|
||||
this.logger.debug('Cache is ready');
|
||||
|
||||
await this.loadFromCache();
|
||||
this.isInitialized = true;
|
||||
this.logger.info('Proxy manager initialized', {
|
||||
proxiesLoaded: this.proxies.length,
|
||||
lastUpdate: this.lastUpdate,
|
||||
});
|
||||
} catch (error) {
|
||||
this.logger.error('Failed to initialize proxy manager', { error });
|
||||
this.isInitialized = true; // Set to true anyway to avoid infinite retries
|
||||
}
|
||||
}
|
||||
|
||||
getProxy(): string | null {
|
||||
if (this.proxies.length === 0) {
|
||||
this.logger.warn('No proxies available in memory');
|
||||
return null;
|
||||
}
|
||||
|
||||
// Cycle through proxies
|
||||
if (this.proxyIndex >= this.proxies.length) {
|
||||
this.proxyIndex = 0;
|
||||
}
|
||||
|
||||
const proxyInfo = this.proxies[this.proxyIndex++];
|
||||
if (!proxyInfo) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Build proxy URL with optional auth
|
||||
let proxyUrl = `${proxyInfo.protocol}://`;
|
||||
if (proxyInfo.username && proxyInfo.password) {
|
||||
proxyUrl += `${proxyInfo.username}:${proxyInfo.password}@`;
|
||||
}
|
||||
proxyUrl += `${proxyInfo.host}:${proxyInfo.port}`;
|
||||
|
||||
return proxyUrl;
|
||||
}
|
||||
/**
|
||||
* Get a random working proxy from the available pool (synchronous)
|
||||
*/
|
||||
getRandomProxy(): ProxyInfo | null {
|
||||
// Ensure initialized
|
||||
if (!this.isInitialized) {
|
||||
throw new Error('ProxyManager not initialized');
|
||||
}
|
||||
|
||||
// Return null if no proxies available
|
||||
if (this.proxies.length === 0) {
|
||||
this.logger.warn('No proxies available in memory');
|
||||
return null;
|
||||
}
|
||||
|
||||
// Filter for working proxies (not explicitly marked as non-working)
|
||||
const workingProxies = this.proxies.filter(proxy => proxy.isWorking !== false);
|
||||
|
||||
if (workingProxies.length === 0) {
|
||||
this.logger.warn('No working proxies available');
|
||||
return null;
|
||||
}
|
||||
|
||||
// Return random proxy with preference for recently successful ones
|
||||
const sortedProxies = workingProxies.sort((a, b) => {
|
||||
// Prefer proxies with better success rates
|
||||
const aRate = a.successRate || 0;
|
||||
const bRate = b.successRate || 0;
|
||||
return bRate - aRate;
|
||||
});
|
||||
|
||||
// Take from top 50% of best performing proxies
|
||||
const topProxies = sortedProxies.slice(0, Math.max(1, Math.floor(sortedProxies.length * 0.5)));
|
||||
const selectedProxy = topProxies[Math.floor(Math.random() * topProxies.length)];
|
||||
|
||||
if (!selectedProxy) {
|
||||
this.logger.warn('No proxy selected from available pool');
|
||||
return null;
|
||||
}
|
||||
|
||||
this.logger.debug('Selected proxy', {
|
||||
host: selectedProxy.host,
|
||||
port: selectedProxy.port,
|
||||
successRate: selectedProxy.successRate,
|
||||
totalAvailable: workingProxies.length,
|
||||
});
|
||||
|
||||
return selectedProxy;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all working proxies (synchronous)
|
||||
*/
|
||||
getWorkingProxies(): ProxyInfo[] {
|
||||
if (!this.isInitialized) {
|
||||
throw new Error('ProxyManager not initialized');
|
||||
}
|
||||
|
||||
return this.proxies.filter(proxy => proxy.isWorking !== false);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all proxies (working and non-working)
|
||||
*/
|
||||
getAllProxies(): ProxyInfo[] {
|
||||
if (!this.isInitialized) {
|
||||
throw new Error('ProxyManager not initialized');
|
||||
}
|
||||
|
||||
return [...this.proxies];
|
||||
}
|
||||
|
||||
/**
|
||||
* Get proxy statistics
|
||||
*/
|
||||
getStats(): ProxyStats {
|
||||
if (!this.isInitialized) {
|
||||
throw new Error('ProxyManager not initialized');
|
||||
}
|
||||
|
||||
return {
|
||||
total: this.proxies.length,
|
||||
working: this.proxies.filter(p => p.isWorking !== false).length,
|
||||
failed: this.proxies.filter(p => p.isWorking === false).length,
|
||||
lastUpdate: this.lastUpdate
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the proxy pool with new proxies
|
||||
*/
|
||||
async updateProxies(proxies: ProxyInfo[]): Promise<void> {
|
||||
// Ensure manager is initialized before updating
|
||||
if (!this.isInitialized) {
|
||||
await this.initializeInternal();
|
||||
}
|
||||
|
||||
try {
|
||||
this.logger.info('Updating proxy pool', { newCount: proxies.length, existingCount: this.proxies.length });
|
||||
|
||||
this.proxies = proxies;
|
||||
this.lastUpdate = new Date();
|
||||
|
||||
// Store to cache
|
||||
await this.cache.set('active-proxies', proxies);
|
||||
await this.cache.set('last-update', this.lastUpdate.toISOString());
|
||||
|
||||
const workingCount = proxies.filter(p => p.isWorking !== false).length;
|
||||
this.logger.info('Proxy pool updated successfully', {
|
||||
totalProxies: proxies.length,
|
||||
workingProxies: workingCount,
|
||||
lastUpdate: this.lastUpdate,
|
||||
});
|
||||
} catch (error) {
|
||||
this.logger.error('Failed to update proxy pool', { error });
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Add or update a single proxy in the pool
|
||||
*/
|
||||
async updateProxy(proxy: ProxyInfo): Promise<void> {
|
||||
const existingIndex = this.proxies.findIndex(
|
||||
p => p.host === proxy.host && p.port === proxy.port && p.protocol === proxy.protocol
|
||||
);
|
||||
|
||||
if (existingIndex >= 0) {
|
||||
this.proxies[existingIndex] = { ...this.proxies[existingIndex], ...proxy };
|
||||
this.logger.debug('Updated existing proxy', { host: proxy.host, port: proxy.port });
|
||||
} else {
|
||||
this.proxies.push(proxy);
|
||||
this.logger.debug('Added new proxy', { host: proxy.host, port: proxy.port });
|
||||
}
|
||||
|
||||
// Update cache
|
||||
await this.updateProxies(this.proxies);
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove a proxy from the pool
|
||||
*/
|
||||
async removeProxy(host: string, port: number, protocol: string): Promise<void> {
|
||||
const initialLength = this.proxies.length;
|
||||
this.proxies = this.proxies.filter(
|
||||
p => !(p.host === host && p.port === port && p.protocol === protocol)
|
||||
);
|
||||
|
||||
if (this.proxies.length < initialLength) {
|
||||
await this.updateProxies(this.proxies);
|
||||
this.logger.debug('Removed proxy', { host, port, protocol });
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear all proxies from memory and cache
|
||||
*/
|
||||
async clearProxies(): Promise<void> {
|
||||
this.proxies = [];
|
||||
this.lastUpdate = null;
|
||||
|
||||
await this.cache.del('active-proxies');
|
||||
await this.cache.del('last-update');
|
||||
|
||||
this.logger.info('Cleared all proxies');
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if proxy manager is ready
|
||||
*/
|
||||
isReady(): boolean {
|
||||
return this.isInitialized;
|
||||
}
|
||||
|
||||
/**
|
||||
* Load proxies from cache storage
|
||||
*/
|
||||
private async loadFromCache(): Promise<void> {
|
||||
try {
|
||||
const cachedProxies = await this.cache.get<ProxyInfo[]>('active-proxies');
|
||||
const lastUpdateStr = await this.cache.get<string>('last-update');
|
||||
|
||||
if (cachedProxies && Array.isArray(cachedProxies)) {
|
||||
this.proxies = cachedProxies;
|
||||
this.lastUpdate = lastUpdateStr ? new Date(lastUpdateStr) : null;
|
||||
|
||||
this.logger.debug('Loaded proxies from cache', {
|
||||
count: this.proxies.length,
|
||||
lastUpdate: this.lastUpdate,
|
||||
});
|
||||
} else {
|
||||
this.logger.debug('No cached proxies found');
|
||||
}
|
||||
} catch (error) {
|
||||
this.logger.error('Failed to load proxies from cache', { error });
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the proxy manager
|
||||
*/
|
||||
async initialize(): Promise<void> {
|
||||
await this.initializeInternal();
|
||||
|
||||
// Note: Initial proxy sync should be handled by the container or application
|
||||
// that creates ProxyManager instance
|
||||
this.logger.info('ProxyManager initialized - proxy sync should be handled externally');
|
||||
}
|
||||
}
|
||||
|
||||
// Export the class as default
|
||||
export default ProxyManager;
|
||||
/**
|
||||
* Centralized Proxy Manager - Handles proxy storage, retrieval, and caching
|
||||
*/
|
||||
import type { CacheProvider } from '@stock-bot/cache';
|
||||
import type { ProxyInfo, ProxyManagerConfig, ProxyStats } from './types';
|
||||
|
||||
export class ProxyManager {
|
||||
private cache: CacheProvider;
|
||||
private proxies: ProxyInfo[] = [];
|
||||
private proxyIndex: number = 0;
|
||||
private lastUpdate: Date | null = null;
|
||||
private isInitialized = false;
|
||||
private logger: any;
|
||||
|
||||
constructor(cache: CacheProvider, _config: ProxyManagerConfig = {}, logger?: any) {
|
||||
this.cache = cache;
|
||||
this.logger = logger || console;
|
||||
// Config can be used in the future for customization
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal initialization - loads existing proxies from cache
|
||||
*/
|
||||
private async initializeInternal(): Promise<void> {
|
||||
if (this.isInitialized) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
this.logger.info('Initializing proxy manager...');
|
||||
|
||||
// Wait for cache to be ready
|
||||
await this.cache.waitForReady(10000); // Wait up to 10 seconds
|
||||
this.logger.debug('Cache is ready');
|
||||
|
||||
await this.loadFromCache();
|
||||
this.isInitialized = true;
|
||||
this.logger.info('Proxy manager initialized', {
|
||||
proxiesLoaded: this.proxies.length,
|
||||
lastUpdate: this.lastUpdate,
|
||||
});
|
||||
} catch (error) {
|
||||
this.logger.error('Failed to initialize proxy manager', { error });
|
||||
this.isInitialized = true; // Set to true anyway to avoid infinite retries
|
||||
}
|
||||
}
|
||||
|
||||
getProxy(): string | null {
|
||||
if (this.proxies.length === 0) {
|
||||
this.logger.warn('No proxies available in memory');
|
||||
return null;
|
||||
}
|
||||
|
||||
// Cycle through proxies
|
||||
if (this.proxyIndex >= this.proxies.length) {
|
||||
this.proxyIndex = 0;
|
||||
}
|
||||
|
||||
const proxyInfo = this.proxies[this.proxyIndex++];
|
||||
if (!proxyInfo) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Build proxy URL with optional auth
|
||||
let proxyUrl = `${proxyInfo.protocol}://`;
|
||||
if (proxyInfo.username && proxyInfo.password) {
|
||||
proxyUrl += `${proxyInfo.username}:${proxyInfo.password}@`;
|
||||
}
|
||||
proxyUrl += `${proxyInfo.host}:${proxyInfo.port}`;
|
||||
|
||||
return proxyUrl;
|
||||
}
|
||||
/**
|
||||
* Get a random working proxy from the available pool (synchronous)
|
||||
*/
|
||||
getRandomProxy(): ProxyInfo | null {
|
||||
// Ensure initialized
|
||||
if (!this.isInitialized) {
|
||||
throw new Error('ProxyManager not initialized');
|
||||
}
|
||||
|
||||
// Return null if no proxies available
|
||||
if (this.proxies.length === 0) {
|
||||
this.logger.warn('No proxies available in memory');
|
||||
return null;
|
||||
}
|
||||
|
||||
// Filter for working proxies (not explicitly marked as non-working)
|
||||
const workingProxies = this.proxies.filter(proxy => proxy.isWorking !== false);
|
||||
|
||||
if (workingProxies.length === 0) {
|
||||
this.logger.warn('No working proxies available');
|
||||
return null;
|
||||
}
|
||||
|
||||
// Return random proxy with preference for recently successful ones
|
||||
const sortedProxies = workingProxies.sort((a, b) => {
|
||||
// Prefer proxies with better success rates
|
||||
const aRate = a.successRate || 0;
|
||||
const bRate = b.successRate || 0;
|
||||
return bRate - aRate;
|
||||
});
|
||||
|
||||
// Take from top 50% of best performing proxies
|
||||
const topProxies = sortedProxies.slice(0, Math.max(1, Math.floor(sortedProxies.length * 0.5)));
|
||||
const selectedProxy = topProxies[Math.floor(Math.random() * topProxies.length)];
|
||||
|
||||
if (!selectedProxy) {
|
||||
this.logger.warn('No proxy selected from available pool');
|
||||
return null;
|
||||
}
|
||||
|
||||
this.logger.debug('Selected proxy', {
|
||||
host: selectedProxy.host,
|
||||
port: selectedProxy.port,
|
||||
successRate: selectedProxy.successRate,
|
||||
totalAvailable: workingProxies.length,
|
||||
});
|
||||
|
||||
return selectedProxy;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all working proxies (synchronous)
|
||||
*/
|
||||
getWorkingProxies(): ProxyInfo[] {
|
||||
if (!this.isInitialized) {
|
||||
throw new Error('ProxyManager not initialized');
|
||||
}
|
||||
|
||||
return this.proxies.filter(proxy => proxy.isWorking !== false);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all proxies (working and non-working)
|
||||
*/
|
||||
getAllProxies(): ProxyInfo[] {
|
||||
if (!this.isInitialized) {
|
||||
throw new Error('ProxyManager not initialized');
|
||||
}
|
||||
|
||||
return [...this.proxies];
|
||||
}
|
||||
|
||||
/**
|
||||
* Get proxy statistics
|
||||
*/
|
||||
getStats(): ProxyStats {
|
||||
if (!this.isInitialized) {
|
||||
throw new Error('ProxyManager not initialized');
|
||||
}
|
||||
|
||||
return {
|
||||
total: this.proxies.length,
|
||||
working: this.proxies.filter(p => p.isWorking !== false).length,
|
||||
failed: this.proxies.filter(p => p.isWorking === false).length,
|
||||
lastUpdate: this.lastUpdate,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the proxy pool with new proxies
|
||||
*/
|
||||
async updateProxies(proxies: ProxyInfo[]): Promise<void> {
|
||||
// Ensure manager is initialized before updating
|
||||
if (!this.isInitialized) {
|
||||
await this.initializeInternal();
|
||||
}
|
||||
|
||||
try {
|
||||
this.logger.info('Updating proxy pool', {
|
||||
newCount: proxies.length,
|
||||
existingCount: this.proxies.length,
|
||||
});
|
||||
|
||||
this.proxies = proxies;
|
||||
this.lastUpdate = new Date();
|
||||
|
||||
// Store to cache
|
||||
await this.cache.set('active-proxies', proxies);
|
||||
await this.cache.set('last-update', this.lastUpdate.toISOString());
|
||||
|
||||
const workingCount = proxies.filter(p => p.isWorking !== false).length;
|
||||
this.logger.info('Proxy pool updated successfully', {
|
||||
totalProxies: proxies.length,
|
||||
workingProxies: workingCount,
|
||||
lastUpdate: this.lastUpdate,
|
||||
});
|
||||
} catch (error) {
|
||||
this.logger.error('Failed to update proxy pool', { error });
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Add or update a single proxy in the pool
|
||||
*/
|
||||
async updateProxy(proxy: ProxyInfo): Promise<void> {
|
||||
const existingIndex = this.proxies.findIndex(
|
||||
p => p.host === proxy.host && p.port === proxy.port && p.protocol === proxy.protocol
|
||||
);
|
||||
|
||||
if (existingIndex >= 0) {
|
||||
this.proxies[existingIndex] = { ...this.proxies[existingIndex], ...proxy };
|
||||
this.logger.debug('Updated existing proxy', { host: proxy.host, port: proxy.port });
|
||||
} else {
|
||||
this.proxies.push(proxy);
|
||||
this.logger.debug('Added new proxy', { host: proxy.host, port: proxy.port });
|
||||
}
|
||||
|
||||
// Update cache
|
||||
await this.updateProxies(this.proxies);
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove a proxy from the pool
|
||||
*/
|
||||
async removeProxy(host: string, port: number, protocol: string): Promise<void> {
|
||||
const initialLength = this.proxies.length;
|
||||
this.proxies = this.proxies.filter(
|
||||
p => !(p.host === host && p.port === port && p.protocol === protocol)
|
||||
);
|
||||
|
||||
if (this.proxies.length < initialLength) {
|
||||
await this.updateProxies(this.proxies);
|
||||
this.logger.debug('Removed proxy', { host, port, protocol });
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear all proxies from memory and cache
|
||||
*/
|
||||
async clearProxies(): Promise<void> {
|
||||
this.proxies = [];
|
||||
this.lastUpdate = null;
|
||||
|
||||
await this.cache.del('active-proxies');
|
||||
await this.cache.del('last-update');
|
||||
|
||||
this.logger.info('Cleared all proxies');
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if proxy manager is ready
|
||||
*/
|
||||
isReady(): boolean {
|
||||
return this.isInitialized;
|
||||
}
|
||||
|
||||
/**
|
||||
* Load proxies from cache storage
|
||||
*/
|
||||
private async loadFromCache(): Promise<void> {
|
||||
try {
|
||||
const cachedProxies = await this.cache.get<ProxyInfo[]>('active-proxies');
|
||||
const lastUpdateStr = await this.cache.get<string>('last-update');
|
||||
|
||||
if (cachedProxies && Array.isArray(cachedProxies)) {
|
||||
this.proxies = cachedProxies;
|
||||
this.lastUpdate = lastUpdateStr ? new Date(lastUpdateStr) : null;
|
||||
|
||||
this.logger.debug('Loaded proxies from cache', {
|
||||
count: this.proxies.length,
|
||||
lastUpdate: this.lastUpdate,
|
||||
});
|
||||
} else {
|
||||
this.logger.debug('No cached proxies found');
|
||||
}
|
||||
} catch (error) {
|
||||
this.logger.error('Failed to load proxies from cache', { error });
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the proxy manager
|
||||
*/
|
||||
async initialize(): Promise<void> {
|
||||
await this.initializeInternal();
|
||||
|
||||
// Note: Initial proxy sync should be handled by the container or application
|
||||
// that creates ProxyManager instance
|
||||
this.logger.info('ProxyManager initialized - proxy sync should be handled externally');
|
||||
}
|
||||
}
|
||||
|
||||
// Export the class as default
|
||||
export default ProxyManager;
|
||||
|
|
|
|||
|
|
@ -1,42 +1,42 @@
|
|||
/**
|
||||
* Proxy service types and interfaces
|
||||
*/
|
||||
|
||||
export interface ProxyInfo {
|
||||
host: string;
|
||||
port: number;
|
||||
protocol: 'http' | 'https'; // Simplified to only support HTTP/HTTPS
|
||||
username?: string;
|
||||
password?: string;
|
||||
isWorking?: boolean;
|
||||
successRate?: number;
|
||||
lastChecked?: Date;
|
||||
lastUsed?: Date;
|
||||
responseTime?: number;
|
||||
source?: string;
|
||||
country?: string;
|
||||
error?: string;
|
||||
// Tracking properties
|
||||
working?: number; // Number of successful checks
|
||||
total?: number; // Total number of checks
|
||||
averageResponseTime?: number; // Average response time in milliseconds
|
||||
firstSeen?: Date; // When the proxy was first added
|
||||
}
|
||||
|
||||
export interface ProxyManagerConfig {
|
||||
cachePrefix?: string;
|
||||
ttl?: number;
|
||||
enableMetrics?: boolean;
|
||||
}
|
||||
|
||||
export interface ProxySyncConfig {
|
||||
intervalMs?: number;
|
||||
enableAutoSync?: boolean;
|
||||
}
|
||||
|
||||
export interface ProxyStats {
|
||||
total: number;
|
||||
working: number;
|
||||
failed: number;
|
||||
lastUpdate: Date | null;
|
||||
}
|
||||
/**
|
||||
* Proxy service types and interfaces
|
||||
*/
|
||||
|
||||
export interface ProxyInfo {
|
||||
host: string;
|
||||
port: number;
|
||||
protocol: 'http' | 'https'; // Simplified to only support HTTP/HTTPS
|
||||
username?: string;
|
||||
password?: string;
|
||||
isWorking?: boolean;
|
||||
successRate?: number;
|
||||
lastChecked?: Date;
|
||||
lastUsed?: Date;
|
||||
responseTime?: number;
|
||||
source?: string;
|
||||
country?: string;
|
||||
error?: string;
|
||||
// Tracking properties
|
||||
working?: number; // Number of successful checks
|
||||
total?: number; // Total number of checks
|
||||
averageResponseTime?: number; // Average response time in milliseconds
|
||||
firstSeen?: Date; // When the proxy was first added
|
||||
}
|
||||
|
||||
export interface ProxyManagerConfig {
|
||||
cachePrefix?: string;
|
||||
ttl?: number;
|
||||
enableMetrics?: boolean;
|
||||
}
|
||||
|
||||
export interface ProxySyncConfig {
|
||||
intervalMs?: number;
|
||||
enableAutoSync?: boolean;
|
||||
}
|
||||
|
||||
export interface ProxyStats {
|
||||
total: number;
|
||||
working: number;
|
||||
failed: number;
|
||||
lastUpdate: Date | null;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,12 +1,12 @@
|
|||
{
|
||||
"extends": "../../../tsconfig.json",
|
||||
"compilerOptions": {
|
||||
"outDir": "./dist",
|
||||
"rootDir": "./src",
|
||||
"declaration": true,
|
||||
"declarationMap": true,
|
||||
"sourceMap": true
|
||||
},
|
||||
"include": ["src/**/*"],
|
||||
"exclude": ["dist", "node_modules"]
|
||||
}
|
||||
{
|
||||
"extends": "../../../tsconfig.json",
|
||||
"compilerOptions": {
|
||||
"outDir": "./dist",
|
||||
"rootDir": "./src",
|
||||
"declaration": true,
|
||||
"declarationMap": true,
|
||||
"sourceMap": true
|
||||
},
|
||||
"include": ["src/**/*"],
|
||||
"exclude": ["dist", "node_modules"]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,251 +1,249 @@
|
|||
import { getLogger } from '@stock-bot/logger';
|
||||
import { Queue, type Job } from 'bullmq';
|
||||
import type { DLQConfig, RedisConfig } from './types';
|
||||
import { getRedisConnection } from './utils';
|
||||
|
||||
const logger = getLogger('dlq-handler');
|
||||
|
||||
export class DeadLetterQueueHandler {
|
||||
private dlq: Queue;
|
||||
private config: Required<DLQConfig>;
|
||||
private failureCount = new Map<string, number>();
|
||||
|
||||
constructor(
|
||||
private mainQueue: Queue,
|
||||
connection: RedisConfig,
|
||||
config: DLQConfig = {}
|
||||
) {
|
||||
this.config = {
|
||||
maxRetries: config.maxRetries ?? 3,
|
||||
retryDelay: config.retryDelay ?? 60000, // 1 minute
|
||||
alertThreshold: config.alertThreshold ?? 100,
|
||||
cleanupAge: config.cleanupAge ?? 168, // 7 days
|
||||
};
|
||||
|
||||
// Create DLQ with same name but -dlq suffix
|
||||
const dlqName = `${mainQueue.name}-dlq`;
|
||||
this.dlq = new Queue(dlqName, { connection: getRedisConnection(connection) });
|
||||
}
|
||||
|
||||
/**
|
||||
* Process a failed job - either retry or move to DLQ
|
||||
*/
|
||||
async handleFailedJob(job: Job, error: Error): Promise<void> {
|
||||
const jobKey = `${job.name}:${job.id}`;
|
||||
const currentFailures = (this.failureCount.get(jobKey) || 0) + 1;
|
||||
this.failureCount.set(jobKey, currentFailures);
|
||||
|
||||
logger.warn('Job failed', {
|
||||
jobId: job.id,
|
||||
jobName: job.name,
|
||||
attempt: job.attemptsMade,
|
||||
maxAttempts: job.opts.attempts,
|
||||
error: error.message,
|
||||
failureCount: currentFailures,
|
||||
});
|
||||
|
||||
// Check if job should be moved to DLQ
|
||||
if (job.attemptsMade >= (job.opts.attempts || this.config.maxRetries)) {
|
||||
await this.moveToDeadLetterQueue(job, error);
|
||||
this.failureCount.delete(jobKey);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Move job to dead letter queue
|
||||
*/
|
||||
private async moveToDeadLetterQueue(job: Job, error: Error): Promise<void> {
|
||||
try {
|
||||
const dlqData = {
|
||||
originalJob: {
|
||||
id: job.id,
|
||||
name: job.name,
|
||||
data: job.data,
|
||||
opts: job.opts,
|
||||
attemptsMade: job.attemptsMade,
|
||||
failedReason: job.failedReason,
|
||||
processedOn: job.processedOn,
|
||||
timestamp: job.timestamp,
|
||||
},
|
||||
error: {
|
||||
message: error.message,
|
||||
stack: error.stack,
|
||||
name: error.name,
|
||||
},
|
||||
movedToDLQAt: new Date().toISOString(),
|
||||
};
|
||||
|
||||
await this.dlq.add('failed-job', dlqData, {
|
||||
removeOnComplete: 100,
|
||||
removeOnFail: 50,
|
||||
});
|
||||
|
||||
logger.error('Job moved to DLQ', {
|
||||
jobId: job.id,
|
||||
jobName: job.name,
|
||||
error: error.message,
|
||||
});
|
||||
|
||||
// Check if we need to alert
|
||||
await this.checkAlertThreshold();
|
||||
} catch (dlqError) {
|
||||
logger.error('Failed to move job to DLQ', {
|
||||
jobId: job.id,
|
||||
error: dlqError,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Retry jobs from DLQ
|
||||
*/
|
||||
async retryDLQJobs(limit = 10): Promise<number> {
|
||||
const jobs = await this.dlq.getCompleted(0, limit);
|
||||
let retriedCount = 0;
|
||||
|
||||
for (const dlqJob of jobs) {
|
||||
try {
|
||||
const { originalJob } = dlqJob.data;
|
||||
|
||||
// Re-add to main queue with delay
|
||||
await this.mainQueue.add(
|
||||
originalJob.name,
|
||||
originalJob.data,
|
||||
{
|
||||
...originalJob.opts,
|
||||
delay: this.config.retryDelay,
|
||||
attempts: this.config.maxRetries,
|
||||
}
|
||||
);
|
||||
|
||||
// Remove from DLQ
|
||||
await dlqJob.remove();
|
||||
retriedCount++;
|
||||
|
||||
logger.info('Job retried from DLQ', {
|
||||
originalJobId: originalJob.id,
|
||||
jobName: originalJob.name,
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error('Failed to retry DLQ job', {
|
||||
dlqJobId: dlqJob.id,
|
||||
error,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return retriedCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get DLQ statistics
|
||||
*/
|
||||
async getStats(): Promise<{
|
||||
total: number;
|
||||
recent: number;
|
||||
byJobName: Record<string, number>;
|
||||
oldestJob: Date | null;
|
||||
}> {
|
||||
const [completed, failed, waiting] = await Promise.all([
|
||||
this.dlq.getCompleted(),
|
||||
this.dlq.getFailed(),
|
||||
this.dlq.getWaiting(),
|
||||
]);
|
||||
|
||||
const allJobs = [...completed, ...failed, ...waiting];
|
||||
const byJobName: Record<string, number> = {};
|
||||
let oldestTimestamp: number | null = null;
|
||||
|
||||
for (const job of allJobs) {
|
||||
const jobName = job.data.originalJob?.name || 'unknown';
|
||||
byJobName[jobName] = (byJobName[jobName] || 0) + 1;
|
||||
|
||||
if (!oldestTimestamp || job.timestamp < oldestTimestamp) {
|
||||
oldestTimestamp = job.timestamp;
|
||||
}
|
||||
}
|
||||
|
||||
// Count recent jobs (last 24 hours)
|
||||
const oneDayAgo = Date.now() - 24 * 60 * 60 * 1000;
|
||||
const recent = allJobs.filter(job => job.timestamp > oneDayAgo).length;
|
||||
|
||||
return {
|
||||
total: allJobs.length,
|
||||
recent,
|
||||
byJobName,
|
||||
oldestJob: oldestTimestamp ? new Date(oldestTimestamp) : null,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Clean up old DLQ entries
|
||||
*/
|
||||
async cleanup(): Promise<number> {
|
||||
const ageInMs = this.config.cleanupAge * 60 * 60 * 1000;
|
||||
const cutoffTime = Date.now() - ageInMs;
|
||||
|
||||
const jobs = await this.dlq.getCompleted();
|
||||
let removedCount = 0;
|
||||
|
||||
for (const job of jobs) {
|
||||
if (job.timestamp < cutoffTime) {
|
||||
await job.remove();
|
||||
removedCount++;
|
||||
}
|
||||
}
|
||||
|
||||
logger.info('DLQ cleanup completed', {
|
||||
removedCount,
|
||||
cleanupAge: `${this.config.cleanupAge} hours`,
|
||||
});
|
||||
|
||||
return removedCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if alert threshold is exceeded
|
||||
*/
|
||||
private async checkAlertThreshold(): Promise<void> {
|
||||
const stats = await this.getStats();
|
||||
|
||||
if (stats.total >= this.config.alertThreshold) {
|
||||
logger.error('DLQ alert threshold exceeded', {
|
||||
threshold: this.config.alertThreshold,
|
||||
currentCount: stats.total,
|
||||
byJobName: stats.byJobName,
|
||||
});
|
||||
// In a real implementation, this would trigger alerts
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get failed jobs for inspection
|
||||
*/
|
||||
async inspectFailedJobs(limit = 10): Promise<Array<{
|
||||
id: string;
|
||||
name: string;
|
||||
data: unknown;
|
||||
error: unknown;
|
||||
failedAt: string;
|
||||
attempts: number;
|
||||
}>> {
|
||||
const jobs = await this.dlq.getCompleted(0, limit);
|
||||
|
||||
return jobs.map(job => ({
|
||||
id: job.data.originalJob.id,
|
||||
name: job.data.originalJob.name,
|
||||
data: job.data.originalJob.data,
|
||||
error: job.data.error,
|
||||
failedAt: job.data.movedToDLQAt,
|
||||
attempts: job.data.originalJob.attemptsMade,
|
||||
}));
|
||||
}
|
||||
|
||||
/**
|
||||
* Shutdown DLQ handler
|
||||
*/
|
||||
async shutdown(): Promise<void> {
|
||||
await this.dlq.close();
|
||||
this.failureCount.clear();
|
||||
}
|
||||
}
|
||||
import { Queue, type Job } from 'bullmq';
|
||||
import { getLogger } from '@stock-bot/logger';
|
||||
import type { DLQConfig, RedisConfig } from './types';
|
||||
import { getRedisConnection } from './utils';
|
||||
|
||||
const logger = getLogger('dlq-handler');
|
||||
|
||||
export class DeadLetterQueueHandler {
|
||||
private dlq: Queue;
|
||||
private config: Required<DLQConfig>;
|
||||
private failureCount = new Map<string, number>();
|
||||
|
||||
constructor(
|
||||
private mainQueue: Queue,
|
||||
connection: RedisConfig,
|
||||
config: DLQConfig = {}
|
||||
) {
|
||||
this.config = {
|
||||
maxRetries: config.maxRetries ?? 3,
|
||||
retryDelay: config.retryDelay ?? 60000, // 1 minute
|
||||
alertThreshold: config.alertThreshold ?? 100,
|
||||
cleanupAge: config.cleanupAge ?? 168, // 7 days
|
||||
};
|
||||
|
||||
// Create DLQ with same name but -dlq suffix
|
||||
const dlqName = `${mainQueue.name}-dlq`;
|
||||
this.dlq = new Queue(dlqName, { connection: getRedisConnection(connection) });
|
||||
}
|
||||
|
||||
/**
|
||||
* Process a failed job - either retry or move to DLQ
|
||||
*/
|
||||
async handleFailedJob(job: Job, error: Error): Promise<void> {
|
||||
const jobKey = `${job.name}:${job.id}`;
|
||||
const currentFailures = (this.failureCount.get(jobKey) || 0) + 1;
|
||||
this.failureCount.set(jobKey, currentFailures);
|
||||
|
||||
logger.warn('Job failed', {
|
||||
jobId: job.id,
|
||||
jobName: job.name,
|
||||
attempt: job.attemptsMade,
|
||||
maxAttempts: job.opts.attempts,
|
||||
error: error.message,
|
||||
failureCount: currentFailures,
|
||||
});
|
||||
|
||||
// Check if job should be moved to DLQ
|
||||
if (job.attemptsMade >= (job.opts.attempts || this.config.maxRetries)) {
|
||||
await this.moveToDeadLetterQueue(job, error);
|
||||
this.failureCount.delete(jobKey);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Move job to dead letter queue
|
||||
*/
|
||||
private async moveToDeadLetterQueue(job: Job, error: Error): Promise<void> {
|
||||
try {
|
||||
const dlqData = {
|
||||
originalJob: {
|
||||
id: job.id,
|
||||
name: job.name,
|
||||
data: job.data,
|
||||
opts: job.opts,
|
||||
attemptsMade: job.attemptsMade,
|
||||
failedReason: job.failedReason,
|
||||
processedOn: job.processedOn,
|
||||
timestamp: job.timestamp,
|
||||
},
|
||||
error: {
|
||||
message: error.message,
|
||||
stack: error.stack,
|
||||
name: error.name,
|
||||
},
|
||||
movedToDLQAt: new Date().toISOString(),
|
||||
};
|
||||
|
||||
await this.dlq.add('failed-job', dlqData, {
|
||||
removeOnComplete: 100,
|
||||
removeOnFail: 50,
|
||||
});
|
||||
|
||||
logger.error('Job moved to DLQ', {
|
||||
jobId: job.id,
|
||||
jobName: job.name,
|
||||
error: error.message,
|
||||
});
|
||||
|
||||
// Check if we need to alert
|
||||
await this.checkAlertThreshold();
|
||||
} catch (dlqError) {
|
||||
logger.error('Failed to move job to DLQ', {
|
||||
jobId: job.id,
|
||||
error: dlqError,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Retry jobs from DLQ
|
||||
*/
|
||||
async retryDLQJobs(limit = 10): Promise<number> {
|
||||
const jobs = await this.dlq.getCompleted(0, limit);
|
||||
let retriedCount = 0;
|
||||
|
||||
for (const dlqJob of jobs) {
|
||||
try {
|
||||
const { originalJob } = dlqJob.data;
|
||||
|
||||
// Re-add to main queue with delay
|
||||
await this.mainQueue.add(originalJob.name, originalJob.data, {
|
||||
...originalJob.opts,
|
||||
delay: this.config.retryDelay,
|
||||
attempts: this.config.maxRetries,
|
||||
});
|
||||
|
||||
// Remove from DLQ
|
||||
await dlqJob.remove();
|
||||
retriedCount++;
|
||||
|
||||
logger.info('Job retried from DLQ', {
|
||||
originalJobId: originalJob.id,
|
||||
jobName: originalJob.name,
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error('Failed to retry DLQ job', {
|
||||
dlqJobId: dlqJob.id,
|
||||
error,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return retriedCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get DLQ statistics
|
||||
*/
|
||||
async getStats(): Promise<{
|
||||
total: number;
|
||||
recent: number;
|
||||
byJobName: Record<string, number>;
|
||||
oldestJob: Date | null;
|
||||
}> {
|
||||
const [completed, failed, waiting] = await Promise.all([
|
||||
this.dlq.getCompleted(),
|
||||
this.dlq.getFailed(),
|
||||
this.dlq.getWaiting(),
|
||||
]);
|
||||
|
||||
const allJobs = [...completed, ...failed, ...waiting];
|
||||
const byJobName: Record<string, number> = {};
|
||||
let oldestTimestamp: number | null = null;
|
||||
|
||||
for (const job of allJobs) {
|
||||
const jobName = job.data.originalJob?.name || 'unknown';
|
||||
byJobName[jobName] = (byJobName[jobName] || 0) + 1;
|
||||
|
||||
if (!oldestTimestamp || job.timestamp < oldestTimestamp) {
|
||||
oldestTimestamp = job.timestamp;
|
||||
}
|
||||
}
|
||||
|
||||
// Count recent jobs (last 24 hours)
|
||||
const oneDayAgo = Date.now() - 24 * 60 * 60 * 1000;
|
||||
const recent = allJobs.filter(job => job.timestamp > oneDayAgo).length;
|
||||
|
||||
return {
|
||||
total: allJobs.length,
|
||||
recent,
|
||||
byJobName,
|
||||
oldestJob: oldestTimestamp ? new Date(oldestTimestamp) : null,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Clean up old DLQ entries
|
||||
*/
|
||||
async cleanup(): Promise<number> {
|
||||
const ageInMs = this.config.cleanupAge * 60 * 60 * 1000;
|
||||
const cutoffTime = Date.now() - ageInMs;
|
||||
|
||||
const jobs = await this.dlq.getCompleted();
|
||||
let removedCount = 0;
|
||||
|
||||
for (const job of jobs) {
|
||||
if (job.timestamp < cutoffTime) {
|
||||
await job.remove();
|
||||
removedCount++;
|
||||
}
|
||||
}
|
||||
|
||||
logger.info('DLQ cleanup completed', {
|
||||
removedCount,
|
||||
cleanupAge: `${this.config.cleanupAge} hours`,
|
||||
});
|
||||
|
||||
return removedCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if alert threshold is exceeded
|
||||
*/
|
||||
private async checkAlertThreshold(): Promise<void> {
|
||||
const stats = await this.getStats();
|
||||
|
||||
if (stats.total >= this.config.alertThreshold) {
|
||||
logger.error('DLQ alert threshold exceeded', {
|
||||
threshold: this.config.alertThreshold,
|
||||
currentCount: stats.total,
|
||||
byJobName: stats.byJobName,
|
||||
});
|
||||
// In a real implementation, this would trigger alerts
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get failed jobs for inspection
|
||||
*/
|
||||
async inspectFailedJobs(limit = 10): Promise<
|
||||
Array<{
|
||||
id: string;
|
||||
name: string;
|
||||
data: unknown;
|
||||
error: unknown;
|
||||
failedAt: string;
|
||||
attempts: number;
|
||||
}>
|
||||
> {
|
||||
const jobs = await this.dlq.getCompleted(0, limit);
|
||||
|
||||
return jobs.map(job => ({
|
||||
id: job.data.originalJob.id,
|
||||
name: job.data.originalJob.name,
|
||||
data: job.data.originalJob.data,
|
||||
error: job.data.error,
|
||||
failedAt: job.data.movedToDLQAt,
|
||||
attempts: job.data.originalJob.attemptsMade,
|
||||
}));
|
||||
}
|
||||
|
||||
/**
|
||||
* Shutdown DLQ handler
|
||||
*/
|
||||
async shutdown(): Promise<void> {
|
||||
await this.dlq.close();
|
||||
this.failureCount.clear();
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -26,34 +26,33 @@ export type {
|
|||
QueueOptions,
|
||||
QueueStats,
|
||||
GlobalStats,
|
||||
|
||||
|
||||
// Batch processing types
|
||||
BatchResult,
|
||||
ProcessOptions,
|
||||
BatchJobData,
|
||||
|
||||
|
||||
// Handler types
|
||||
JobHandler,
|
||||
TypedJobHandler,
|
||||
HandlerConfig,
|
||||
HandlerConfigWithSchedule,
|
||||
HandlerInitializer,
|
||||
|
||||
|
||||
// Configuration types
|
||||
RedisConfig,
|
||||
QueueConfig,
|
||||
QueueManagerConfig,
|
||||
|
||||
|
||||
// Rate limiting types
|
||||
RateLimitConfig,
|
||||
RateLimitRule,
|
||||
|
||||
|
||||
// DLQ types
|
||||
DLQConfig,
|
||||
DLQJobInfo,
|
||||
|
||||
|
||||
// Scheduled job types
|
||||
ScheduledJob,
|
||||
ScheduleConfig,
|
||||
} from './types';
|
||||
|
||||
|
|
|
|||
|
|
@ -130,7 +130,8 @@ export class QueueManager {
|
|||
const queueConfig: QueueWorkerConfig = {
|
||||
workers: mergedOptions.workers,
|
||||
concurrency: mergedOptions.concurrency,
|
||||
startWorker: !!mergedOptions.workers && mergedOptions.workers > 0 && !this.config.delayWorkerStart,
|
||||
startWorker:
|
||||
!!mergedOptions.workers && mergedOptions.workers > 0 && !this.config.delayWorkerStart,
|
||||
};
|
||||
|
||||
const queue = new Queue(
|
||||
|
|
@ -443,7 +444,9 @@ export class QueueManager {
|
|||
*/
|
||||
startAllWorkers(): void {
|
||||
if (!this.config.delayWorkerStart) {
|
||||
logger.info('startAllWorkers() called but workers already started automatically (delayWorkerStart is false)');
|
||||
logger.info(
|
||||
'startAllWorkers() called but workers already started automatically (delayWorkerStart is false)'
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -451,17 +454,17 @@ export class QueueManager {
|
|||
for (const queue of this.queues.values()) {
|
||||
const workerCount = this.config.defaultQueueOptions?.workers || 1;
|
||||
const concurrency = this.config.defaultQueueOptions?.concurrency || 1;
|
||||
|
||||
|
||||
if (workerCount > 0) {
|
||||
queue.startWorkersManually(workerCount, concurrency);
|
||||
workersStarted++;
|
||||
}
|
||||
}
|
||||
|
||||
logger.info('All workers started', {
|
||||
logger.info('All workers started', {
|
||||
totalQueues: this.queues.size,
|
||||
queuesWithWorkers: workersStarted,
|
||||
delayWorkerStart: this.config.delayWorkerStart
|
||||
delayWorkerStart: this.config.delayWorkerStart,
|
||||
});
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1,314 +1,318 @@
|
|||
import { Queue, QueueEvents } from 'bullmq';
|
||||
// import { getLogger } from '@stock-bot/logger';
|
||||
|
||||
// const logger = getLogger('queue-metrics');
|
||||
|
||||
export interface QueueMetrics {
|
||||
// Job counts
|
||||
waiting: number;
|
||||
active: number;
|
||||
completed: number;
|
||||
failed: number;
|
||||
delayed: number;
|
||||
paused?: number;
|
||||
|
||||
// Performance metrics
|
||||
processingTime: {
|
||||
avg: number;
|
||||
min: number;
|
||||
max: number;
|
||||
p95: number;
|
||||
p99: number;
|
||||
};
|
||||
|
||||
// Throughput
|
||||
throughput: {
|
||||
completedPerMinute: number;
|
||||
failedPerMinute: number;
|
||||
totalPerMinute: number;
|
||||
};
|
||||
|
||||
// Job age
|
||||
oldestWaitingJob: Date | null;
|
||||
|
||||
// Health
|
||||
isHealthy: boolean;
|
||||
healthIssues: string[];
|
||||
}
|
||||
|
||||
export class QueueMetricsCollector {
|
||||
private processingTimes: number[] = [];
|
||||
private completedTimestamps: number[] = [];
|
||||
private failedTimestamps: number[] = [];
|
||||
private jobStartTimes = new Map<string, number>();
|
||||
private readonly maxSamples = 1000;
|
||||
private readonly metricsInterval = 60000; // 1 minute
|
||||
|
||||
constructor(
|
||||
private queue: Queue,
|
||||
private queueEvents: QueueEvents
|
||||
) {
|
||||
this.setupEventListeners();
|
||||
}
|
||||
|
||||
/**
|
||||
* Setup event listeners for metrics collection
|
||||
*/
|
||||
private setupEventListeners(): void {
|
||||
this.queueEvents.on('completed', () => {
|
||||
// Record completion
|
||||
this.completedTimestamps.push(Date.now());
|
||||
this.cleanupOldTimestamps();
|
||||
});
|
||||
|
||||
this.queueEvents.on('failed', () => {
|
||||
// Record failure
|
||||
this.failedTimestamps.push(Date.now());
|
||||
this.cleanupOldTimestamps();
|
||||
});
|
||||
|
||||
// Track processing times
|
||||
this.queueEvents.on('active', ({ jobId }) => {
|
||||
this.jobStartTimes.set(jobId, Date.now());
|
||||
});
|
||||
|
||||
this.queueEvents.on('completed', ({ jobId }) => {
|
||||
const startTime = this.jobStartTimes.get(jobId);
|
||||
if (startTime) {
|
||||
const processingTime = Date.now() - startTime;
|
||||
this.recordProcessingTime(processingTime);
|
||||
this.jobStartTimes.delete(jobId);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Record processing time
|
||||
*/
|
||||
private recordProcessingTime(time: number): void {
|
||||
this.processingTimes.push(time);
|
||||
|
||||
// Keep only recent samples
|
||||
if (this.processingTimes.length > this.maxSamples) {
|
||||
this.processingTimes = this.processingTimes.slice(-this.maxSamples);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Clean up old timestamps
|
||||
*/
|
||||
private cleanupOldTimestamps(): void {
|
||||
const cutoff = Date.now() - this.metricsInterval;
|
||||
|
||||
this.completedTimestamps = this.completedTimestamps.filter(ts => ts > cutoff);
|
||||
this.failedTimestamps = this.failedTimestamps.filter(ts => ts > cutoff);
|
||||
}
|
||||
|
||||
/**
|
||||
* Collect current metrics
|
||||
*/
|
||||
async collect(): Promise<QueueMetrics> {
|
||||
// Get job counts
|
||||
const [waiting, active, completed, failed, delayed] = await Promise.all([
|
||||
this.queue.getWaitingCount(),
|
||||
this.queue.getActiveCount(),
|
||||
this.queue.getCompletedCount(),
|
||||
this.queue.getFailedCount(),
|
||||
this.queue.getDelayedCount(),
|
||||
]);
|
||||
|
||||
// BullMQ doesn't have getPausedCount, check if queue is paused
|
||||
const paused = await this.queue.isPaused() ? waiting : 0;
|
||||
|
||||
// Calculate processing time metrics
|
||||
const processingTime = this.calculateProcessingTimeMetrics();
|
||||
|
||||
// Calculate throughput
|
||||
const throughput = this.calculateThroughput();
|
||||
|
||||
// Get oldest waiting job
|
||||
const oldestWaitingJob = await this.getOldestWaitingJob();
|
||||
|
||||
// Check health
|
||||
const { isHealthy, healthIssues } = this.checkHealth({
|
||||
waiting,
|
||||
active,
|
||||
failed,
|
||||
processingTime,
|
||||
});
|
||||
|
||||
return {
|
||||
waiting,
|
||||
active,
|
||||
completed,
|
||||
failed,
|
||||
delayed,
|
||||
paused,
|
||||
processingTime,
|
||||
throughput,
|
||||
oldestWaitingJob,
|
||||
isHealthy,
|
||||
healthIssues,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate processing time metrics
|
||||
*/
|
||||
private calculateProcessingTimeMetrics(): QueueMetrics['processingTime'] {
|
||||
if (this.processingTimes.length === 0) {
|
||||
return { avg: 0, min: 0, max: 0, p95: 0, p99: 0 };
|
||||
}
|
||||
|
||||
const sorted = [...this.processingTimes].sort((a, b) => a - b);
|
||||
const sum = sorted.reduce((acc, val) => acc + val, 0);
|
||||
|
||||
return {
|
||||
avg: sorted.length > 0 ? Math.round(sum / sorted.length) : 0,
|
||||
min: sorted[0] || 0,
|
||||
max: sorted[sorted.length - 1] || 0,
|
||||
p95: sorted[Math.floor(sorted.length * 0.95)] || 0,
|
||||
p99: sorted[Math.floor(sorted.length * 0.99)] || 0,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate throughput metrics
|
||||
*/
|
||||
private calculateThroughput(): QueueMetrics['throughput'] {
|
||||
const now = Date.now();
|
||||
const oneMinuteAgo = now - 60000;
|
||||
|
||||
const completedPerMinute = this.completedTimestamps.filter(ts => ts > oneMinuteAgo).length;
|
||||
const failedPerMinute = this.failedTimestamps.filter(ts => ts > oneMinuteAgo).length;
|
||||
|
||||
return {
|
||||
completedPerMinute,
|
||||
failedPerMinute,
|
||||
totalPerMinute: completedPerMinute + failedPerMinute,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get oldest waiting job
|
||||
*/
|
||||
private async getOldestWaitingJob(): Promise<Date | null> {
|
||||
const waitingJobs = await this.queue.getWaiting(0, 1);
|
||||
|
||||
if (waitingJobs.length > 0) {
|
||||
return new Date(waitingJobs[0].timestamp);
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check queue health
|
||||
*/
|
||||
private checkHealth(metrics: {
|
||||
waiting: number;
|
||||
active: number;
|
||||
failed: number;
|
||||
processingTime: QueueMetrics['processingTime'];
|
||||
}): { isHealthy: boolean; healthIssues: string[] } {
|
||||
const issues: string[] = [];
|
||||
|
||||
// Check for high failure rate
|
||||
const failureRate = metrics.failed / (metrics.failed + this.completedTimestamps.length);
|
||||
if (failureRate > 0.1) {
|
||||
issues.push(`High failure rate: ${(failureRate * 100).toFixed(1)}%`);
|
||||
}
|
||||
|
||||
// Check for queue backlog
|
||||
if (metrics.waiting > 1000) {
|
||||
issues.push(`Large queue backlog: ${metrics.waiting} jobs waiting`);
|
||||
}
|
||||
|
||||
// Check for slow processing
|
||||
if (metrics.processingTime.avg > 30000) { // 30 seconds
|
||||
issues.push(`Slow average processing time: ${(metrics.processingTime.avg / 1000).toFixed(1)}s`);
|
||||
}
|
||||
|
||||
// Check for stalled active jobs
|
||||
if (metrics.active > 100) {
|
||||
issues.push(`High number of active jobs: ${metrics.active}`);
|
||||
}
|
||||
|
||||
return {
|
||||
isHealthy: issues.length === 0,
|
||||
healthIssues: issues,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get formatted metrics report
|
||||
*/
|
||||
async getReport(): Promise<string> {
|
||||
const metrics = await this.collect();
|
||||
|
||||
return `
|
||||
Queue Metrics Report
|
||||
===================
|
||||
Status: ${metrics.isHealthy ? '✅ Healthy' : '⚠️ Issues Detected'}
|
||||
|
||||
Job Counts:
|
||||
- Waiting: ${metrics.waiting}
|
||||
- Active: ${metrics.active}
|
||||
- Completed: ${metrics.completed}
|
||||
- Failed: ${metrics.failed}
|
||||
- Delayed: ${metrics.delayed}
|
||||
- Paused: ${metrics.paused}
|
||||
|
||||
Performance:
|
||||
- Avg Processing Time: ${(metrics.processingTime.avg / 1000).toFixed(2)}s
|
||||
- Min/Max: ${(metrics.processingTime.min / 1000).toFixed(2)}s / ${(metrics.processingTime.max / 1000).toFixed(2)}s
|
||||
- P95/P99: ${(metrics.processingTime.p95 / 1000).toFixed(2)}s / ${(metrics.processingTime.p99 / 1000).toFixed(2)}s
|
||||
|
||||
Throughput:
|
||||
- Completed/min: ${metrics.throughput.completedPerMinute}
|
||||
- Failed/min: ${metrics.throughput.failedPerMinute}
|
||||
- Total/min: ${metrics.throughput.totalPerMinute}
|
||||
|
||||
${metrics.oldestWaitingJob ? `Oldest Waiting Job: ${metrics.oldestWaitingJob.toISOString()}` : 'No waiting jobs'}
|
||||
|
||||
${metrics.healthIssues.length > 0 ? `\nHealth Issues:\n${metrics.healthIssues.map(issue => `- ${issue}`).join('\n')}` : ''}
|
||||
`.trim();
|
||||
}
|
||||
|
||||
/**
|
||||
* Export metrics in Prometheus format
|
||||
*/
|
||||
async getPrometheusMetrics(): Promise<string> {
|
||||
const metrics = await this.collect();
|
||||
const queueName = this.queue.name;
|
||||
|
||||
return `
|
||||
# HELP queue_jobs_total Total number of jobs by status
|
||||
# TYPE queue_jobs_total gauge
|
||||
queue_jobs_total{queue="${queueName}",status="waiting"} ${metrics.waiting}
|
||||
queue_jobs_total{queue="${queueName}",status="active"} ${metrics.active}
|
||||
queue_jobs_total{queue="${queueName}",status="completed"} ${metrics.completed}
|
||||
queue_jobs_total{queue="${queueName}",status="failed"} ${metrics.failed}
|
||||
queue_jobs_total{queue="${queueName}",status="delayed"} ${metrics.delayed}
|
||||
queue_jobs_total{queue="${queueName}",status="paused"} ${metrics.paused}
|
||||
|
||||
# HELP queue_processing_time_seconds Job processing time in seconds
|
||||
# TYPE queue_processing_time_seconds summary
|
||||
queue_processing_time_seconds{queue="${queueName}",quantile="0.5"} ${(metrics.processingTime.avg / 1000).toFixed(3)}
|
||||
queue_processing_time_seconds{queue="${queueName}",quantile="0.95"} ${(metrics.processingTime.p95 / 1000).toFixed(3)}
|
||||
queue_processing_time_seconds{queue="${queueName}",quantile="0.99"} ${(metrics.processingTime.p99 / 1000).toFixed(3)}
|
||||
queue_processing_time_seconds_sum{queue="${queueName}"} ${(metrics.processingTime.avg * this.processingTimes.length / 1000).toFixed(3)}
|
||||
queue_processing_time_seconds_count{queue="${queueName}"} ${this.processingTimes.length}
|
||||
|
||||
# HELP queue_throughput_per_minute Jobs processed per minute
|
||||
# TYPE queue_throughput_per_minute gauge
|
||||
queue_throughput_per_minute{queue="${queueName}",status="completed"} ${metrics.throughput.completedPerMinute}
|
||||
queue_throughput_per_minute{queue="${queueName}",status="failed"} ${metrics.throughput.failedPerMinute}
|
||||
queue_throughput_per_minute{queue="${queueName}",status="total"} ${metrics.throughput.totalPerMinute}
|
||||
|
||||
# HELP queue_health Queue health status
|
||||
# TYPE queue_health gauge
|
||||
queue_health{queue="${queueName}"} ${metrics.isHealthy ? 1 : 0}
|
||||
`.trim();
|
||||
}
|
||||
}
|
||||
import { Queue, QueueEvents } from 'bullmq';
|
||||
|
||||
// import { getLogger } from '@stock-bot/logger';
|
||||
|
||||
// const logger = getLogger('queue-metrics');
|
||||
|
||||
export interface QueueMetrics {
|
||||
// Job counts
|
||||
waiting: number;
|
||||
active: number;
|
||||
completed: number;
|
||||
failed: number;
|
||||
delayed: number;
|
||||
paused?: number;
|
||||
|
||||
// Performance metrics
|
||||
processingTime: {
|
||||
avg: number;
|
||||
min: number;
|
||||
max: number;
|
||||
p95: number;
|
||||
p99: number;
|
||||
};
|
||||
|
||||
// Throughput
|
||||
throughput: {
|
||||
completedPerMinute: number;
|
||||
failedPerMinute: number;
|
||||
totalPerMinute: number;
|
||||
};
|
||||
|
||||
// Job age
|
||||
oldestWaitingJob: Date | null;
|
||||
|
||||
// Health
|
||||
isHealthy: boolean;
|
||||
healthIssues: string[];
|
||||
}
|
||||
|
||||
export class QueueMetricsCollector {
|
||||
private processingTimes: number[] = [];
|
||||
private completedTimestamps: number[] = [];
|
||||
private failedTimestamps: number[] = [];
|
||||
private jobStartTimes = new Map<string, number>();
|
||||
private readonly maxSamples = 1000;
|
||||
private readonly metricsInterval = 60000; // 1 minute
|
||||
|
||||
constructor(
|
||||
private queue: Queue,
|
||||
private queueEvents: QueueEvents
|
||||
) {
|
||||
this.setupEventListeners();
|
||||
}
|
||||
|
||||
/**
|
||||
* Setup event listeners for metrics collection
|
||||
*/
|
||||
private setupEventListeners(): void {
|
||||
this.queueEvents.on('completed', () => {
|
||||
// Record completion
|
||||
this.completedTimestamps.push(Date.now());
|
||||
this.cleanupOldTimestamps();
|
||||
});
|
||||
|
||||
this.queueEvents.on('failed', () => {
|
||||
// Record failure
|
||||
this.failedTimestamps.push(Date.now());
|
||||
this.cleanupOldTimestamps();
|
||||
});
|
||||
|
||||
// Track processing times
|
||||
this.queueEvents.on('active', ({ jobId }) => {
|
||||
this.jobStartTimes.set(jobId, Date.now());
|
||||
});
|
||||
|
||||
this.queueEvents.on('completed', ({ jobId }) => {
|
||||
const startTime = this.jobStartTimes.get(jobId);
|
||||
if (startTime) {
|
||||
const processingTime = Date.now() - startTime;
|
||||
this.recordProcessingTime(processingTime);
|
||||
this.jobStartTimes.delete(jobId);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Record processing time
|
||||
*/
|
||||
private recordProcessingTime(time: number): void {
|
||||
this.processingTimes.push(time);
|
||||
|
||||
// Keep only recent samples
|
||||
if (this.processingTimes.length > this.maxSamples) {
|
||||
this.processingTimes = this.processingTimes.slice(-this.maxSamples);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Clean up old timestamps
|
||||
*/
|
||||
private cleanupOldTimestamps(): void {
|
||||
const cutoff = Date.now() - this.metricsInterval;
|
||||
|
||||
this.completedTimestamps = this.completedTimestamps.filter(ts => ts > cutoff);
|
||||
this.failedTimestamps = this.failedTimestamps.filter(ts => ts > cutoff);
|
||||
}
|
||||
|
||||
/**
|
||||
* Collect current metrics
|
||||
*/
|
||||
async collect(): Promise<QueueMetrics> {
|
||||
// Get job counts
|
||||
const [waiting, active, completed, failed, delayed] = await Promise.all([
|
||||
this.queue.getWaitingCount(),
|
||||
this.queue.getActiveCount(),
|
||||
this.queue.getCompletedCount(),
|
||||
this.queue.getFailedCount(),
|
||||
this.queue.getDelayedCount(),
|
||||
]);
|
||||
|
||||
// BullMQ doesn't have getPausedCount, check if queue is paused
|
||||
const paused = (await this.queue.isPaused()) ? waiting : 0;
|
||||
|
||||
// Calculate processing time metrics
|
||||
const processingTime = this.calculateProcessingTimeMetrics();
|
||||
|
||||
// Calculate throughput
|
||||
const throughput = this.calculateThroughput();
|
||||
|
||||
// Get oldest waiting job
|
||||
const oldestWaitingJob = await this.getOldestWaitingJob();
|
||||
|
||||
// Check health
|
||||
const { isHealthy, healthIssues } = this.checkHealth({
|
||||
waiting,
|
||||
active,
|
||||
failed,
|
||||
processingTime,
|
||||
});
|
||||
|
||||
return {
|
||||
waiting,
|
||||
active,
|
||||
completed,
|
||||
failed,
|
||||
delayed,
|
||||
paused,
|
||||
processingTime,
|
||||
throughput,
|
||||
oldestWaitingJob,
|
||||
isHealthy,
|
||||
healthIssues,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate processing time metrics
|
||||
*/
|
||||
private calculateProcessingTimeMetrics(): QueueMetrics['processingTime'] {
|
||||
if (this.processingTimes.length === 0) {
|
||||
return { avg: 0, min: 0, max: 0, p95: 0, p99: 0 };
|
||||
}
|
||||
|
||||
const sorted = [...this.processingTimes].sort((a, b) => a - b);
|
||||
const sum = sorted.reduce((acc, val) => acc + val, 0);
|
||||
|
||||
return {
|
||||
avg: sorted.length > 0 ? Math.round(sum / sorted.length) : 0,
|
||||
min: sorted[0] || 0,
|
||||
max: sorted[sorted.length - 1] || 0,
|
||||
p95: sorted[Math.floor(sorted.length * 0.95)] || 0,
|
||||
p99: sorted[Math.floor(sorted.length * 0.99)] || 0,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate throughput metrics
|
||||
*/
|
||||
private calculateThroughput(): QueueMetrics['throughput'] {
|
||||
const now = Date.now();
|
||||
const oneMinuteAgo = now - 60000;
|
||||
|
||||
const completedPerMinute = this.completedTimestamps.filter(ts => ts > oneMinuteAgo).length;
|
||||
const failedPerMinute = this.failedTimestamps.filter(ts => ts > oneMinuteAgo).length;
|
||||
|
||||
return {
|
||||
completedPerMinute,
|
||||
failedPerMinute,
|
||||
totalPerMinute: completedPerMinute + failedPerMinute,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get oldest waiting job
|
||||
*/
|
||||
private async getOldestWaitingJob(): Promise<Date | null> {
|
||||
const waitingJobs = await this.queue.getWaiting(0, 1);
|
||||
|
||||
if (waitingJobs.length > 0) {
|
||||
return new Date(waitingJobs[0].timestamp);
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check queue health
|
||||
*/
|
||||
private checkHealth(metrics: {
|
||||
waiting: number;
|
||||
active: number;
|
||||
failed: number;
|
||||
processingTime: QueueMetrics['processingTime'];
|
||||
}): { isHealthy: boolean; healthIssues: string[] } {
|
||||
const issues: string[] = [];
|
||||
|
||||
// Check for high failure rate
|
||||
const failureRate = metrics.failed / (metrics.failed + this.completedTimestamps.length);
|
||||
if (failureRate > 0.1) {
|
||||
issues.push(`High failure rate: ${(failureRate * 100).toFixed(1)}%`);
|
||||
}
|
||||
|
||||
// Check for queue backlog
|
||||
if (metrics.waiting > 1000) {
|
||||
issues.push(`Large queue backlog: ${metrics.waiting} jobs waiting`);
|
||||
}
|
||||
|
||||
// Check for slow processing
|
||||
if (metrics.processingTime.avg > 30000) {
|
||||
// 30 seconds
|
||||
issues.push(
|
||||
`Slow average processing time: ${(metrics.processingTime.avg / 1000).toFixed(1)}s`
|
||||
);
|
||||
}
|
||||
|
||||
// Check for stalled active jobs
|
||||
if (metrics.active > 100) {
|
||||
issues.push(`High number of active jobs: ${metrics.active}`);
|
||||
}
|
||||
|
||||
return {
|
||||
isHealthy: issues.length === 0,
|
||||
healthIssues: issues,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get formatted metrics report
|
||||
*/
|
||||
async getReport(): Promise<string> {
|
||||
const metrics = await this.collect();
|
||||
|
||||
return `
|
||||
Queue Metrics Report
|
||||
===================
|
||||
Status: ${metrics.isHealthy ? '✅ Healthy' : '⚠️ Issues Detected'}
|
||||
|
||||
Job Counts:
|
||||
- Waiting: ${metrics.waiting}
|
||||
- Active: ${metrics.active}
|
||||
- Completed: ${metrics.completed}
|
||||
- Failed: ${metrics.failed}
|
||||
- Delayed: ${metrics.delayed}
|
||||
- Paused: ${metrics.paused}
|
||||
|
||||
Performance:
|
||||
- Avg Processing Time: ${(metrics.processingTime.avg / 1000).toFixed(2)}s
|
||||
- Min/Max: ${(metrics.processingTime.min / 1000).toFixed(2)}s / ${(metrics.processingTime.max / 1000).toFixed(2)}s
|
||||
- P95/P99: ${(metrics.processingTime.p95 / 1000).toFixed(2)}s / ${(metrics.processingTime.p99 / 1000).toFixed(2)}s
|
||||
|
||||
Throughput:
|
||||
- Completed/min: ${metrics.throughput.completedPerMinute}
|
||||
- Failed/min: ${metrics.throughput.failedPerMinute}
|
||||
- Total/min: ${metrics.throughput.totalPerMinute}
|
||||
|
||||
${metrics.oldestWaitingJob ? `Oldest Waiting Job: ${metrics.oldestWaitingJob.toISOString()}` : 'No waiting jobs'}
|
||||
|
||||
${metrics.healthIssues.length > 0 ? `\nHealth Issues:\n${metrics.healthIssues.map(issue => `- ${issue}`).join('\n')}` : ''}
|
||||
`.trim();
|
||||
}
|
||||
|
||||
/**
|
||||
* Export metrics in Prometheus format
|
||||
*/
|
||||
async getPrometheusMetrics(): Promise<string> {
|
||||
const metrics = await this.collect();
|
||||
const queueName = this.queue.name;
|
||||
|
||||
return `
|
||||
# HELP queue_jobs_total Total number of jobs by status
|
||||
# TYPE queue_jobs_total gauge
|
||||
queue_jobs_total{queue="${queueName}",status="waiting"} ${metrics.waiting}
|
||||
queue_jobs_total{queue="${queueName}",status="active"} ${metrics.active}
|
||||
queue_jobs_total{queue="${queueName}",status="completed"} ${metrics.completed}
|
||||
queue_jobs_total{queue="${queueName}",status="failed"} ${metrics.failed}
|
||||
queue_jobs_total{queue="${queueName}",status="delayed"} ${metrics.delayed}
|
||||
queue_jobs_total{queue="${queueName}",status="paused"} ${metrics.paused}
|
||||
|
||||
# HELP queue_processing_time_seconds Job processing time in seconds
|
||||
# TYPE queue_processing_time_seconds summary
|
||||
queue_processing_time_seconds{queue="${queueName}",quantile="0.5"} ${(metrics.processingTime.avg / 1000).toFixed(3)}
|
||||
queue_processing_time_seconds{queue="${queueName}",quantile="0.95"} ${(metrics.processingTime.p95 / 1000).toFixed(3)}
|
||||
queue_processing_time_seconds{queue="${queueName}",quantile="0.99"} ${(metrics.processingTime.p99 / 1000).toFixed(3)}
|
||||
queue_processing_time_seconds_sum{queue="${queueName}"} ${((metrics.processingTime.avg * this.processingTimes.length) / 1000).toFixed(3)}
|
||||
queue_processing_time_seconds_count{queue="${queueName}"} ${this.processingTimes.length}
|
||||
|
||||
# HELP queue_throughput_per_minute Jobs processed per minute
|
||||
# TYPE queue_throughput_per_minute gauge
|
||||
queue_throughput_per_minute{queue="${queueName}",status="completed"} ${metrics.throughput.completedPerMinute}
|
||||
queue_throughput_per_minute{queue="${queueName}",status="failed"} ${metrics.throughput.failedPerMinute}
|
||||
queue_throughput_per_minute{queue="${queueName}",status="total"} ${metrics.throughput.totalPerMinute}
|
||||
|
||||
# HELP queue_health Queue health status
|
||||
# TYPE queue_health gauge
|
||||
queue_health{queue="${queueName}"} ${metrics.isHealthy ? 1 : 0}
|
||||
`.trim();
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,372 +1,372 @@
|
|||
import { Queue as BullQueue, QueueEvents, Worker, type Job } from 'bullmq';
|
||||
import { getLogger } from '@stock-bot/logger';
|
||||
import { handlerRegistry } from '@stock-bot/types';
|
||||
import type { JobData, JobOptions, QueueStats, RedisConfig } from './types';
|
||||
import { getRedisConnection } from './utils';
|
||||
|
||||
const logger = getLogger('queue');
|
||||
|
||||
export interface QueueWorkerConfig {
|
||||
workers?: number;
|
||||
concurrency?: number;
|
||||
startWorker?: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Consolidated Queue class that handles both job operations and optional worker management
|
||||
* Can be used as a simple job queue or with workers for automatic processing
|
||||
*/
|
||||
export class Queue {
|
||||
private bullQueue: BullQueue;
|
||||
private workers: Worker[] = [];
|
||||
private queueEvents?: QueueEvents;
|
||||
private queueName: string;
|
||||
private redisConfig: RedisConfig;
|
||||
|
||||
constructor(
|
||||
queueName: string,
|
||||
redisConfig: RedisConfig,
|
||||
defaultJobOptions: JobOptions = {},
|
||||
config: QueueWorkerConfig = {}
|
||||
) {
|
||||
this.queueName = queueName;
|
||||
this.redisConfig = redisConfig;
|
||||
|
||||
const connection = getRedisConnection(redisConfig);
|
||||
|
||||
// Initialize BullMQ queue
|
||||
this.bullQueue = new BullQueue(`{${queueName}}`, {
|
||||
connection,
|
||||
defaultJobOptions: {
|
||||
removeOnComplete: 10,
|
||||
removeOnFail: 5,
|
||||
attempts: 3,
|
||||
backoff: {
|
||||
type: 'exponential',
|
||||
delay: 1000,
|
||||
},
|
||||
...defaultJobOptions,
|
||||
},
|
||||
});
|
||||
|
||||
// Initialize queue events if workers will be used
|
||||
if (config.workers && config.workers > 0) {
|
||||
this.queueEvents = new QueueEvents(`{${queueName}}`, { connection });
|
||||
}
|
||||
|
||||
// Start workers if requested and not explicitly disabled
|
||||
if (config.workers && config.workers > 0 && config.startWorker !== false) {
|
||||
this.startWorkers(config.workers, config.concurrency || 1);
|
||||
}
|
||||
|
||||
logger.trace('Queue created', {
|
||||
queueName,
|
||||
workers: config.workers || 0,
|
||||
concurrency: config.concurrency || 1,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the queue name
|
||||
*/
|
||||
getName(): string {
|
||||
return this.queueName;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a single job to the queue
|
||||
*/
|
||||
async add(name: string, data: JobData, options: JobOptions = {}): Promise<Job> {
|
||||
logger.trace('Adding job', { queueName: this.queueName, jobName: name });
|
||||
return await this.bullQueue.add(name, data, options);
|
||||
}
|
||||
|
||||
/**
|
||||
* Add multiple jobs to the queue in bulk
|
||||
*/
|
||||
async addBulk(jobs: Array<{ name: string; data: JobData; opts?: JobOptions }>): Promise<Job[]> {
|
||||
logger.trace('Adding bulk jobs', {
|
||||
queueName: this.queueName,
|
||||
jobCount: jobs.length,
|
||||
});
|
||||
return await this.bullQueue.addBulk(jobs);
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a scheduled job with cron-like pattern
|
||||
*/
|
||||
async addScheduledJob(
|
||||
name: string,
|
||||
data: JobData,
|
||||
cronPattern: string,
|
||||
options: JobOptions = {}
|
||||
): Promise<Job> {
|
||||
const scheduledOptions: JobOptions = {
|
||||
...options,
|
||||
repeat: {
|
||||
pattern: cronPattern,
|
||||
// Use job name as repeat key to prevent duplicates
|
||||
key: `${this.queueName}:${name}`,
|
||||
...options.repeat,
|
||||
},
|
||||
};
|
||||
|
||||
logger.info('Adding scheduled job', {
|
||||
queueName: this.queueName,
|
||||
jobName: name,
|
||||
cronPattern,
|
||||
repeatKey: scheduledOptions.repeat?.key,
|
||||
immediately: scheduledOptions.repeat?.immediately,
|
||||
});
|
||||
|
||||
return await this.bullQueue.add(name, data, scheduledOptions);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get queue statistics
|
||||
*/
|
||||
async getStats(): Promise<QueueStats> {
|
||||
const [waiting, active, completed, failed, delayed] = await Promise.all([
|
||||
this.bullQueue.getWaiting(),
|
||||
this.bullQueue.getActive(),
|
||||
this.bullQueue.getCompleted(),
|
||||
this.bullQueue.getFailed(),
|
||||
this.bullQueue.getDelayed(),
|
||||
]);
|
||||
|
||||
const isPaused = await this.bullQueue.isPaused();
|
||||
|
||||
return {
|
||||
waiting: waiting.length,
|
||||
active: active.length,
|
||||
completed: completed.length,
|
||||
failed: failed.length,
|
||||
delayed: delayed.length,
|
||||
paused: isPaused,
|
||||
workers: this.workers.length,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a specific job by ID
|
||||
*/
|
||||
async getJob(jobId: string): Promise<Job | undefined> {
|
||||
return await this.bullQueue.getJob(jobId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get jobs by state
|
||||
*/
|
||||
async getJobs(
|
||||
states: Array<'waiting' | 'active' | 'completed' | 'failed' | 'delayed'>,
|
||||
start = 0,
|
||||
end = 100
|
||||
): Promise<Job[]> {
|
||||
return await this.bullQueue.getJobs(states, start, end);
|
||||
}
|
||||
|
||||
/**
|
||||
* Pause the queue (stops processing new jobs)
|
||||
*/
|
||||
async pause(): Promise<void> {
|
||||
await this.bullQueue.pause();
|
||||
logger.info('Queue paused', { queueName: this.queueName });
|
||||
}
|
||||
|
||||
/**
|
||||
* Resume the queue
|
||||
*/
|
||||
async resume(): Promise<void> {
|
||||
await this.bullQueue.resume();
|
||||
logger.info('Queue resumed', { queueName: this.queueName });
|
||||
}
|
||||
|
||||
/**
|
||||
* Drain the queue (remove all jobs)
|
||||
*/
|
||||
async drain(delayed = false): Promise<void> {
|
||||
await this.bullQueue.drain(delayed);
|
||||
logger.info('Queue drained', { queueName: this.queueName, delayed });
|
||||
}
|
||||
|
||||
/**
|
||||
* Clean completed and failed jobs
|
||||
*/
|
||||
async clean(
|
||||
grace: number = 0,
|
||||
limit: number = 100,
|
||||
type: 'completed' | 'failed' = 'completed'
|
||||
): Promise<void> {
|
||||
await this.bullQueue.clean(grace, limit, type);
|
||||
logger.debug('Queue cleaned', { queueName: this.queueName, type, grace, limit });
|
||||
}
|
||||
|
||||
/**
|
||||
* Wait until the queue is ready
|
||||
*/
|
||||
async waitUntilReady(): Promise<void> {
|
||||
await this.bullQueue.waitUntilReady();
|
||||
}
|
||||
|
||||
/**
|
||||
* Close the queue (cleanup resources)
|
||||
*/
|
||||
/**
|
||||
* Close the queue (cleanup resources)
|
||||
*/
|
||||
async close(): Promise<void> {
|
||||
try {
|
||||
// Close the queue itself
|
||||
await this.bullQueue.close();
|
||||
logger.info('Queue closed', { queueName: this.queueName });
|
||||
|
||||
// Close queue events
|
||||
if (this.queueEvents) {
|
||||
await this.queueEvents.close();
|
||||
logger.debug('Queue events closed', { queueName: this.queueName });
|
||||
}
|
||||
|
||||
// Close workers first
|
||||
if (this.workers.length > 0) {
|
||||
await Promise.all(
|
||||
this.workers.map(async worker => {
|
||||
return await worker.close();
|
||||
})
|
||||
);
|
||||
this.workers = [];
|
||||
logger.debug('Workers closed', { queueName: this.queueName });
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Error closing queue', { queueName: this.queueName, error });
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Start workers for this queue
|
||||
*/
|
||||
private startWorkers(workerCount: number, concurrency: number): void {
|
||||
const connection = getRedisConnection(this.redisConfig);
|
||||
|
||||
for (let i = 0; i < workerCount; i++) {
|
||||
const worker = new Worker(`{${this.queueName}}`, this.processJob.bind(this), {
|
||||
connection,
|
||||
concurrency,
|
||||
maxStalledCount: 3,
|
||||
stalledInterval: 30000,
|
||||
});
|
||||
|
||||
// Setup worker event handlers
|
||||
worker.on('completed', job => {
|
||||
logger.trace('Job completed', {
|
||||
queueName: this.queueName,
|
||||
jobId: job.id,
|
||||
handler: job.data?.handler,
|
||||
operation: job.data?.operation,
|
||||
});
|
||||
});
|
||||
|
||||
worker.on('failed', (job, err) => {
|
||||
logger.error('Job failed', {
|
||||
queueName: this.queueName,
|
||||
jobId: job?.id,
|
||||
handler: job?.data?.handler,
|
||||
operation: job?.data?.operation,
|
||||
error: err.message,
|
||||
});
|
||||
});
|
||||
|
||||
worker.on('error', error => {
|
||||
logger.error('Worker error', {
|
||||
queueName: this.queueName,
|
||||
workerId: i,
|
||||
error: error.message,
|
||||
});
|
||||
});
|
||||
|
||||
this.workers.push(worker);
|
||||
}
|
||||
|
||||
logger.info('Workers started', {
|
||||
queueName: this.queueName,
|
||||
workerCount,
|
||||
concurrency,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Process a job using the handler registry
|
||||
*/
|
||||
private async processJob(job: Job): Promise<unknown> {
|
||||
const { handler, operation, payload }: JobData = job.data;
|
||||
|
||||
logger.trace('Processing job', {
|
||||
id: job.id,
|
||||
handler,
|
||||
operation,
|
||||
queueName: this.queueName,
|
||||
});
|
||||
|
||||
try {
|
||||
// Look up handler in registry
|
||||
const jobHandler = handlerRegistry.getOperation(handler, operation);
|
||||
|
||||
if (!jobHandler) {
|
||||
throw new Error(`No handler found for ${handler}:${operation}`);
|
||||
}
|
||||
|
||||
const result = await jobHandler(payload);
|
||||
|
||||
logger.trace('Job completed successfully', {
|
||||
id: job.id,
|
||||
handler,
|
||||
operation,
|
||||
queueName: this.queueName,
|
||||
});
|
||||
|
||||
return result;
|
||||
} catch (error) {
|
||||
logger.error('Job processing failed', {
|
||||
id: job.id,
|
||||
handler,
|
||||
operation,
|
||||
queueName: this.queueName,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
});
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Start workers manually (for delayed initialization)
|
||||
*/
|
||||
startWorkersManually(workerCount: number, concurrency: number = 1): void {
|
||||
if (this.workers.length > 0) {
|
||||
logger.warn('Workers already started for queue', { queueName: this.queueName });
|
||||
return;
|
||||
}
|
||||
|
||||
// Initialize queue events if not already done
|
||||
if (!this.queueEvents) {
|
||||
const connection = getRedisConnection(this.redisConfig);
|
||||
this.queueEvents = new QueueEvents(`{${this.queueName}}`, { connection });
|
||||
}
|
||||
|
||||
this.startWorkers(workerCount, concurrency);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the number of active workers
|
||||
*/
|
||||
getWorkerCount(): number {
|
||||
return this.workers.length;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the underlying BullMQ queue (for advanced operations)
|
||||
* @deprecated Use direct methods instead
|
||||
*/
|
||||
getBullQueue(): BullQueue {
|
||||
return this.bullQueue;
|
||||
}
|
||||
}
|
||||
import { Queue as BullQueue, QueueEvents, Worker, type Job } from 'bullmq';
|
||||
import { getLogger } from '@stock-bot/logger';
|
||||
import { handlerRegistry } from '@stock-bot/types';
|
||||
import type { JobData, JobOptions, QueueStats, RedisConfig } from './types';
|
||||
import { getRedisConnection } from './utils';
|
||||
|
||||
const logger = getLogger('queue');
|
||||
|
||||
export interface QueueWorkerConfig {
|
||||
workers?: number;
|
||||
concurrency?: number;
|
||||
startWorker?: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Consolidated Queue class that handles both job operations and optional worker management
|
||||
* Can be used as a simple job queue or with workers for automatic processing
|
||||
*/
|
||||
export class Queue {
|
||||
private bullQueue: BullQueue;
|
||||
private workers: Worker[] = [];
|
||||
private queueEvents?: QueueEvents;
|
||||
private queueName: string;
|
||||
private redisConfig: RedisConfig;
|
||||
|
||||
constructor(
|
||||
queueName: string,
|
||||
redisConfig: RedisConfig,
|
||||
defaultJobOptions: JobOptions = {},
|
||||
config: QueueWorkerConfig = {}
|
||||
) {
|
||||
this.queueName = queueName;
|
||||
this.redisConfig = redisConfig;
|
||||
|
||||
const connection = getRedisConnection(redisConfig);
|
||||
|
||||
// Initialize BullMQ queue
|
||||
this.bullQueue = new BullQueue(`{${queueName}}`, {
|
||||
connection,
|
||||
defaultJobOptions: {
|
||||
removeOnComplete: 10,
|
||||
removeOnFail: 5,
|
||||
attempts: 3,
|
||||
backoff: {
|
||||
type: 'exponential',
|
||||
delay: 1000,
|
||||
},
|
||||
...defaultJobOptions,
|
||||
},
|
||||
});
|
||||
|
||||
// Initialize queue events if workers will be used
|
||||
if (config.workers && config.workers > 0) {
|
||||
this.queueEvents = new QueueEvents(`{${queueName}}`, { connection });
|
||||
}
|
||||
|
||||
// Start workers if requested and not explicitly disabled
|
||||
if (config.workers && config.workers > 0 && config.startWorker !== false) {
|
||||
this.startWorkers(config.workers, config.concurrency || 1);
|
||||
}
|
||||
|
||||
logger.trace('Queue created', {
|
||||
queueName,
|
||||
workers: config.workers || 0,
|
||||
concurrency: config.concurrency || 1,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the queue name
|
||||
*/
|
||||
getName(): string {
|
||||
return this.queueName;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a single job to the queue
|
||||
*/
|
||||
async add(name: string, data: JobData, options: JobOptions = {}): Promise<Job> {
|
||||
logger.trace('Adding job', { queueName: this.queueName, jobName: name });
|
||||
return await this.bullQueue.add(name, data, options);
|
||||
}
|
||||
|
||||
/**
|
||||
* Add multiple jobs to the queue in bulk
|
||||
*/
|
||||
async addBulk(jobs: Array<{ name: string; data: JobData; opts?: JobOptions }>): Promise<Job[]> {
|
||||
logger.trace('Adding bulk jobs', {
|
||||
queueName: this.queueName,
|
||||
jobCount: jobs.length,
|
||||
});
|
||||
return await this.bullQueue.addBulk(jobs);
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a scheduled job with cron-like pattern
|
||||
*/
|
||||
async addScheduledJob(
|
||||
name: string,
|
||||
data: JobData,
|
||||
cronPattern: string,
|
||||
options: JobOptions = {}
|
||||
): Promise<Job> {
|
||||
const scheduledOptions: JobOptions = {
|
||||
...options,
|
||||
repeat: {
|
||||
pattern: cronPattern,
|
||||
// Use job name as repeat key to prevent duplicates
|
||||
key: `${this.queueName}:${name}`,
|
||||
...options.repeat,
|
||||
},
|
||||
};
|
||||
|
||||
logger.info('Adding scheduled job', {
|
||||
queueName: this.queueName,
|
||||
jobName: name,
|
||||
cronPattern,
|
||||
repeatKey: scheduledOptions.repeat?.key,
|
||||
immediately: scheduledOptions.repeat?.immediately,
|
||||
});
|
||||
|
||||
return await this.bullQueue.add(name, data, scheduledOptions);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get queue statistics
|
||||
*/
|
||||
async getStats(): Promise<QueueStats> {
|
||||
const [waiting, active, completed, failed, delayed] = await Promise.all([
|
||||
this.bullQueue.getWaiting(),
|
||||
this.bullQueue.getActive(),
|
||||
this.bullQueue.getCompleted(),
|
||||
this.bullQueue.getFailed(),
|
||||
this.bullQueue.getDelayed(),
|
||||
]);
|
||||
|
||||
const isPaused = await this.bullQueue.isPaused();
|
||||
|
||||
return {
|
||||
waiting: waiting.length,
|
||||
active: active.length,
|
||||
completed: completed.length,
|
||||
failed: failed.length,
|
||||
delayed: delayed.length,
|
||||
paused: isPaused,
|
||||
workers: this.workers.length,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a specific job by ID
|
||||
*/
|
||||
async getJob(jobId: string): Promise<Job | undefined> {
|
||||
return await this.bullQueue.getJob(jobId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get jobs by state
|
||||
*/
|
||||
async getJobs(
|
||||
states: Array<'waiting' | 'active' | 'completed' | 'failed' | 'delayed'>,
|
||||
start = 0,
|
||||
end = 100
|
||||
): Promise<Job[]> {
|
||||
return await this.bullQueue.getJobs(states, start, end);
|
||||
}
|
||||
|
||||
/**
|
||||
* Pause the queue (stops processing new jobs)
|
||||
*/
|
||||
async pause(): Promise<void> {
|
||||
await this.bullQueue.pause();
|
||||
logger.info('Queue paused', { queueName: this.queueName });
|
||||
}
|
||||
|
||||
/**
|
||||
* Resume the queue
|
||||
*/
|
||||
async resume(): Promise<void> {
|
||||
await this.bullQueue.resume();
|
||||
logger.info('Queue resumed', { queueName: this.queueName });
|
||||
}
|
||||
|
||||
/**
|
||||
* Drain the queue (remove all jobs)
|
||||
*/
|
||||
async drain(delayed = false): Promise<void> {
|
||||
await this.bullQueue.drain(delayed);
|
||||
logger.info('Queue drained', { queueName: this.queueName, delayed });
|
||||
}
|
||||
|
||||
/**
|
||||
* Clean completed and failed jobs
|
||||
*/
|
||||
async clean(
|
||||
grace: number = 0,
|
||||
limit: number = 100,
|
||||
type: 'completed' | 'failed' = 'completed'
|
||||
): Promise<void> {
|
||||
await this.bullQueue.clean(grace, limit, type);
|
||||
logger.debug('Queue cleaned', { queueName: this.queueName, type, grace, limit });
|
||||
}
|
||||
|
||||
/**
|
||||
* Wait until the queue is ready
|
||||
*/
|
||||
async waitUntilReady(): Promise<void> {
|
||||
await this.bullQueue.waitUntilReady();
|
||||
}
|
||||
|
||||
/**
|
||||
* Close the queue (cleanup resources)
|
||||
*/
|
||||
/**
|
||||
* Close the queue (cleanup resources)
|
||||
*/
|
||||
async close(): Promise<void> {
|
||||
try {
|
||||
// Close the queue itself
|
||||
await this.bullQueue.close();
|
||||
logger.info('Queue closed', { queueName: this.queueName });
|
||||
|
||||
// Close queue events
|
||||
if (this.queueEvents) {
|
||||
await this.queueEvents.close();
|
||||
logger.debug('Queue events closed', { queueName: this.queueName });
|
||||
}
|
||||
|
||||
// Close workers first
|
||||
if (this.workers.length > 0) {
|
||||
await Promise.all(
|
||||
this.workers.map(async worker => {
|
||||
return await worker.close();
|
||||
})
|
||||
);
|
||||
this.workers = [];
|
||||
logger.debug('Workers closed', { queueName: this.queueName });
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Error closing queue', { queueName: this.queueName, error });
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Start workers for this queue
|
||||
*/
|
||||
private startWorkers(workerCount: number, concurrency: number): void {
|
||||
const connection = getRedisConnection(this.redisConfig);
|
||||
|
||||
for (let i = 0; i < workerCount; i++) {
|
||||
const worker = new Worker(`{${this.queueName}}`, this.processJob.bind(this), {
|
||||
connection,
|
||||
concurrency,
|
||||
maxStalledCount: 3,
|
||||
stalledInterval: 30000,
|
||||
});
|
||||
|
||||
// Setup worker event handlers
|
||||
worker.on('completed', job => {
|
||||
logger.trace('Job completed', {
|
||||
queueName: this.queueName,
|
||||
jobId: job.id,
|
||||
handler: job.data?.handler,
|
||||
operation: job.data?.operation,
|
||||
});
|
||||
});
|
||||
|
||||
worker.on('failed', (job, err) => {
|
||||
logger.error('Job failed', {
|
||||
queueName: this.queueName,
|
||||
jobId: job?.id,
|
||||
handler: job?.data?.handler,
|
||||
operation: job?.data?.operation,
|
||||
error: err.message,
|
||||
});
|
||||
});
|
||||
|
||||
worker.on('error', error => {
|
||||
logger.error('Worker error', {
|
||||
queueName: this.queueName,
|
||||
workerId: i,
|
||||
error: error.message,
|
||||
});
|
||||
});
|
||||
|
||||
this.workers.push(worker);
|
||||
}
|
||||
|
||||
logger.info('Workers started', {
|
||||
queueName: this.queueName,
|
||||
workerCount,
|
||||
concurrency,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Process a job using the handler registry
|
||||
*/
|
||||
private async processJob(job: Job): Promise<unknown> {
|
||||
const { handler, operation, payload }: JobData = job.data;
|
||||
|
||||
logger.trace('Processing job', {
|
||||
id: job.id,
|
||||
handler,
|
||||
operation,
|
||||
queueName: this.queueName,
|
||||
});
|
||||
|
||||
try {
|
||||
// Look up handler in registry
|
||||
const jobHandler = handlerRegistry.getOperation(handler, operation);
|
||||
|
||||
if (!jobHandler) {
|
||||
throw new Error(`No handler found for ${handler}:${operation}`);
|
||||
}
|
||||
|
||||
const result = await jobHandler(payload);
|
||||
|
||||
logger.trace('Job completed successfully', {
|
||||
id: job.id,
|
||||
handler,
|
||||
operation,
|
||||
queueName: this.queueName,
|
||||
});
|
||||
|
||||
return result;
|
||||
} catch (error) {
|
||||
logger.error('Job processing failed', {
|
||||
id: job.id,
|
||||
handler,
|
||||
operation,
|
||||
queueName: this.queueName,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
});
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Start workers manually (for delayed initialization)
|
||||
*/
|
||||
startWorkersManually(workerCount: number, concurrency: number = 1): void {
|
||||
if (this.workers.length > 0) {
|
||||
logger.warn('Workers already started for queue', { queueName: this.queueName });
|
||||
return;
|
||||
}
|
||||
|
||||
// Initialize queue events if not already done
|
||||
if (!this.queueEvents) {
|
||||
const connection = getRedisConnection(this.redisConfig);
|
||||
this.queueEvents = new QueueEvents(`{${this.queueName}}`, { connection });
|
||||
}
|
||||
|
||||
this.startWorkers(workerCount, concurrency);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the number of active workers
|
||||
*/
|
||||
getWorkerCount(): number {
|
||||
return this.workers.length;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the underlying BullMQ queue (for advanced operations)
|
||||
* @deprecated Use direct methods instead
|
||||
*/
|
||||
getBullQueue(): BullQueue {
|
||||
return this.bullQueue;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,294 +1,327 @@
|
|||
import { RateLimiterRedis, RateLimiterRes } from 'rate-limiter-flexible';
|
||||
import { getLogger } from '@stock-bot/logger';
|
||||
import type { RateLimitConfig as BaseRateLimitConfig, RateLimitRule } from './types';
|
||||
|
||||
const logger = getLogger('rate-limiter');
|
||||
|
||||
// Extend the base config to add rate-limiter specific fields
|
||||
export interface RateLimitConfig extends BaseRateLimitConfig {
|
||||
keyPrefix?: string;
|
||||
}
|
||||
|
||||
export class QueueRateLimiter {
|
||||
private limiters = new Map<string, RateLimiterRedis>();
|
||||
private rules: RateLimitRule[] = [];
|
||||
|
||||
constructor(private redisClient: ReturnType<typeof import('./utils').getRedisConnection>) {}
|
||||
|
||||
/**
|
||||
* Add a rate limit rule
|
||||
*/
|
||||
addRule(rule: RateLimitRule): void {
|
||||
this.rules.push(rule);
|
||||
|
||||
const key = this.getRuleKey(rule.level, rule.queueName, rule.handler, rule.operation);
|
||||
const limiter = new RateLimiterRedis({
|
||||
storeClient: this.redisClient,
|
||||
keyPrefix: `rl:${key}`,
|
||||
points: rule.config.points,
|
||||
duration: rule.config.duration,
|
||||
blockDuration: rule.config.blockDuration || 0,
|
||||
});
|
||||
|
||||
this.limiters.set(key, limiter);
|
||||
|
||||
logger.info('Rate limit rule added', {
|
||||
level: rule.level,
|
||||
queueName: rule.queueName,
|
||||
handler: rule.handler,
|
||||
operation: rule.operation,
|
||||
points: rule.config.points,
|
||||
duration: rule.config.duration,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a job can be processed based on rate limits
|
||||
* Uses hierarchical precedence: operation > handler > queue > global
|
||||
* The most specific matching rule takes precedence
|
||||
*/
|
||||
async checkLimit(queueName: string, handler: string, operation: string): Promise<{
|
||||
allowed: boolean;
|
||||
retryAfter?: number;
|
||||
remainingPoints?: number;
|
||||
appliedRule?: RateLimitRule;
|
||||
}> {
|
||||
const applicableRule = this.getMostSpecificRule(queueName, handler, operation);
|
||||
|
||||
if (!applicableRule) {
|
||||
return { allowed: true };
|
||||
}
|
||||
|
||||
const key = this.getRuleKey(applicableRule.level, applicableRule.queueName, applicableRule.handler, applicableRule.operation);
|
||||
const limiter = this.limiters.get(key);
|
||||
|
||||
if (!limiter) {
|
||||
logger.warn('Rate limiter not found for rule', { key, rule: applicableRule });
|
||||
return { allowed: true };
|
||||
}
|
||||
|
||||
try {
|
||||
const result = await this.consumePoint(limiter, this.getConsumerKey(queueName, handler, operation));
|
||||
|
||||
return {
|
||||
...result,
|
||||
appliedRule: applicableRule,
|
||||
};
|
||||
} catch (error) {
|
||||
logger.error('Rate limit check failed', { queueName, handler, operation, error });
|
||||
// On error, allow the request to proceed
|
||||
return { allowed: true };
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the most specific rule that applies to this job
|
||||
* Precedence: operation > handler > queue > global
|
||||
*/
|
||||
private getMostSpecificRule(queueName: string, handler: string, operation: string): RateLimitRule | undefined {
|
||||
// 1. Check for operation-specific rule (most specific)
|
||||
let rule = this.rules.find(r =>
|
||||
r.level === 'operation' &&
|
||||
r.queueName === queueName &&
|
||||
r.handler === handler &&
|
||||
r.operation === operation
|
||||
);
|
||||
if (rule) {return rule;}
|
||||
|
||||
// 2. Check for handler-specific rule
|
||||
rule = this.rules.find(r =>
|
||||
r.level === 'handler' &&
|
||||
r.queueName === queueName &&
|
||||
r.handler === handler
|
||||
);
|
||||
if (rule) {return rule;}
|
||||
|
||||
// 3. Check for queue-specific rule
|
||||
rule = this.rules.find(r =>
|
||||
r.level === 'queue' &&
|
||||
r.queueName === queueName
|
||||
);
|
||||
if (rule) {return rule;}
|
||||
|
||||
// 4. Check for global rule (least specific)
|
||||
rule = this.rules.find(r => r.level === 'global');
|
||||
return rule;
|
||||
}
|
||||
|
||||
/**
|
||||
* Consume a point from the rate limiter
|
||||
*/
|
||||
private async consumePoint(
|
||||
limiter: RateLimiterRedis,
|
||||
key: string
|
||||
): Promise<{ allowed: boolean; retryAfter?: number; remainingPoints?: number }> {
|
||||
try {
|
||||
const result = await limiter.consume(key);
|
||||
return {
|
||||
allowed: true,
|
||||
remainingPoints: result.remainingPoints,
|
||||
};
|
||||
} catch (rejRes) {
|
||||
if (rejRes instanceof RateLimiterRes) {
|
||||
logger.warn('Rate limit exceeded', {
|
||||
key,
|
||||
retryAfter: rejRes.msBeforeNext,
|
||||
});
|
||||
|
||||
return {
|
||||
allowed: false,
|
||||
retryAfter: rejRes.msBeforeNext,
|
||||
remainingPoints: rejRes.remainingPoints,
|
||||
};
|
||||
}
|
||||
throw rejRes;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get rule key for storing rate limiter
|
||||
*/
|
||||
private getRuleKey(level: string, queueName?: string, handler?: string, operation?: string): string {
|
||||
switch (level) {
|
||||
case 'global':
|
||||
return 'global';
|
||||
case 'queue':
|
||||
return `queue:${queueName}`;
|
||||
case 'handler':
|
||||
return `handler:${queueName}:${handler}`;
|
||||
case 'operation':
|
||||
return `operation:${queueName}:${handler}:${operation}`;
|
||||
default:
|
||||
return level;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get consumer key for rate limiting (what gets counted)
|
||||
*/
|
||||
private getConsumerKey(queueName: string, handler: string, operation: string): string {
|
||||
return `${queueName}:${handler}:${operation}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current rate limit status for a queue/handler/operation
|
||||
*/
|
||||
async getStatus(queueName: string, handler: string, operation: string): Promise<{
|
||||
queueName: string;
|
||||
handler: string;
|
||||
operation: string;
|
||||
appliedRule?: RateLimitRule;
|
||||
limit?: {
|
||||
level: string;
|
||||
points: number;
|
||||
duration: number;
|
||||
remaining: number;
|
||||
resetIn: number;
|
||||
};
|
||||
}> {
|
||||
const applicableRule = this.getMostSpecificRule(queueName, handler, operation);
|
||||
|
||||
if (!applicableRule) {
|
||||
return {
|
||||
queueName,
|
||||
handler,
|
||||
operation,
|
||||
};
|
||||
}
|
||||
|
||||
const key = this.getRuleKey(applicableRule.level, applicableRule.queueName, applicableRule.handler, applicableRule.operation);
|
||||
const limiter = this.limiters.get(key);
|
||||
|
||||
if (!limiter) {
|
||||
return {
|
||||
queueName,
|
||||
handler,
|
||||
operation,
|
||||
appliedRule: applicableRule,
|
||||
};
|
||||
}
|
||||
|
||||
try {
|
||||
const consumerKey = this.getConsumerKey(queueName, handler, operation);
|
||||
const result = await limiter.get(consumerKey);
|
||||
|
||||
const limit = {
|
||||
level: applicableRule.level,
|
||||
points: limiter.points,
|
||||
duration: limiter.duration,
|
||||
remaining: result?.remainingPoints ?? limiter.points,
|
||||
resetIn: result?.msBeforeNext ?? 0,
|
||||
};
|
||||
|
||||
return {
|
||||
queueName,
|
||||
handler,
|
||||
operation,
|
||||
appliedRule: applicableRule,
|
||||
limit,
|
||||
};
|
||||
} catch (error) {
|
||||
logger.error('Failed to get rate limit status', { queueName, handler, operation, error });
|
||||
return {
|
||||
queueName,
|
||||
handler,
|
||||
operation,
|
||||
appliedRule: applicableRule,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Reset rate limits for a specific consumer
|
||||
*/
|
||||
async reset(queueName: string, handler?: string, operation?: string): Promise<void> {
|
||||
if (handler && operation) {
|
||||
// Reset specific operation
|
||||
const consumerKey = this.getConsumerKey(queueName, handler, operation);
|
||||
const rule = this.getMostSpecificRule(queueName, handler, operation);
|
||||
|
||||
if (rule) {
|
||||
const key = this.getRuleKey(rule.level, rule.queueName, rule.handler, rule.operation);
|
||||
const limiter = this.limiters.get(key);
|
||||
if (limiter) {
|
||||
await limiter.delete(consumerKey);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Reset broader scope - this is more complex with the new hierarchy
|
||||
logger.warn('Broad reset not implemented yet', { queueName, handler, operation });
|
||||
}
|
||||
|
||||
logger.info('Rate limits reset', { queueName, handler, operation });
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all configured rate limit rules
|
||||
*/
|
||||
getRules(): RateLimitRule[] {
|
||||
return [...this.rules];
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove a rate limit rule
|
||||
*/
|
||||
removeRule(level: string, queueName?: string, handler?: string, operation?: string): boolean {
|
||||
const key = this.getRuleKey(level, queueName, handler, operation);
|
||||
const ruleIndex = this.rules.findIndex(r =>
|
||||
r.level === level &&
|
||||
(!queueName || r.queueName === queueName) &&
|
||||
(!handler || r.handler === handler) &&
|
||||
(!operation || r.operation === operation)
|
||||
);
|
||||
|
||||
if (ruleIndex >= 0) {
|
||||
this.rules.splice(ruleIndex, 1);
|
||||
this.limiters.delete(key);
|
||||
|
||||
logger.info('Rate limit rule removed', { level, queueName, handler, operation });
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
}
|
||||
import { RateLimiterRedis, RateLimiterRes } from 'rate-limiter-flexible';
|
||||
import { getLogger } from '@stock-bot/logger';
|
||||
import type { RateLimitConfig as BaseRateLimitConfig, RateLimitRule } from './types';
|
||||
|
||||
const logger = getLogger('rate-limiter');
|
||||
|
||||
// Extend the base config to add rate-limiter specific fields
|
||||
export interface RateLimitConfig extends BaseRateLimitConfig {
|
||||
keyPrefix?: string;
|
||||
}
|
||||
|
||||
export class QueueRateLimiter {
|
||||
private limiters = new Map<string, RateLimiterRedis>();
|
||||
private rules: RateLimitRule[] = [];
|
||||
|
||||
constructor(private redisClient: ReturnType<typeof import('./utils').getRedisConnection>) {}
|
||||
|
||||
/**
|
||||
* Add a rate limit rule
|
||||
*/
|
||||
addRule(rule: RateLimitRule): void {
|
||||
this.rules.push(rule);
|
||||
|
||||
const key = this.getRuleKey(rule.level, rule.queueName, rule.handler, rule.operation);
|
||||
const limiter = new RateLimiterRedis({
|
||||
storeClient: this.redisClient,
|
||||
keyPrefix: `rl:${key}`,
|
||||
points: rule.config.points,
|
||||
duration: rule.config.duration,
|
||||
blockDuration: rule.config.blockDuration || 0,
|
||||
});
|
||||
|
||||
this.limiters.set(key, limiter);
|
||||
|
||||
logger.info('Rate limit rule added', {
|
||||
level: rule.level,
|
||||
queueName: rule.queueName,
|
||||
handler: rule.handler,
|
||||
operation: rule.operation,
|
||||
points: rule.config.points,
|
||||
duration: rule.config.duration,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a job can be processed based on rate limits
|
||||
* Uses hierarchical precedence: operation > handler > queue > global
|
||||
* The most specific matching rule takes precedence
|
||||
*/
|
||||
async checkLimit(
|
||||
queueName: string,
|
||||
handler: string,
|
||||
operation: string
|
||||
): Promise<{
|
||||
allowed: boolean;
|
||||
retryAfter?: number;
|
||||
remainingPoints?: number;
|
||||
appliedRule?: RateLimitRule;
|
||||
}> {
|
||||
const applicableRule = this.getMostSpecificRule(queueName, handler, operation);
|
||||
|
||||
if (!applicableRule) {
|
||||
return { allowed: true };
|
||||
}
|
||||
|
||||
const key = this.getRuleKey(
|
||||
applicableRule.level,
|
||||
applicableRule.queueName,
|
||||
applicableRule.handler,
|
||||
applicableRule.operation
|
||||
);
|
||||
const limiter = this.limiters.get(key);
|
||||
|
||||
if (!limiter) {
|
||||
logger.warn('Rate limiter not found for rule', { key, rule: applicableRule });
|
||||
return { allowed: true };
|
||||
}
|
||||
|
||||
try {
|
||||
const result = await this.consumePoint(
|
||||
limiter,
|
||||
this.getConsumerKey(queueName, handler, operation)
|
||||
);
|
||||
|
||||
return {
|
||||
...result,
|
||||
appliedRule: applicableRule,
|
||||
};
|
||||
} catch (error) {
|
||||
logger.error('Rate limit check failed', { queueName, handler, operation, error });
|
||||
// On error, allow the request to proceed
|
||||
return { allowed: true };
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the most specific rule that applies to this job
|
||||
* Precedence: operation > handler > queue > global
|
||||
*/
|
||||
private getMostSpecificRule(
|
||||
queueName: string,
|
||||
handler: string,
|
||||
operation: string
|
||||
): RateLimitRule | undefined {
|
||||
// 1. Check for operation-specific rule (most specific)
|
||||
let rule = this.rules.find(
|
||||
r =>
|
||||
r.level === 'operation' &&
|
||||
r.queueName === queueName &&
|
||||
r.handler === handler &&
|
||||
r.operation === operation
|
||||
);
|
||||
if (rule) {
|
||||
return rule;
|
||||
}
|
||||
|
||||
// 2. Check for handler-specific rule
|
||||
rule = this.rules.find(
|
||||
r => r.level === 'handler' && r.queueName === queueName && r.handler === handler
|
||||
);
|
||||
if (rule) {
|
||||
return rule;
|
||||
}
|
||||
|
||||
// 3. Check for queue-specific rule
|
||||
rule = this.rules.find(r => r.level === 'queue' && r.queueName === queueName);
|
||||
if (rule) {
|
||||
return rule;
|
||||
}
|
||||
|
||||
// 4. Check for global rule (least specific)
|
||||
rule = this.rules.find(r => r.level === 'global');
|
||||
return rule;
|
||||
}
|
||||
|
||||
/**
|
||||
* Consume a point from the rate limiter
|
||||
*/
|
||||
private async consumePoint(
|
||||
limiter: RateLimiterRedis,
|
||||
key: string
|
||||
): Promise<{ allowed: boolean; retryAfter?: number; remainingPoints?: number }> {
|
||||
try {
|
||||
const result = await limiter.consume(key);
|
||||
return {
|
||||
allowed: true,
|
||||
remainingPoints: result.remainingPoints,
|
||||
};
|
||||
} catch (rejRes) {
|
||||
if (rejRes instanceof RateLimiterRes) {
|
||||
logger.warn('Rate limit exceeded', {
|
||||
key,
|
||||
retryAfter: rejRes.msBeforeNext,
|
||||
});
|
||||
|
||||
return {
|
||||
allowed: false,
|
||||
retryAfter: rejRes.msBeforeNext,
|
||||
remainingPoints: rejRes.remainingPoints,
|
||||
};
|
||||
}
|
||||
throw rejRes;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get rule key for storing rate limiter
|
||||
*/
|
||||
private getRuleKey(
|
||||
level: string,
|
||||
queueName?: string,
|
||||
handler?: string,
|
||||
operation?: string
|
||||
): string {
|
||||
switch (level) {
|
||||
case 'global':
|
||||
return 'global';
|
||||
case 'queue':
|
||||
return `queue:${queueName}`;
|
||||
case 'handler':
|
||||
return `handler:${queueName}:${handler}`;
|
||||
case 'operation':
|
||||
return `operation:${queueName}:${handler}:${operation}`;
|
||||
default:
|
||||
return level;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get consumer key for rate limiting (what gets counted)
|
||||
*/
|
||||
private getConsumerKey(queueName: string, handler: string, operation: string): string {
|
||||
return `${queueName}:${handler}:${operation}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current rate limit status for a queue/handler/operation
|
||||
*/
|
||||
async getStatus(
|
||||
queueName: string,
|
||||
handler: string,
|
||||
operation: string
|
||||
): Promise<{
|
||||
queueName: string;
|
||||
handler: string;
|
||||
operation: string;
|
||||
appliedRule?: RateLimitRule;
|
||||
limit?: {
|
||||
level: string;
|
||||
points: number;
|
||||
duration: number;
|
||||
remaining: number;
|
||||
resetIn: number;
|
||||
};
|
||||
}> {
|
||||
const applicableRule = this.getMostSpecificRule(queueName, handler, operation);
|
||||
|
||||
if (!applicableRule) {
|
||||
return {
|
||||
queueName,
|
||||
handler,
|
||||
operation,
|
||||
};
|
||||
}
|
||||
|
||||
const key = this.getRuleKey(
|
||||
applicableRule.level,
|
||||
applicableRule.queueName,
|
||||
applicableRule.handler,
|
||||
applicableRule.operation
|
||||
);
|
||||
const limiter = this.limiters.get(key);
|
||||
|
||||
if (!limiter) {
|
||||
return {
|
||||
queueName,
|
||||
handler,
|
||||
operation,
|
||||
appliedRule: applicableRule,
|
||||
};
|
||||
}
|
||||
|
||||
try {
|
||||
const consumerKey = this.getConsumerKey(queueName, handler, operation);
|
||||
const result = await limiter.get(consumerKey);
|
||||
|
||||
const limit = {
|
||||
level: applicableRule.level,
|
||||
points: limiter.points,
|
||||
duration: limiter.duration,
|
||||
remaining: result?.remainingPoints ?? limiter.points,
|
||||
resetIn: result?.msBeforeNext ?? 0,
|
||||
};
|
||||
|
||||
return {
|
||||
queueName,
|
||||
handler,
|
||||
operation,
|
||||
appliedRule: applicableRule,
|
||||
limit,
|
||||
};
|
||||
} catch (error) {
|
||||
logger.error('Failed to get rate limit status', { queueName, handler, operation, error });
|
||||
return {
|
||||
queueName,
|
||||
handler,
|
||||
operation,
|
||||
appliedRule: applicableRule,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Reset rate limits for a specific consumer
|
||||
*/
|
||||
async reset(queueName: string, handler?: string, operation?: string): Promise<void> {
|
||||
if (handler && operation) {
|
||||
// Reset specific operation
|
||||
const consumerKey = this.getConsumerKey(queueName, handler, operation);
|
||||
const rule = this.getMostSpecificRule(queueName, handler, operation);
|
||||
|
||||
if (rule) {
|
||||
const key = this.getRuleKey(rule.level, rule.queueName, rule.handler, rule.operation);
|
||||
const limiter = this.limiters.get(key);
|
||||
if (limiter) {
|
||||
await limiter.delete(consumerKey);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Reset broader scope - this is more complex with the new hierarchy
|
||||
logger.warn('Broad reset not implemented yet', { queueName, handler, operation });
|
||||
}
|
||||
|
||||
logger.info('Rate limits reset', { queueName, handler, operation });
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all configured rate limit rules
|
||||
*/
|
||||
getRules(): RateLimitRule[] {
|
||||
return [...this.rules];
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove a rate limit rule
|
||||
*/
|
||||
removeRule(level: string, queueName?: string, handler?: string, operation?: string): boolean {
|
||||
const key = this.getRuleKey(level, queueName, handler, operation);
|
||||
const ruleIndex = this.rules.findIndex(
|
||||
r =>
|
||||
r.level === level &&
|
||||
(!queueName || r.queueName === queueName) &&
|
||||
(!handler || r.handler === handler) &&
|
||||
(!operation || r.operation === operation)
|
||||
);
|
||||
|
||||
if (ruleIndex >= 0) {
|
||||
this.rules.splice(ruleIndex, 1);
|
||||
this.limiters.delete(key);
|
||||
|
||||
logger.info('Rate limit rule removed', { level, queueName, handler, operation });
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -71,7 +71,7 @@ export interface QueueOptions {
|
|||
enableMetrics?: boolean;
|
||||
enableDLQ?: boolean;
|
||||
enableRateLimit?: boolean;
|
||||
rateLimitRules?: RateLimitRule[]; // Queue-specific rate limit rules
|
||||
rateLimitRules?: RateLimitRule[]; // Queue-specific rate limit rules
|
||||
}
|
||||
|
||||
export interface QueueManagerConfig {
|
||||
|
|
@ -79,8 +79,8 @@ export interface QueueManagerConfig {
|
|||
defaultQueueOptions?: QueueOptions;
|
||||
enableScheduledJobs?: boolean;
|
||||
globalRateLimit?: RateLimitConfig;
|
||||
rateLimitRules?: RateLimitRule[]; // Global rate limit rules
|
||||
delayWorkerStart?: boolean; // If true, workers won't start automatically
|
||||
rateLimitRules?: RateLimitRule[]; // Global rate limit rules
|
||||
delayWorkerStart?: boolean; // If true, workers won't start automatically
|
||||
}
|
||||
|
||||
export interface QueueStats {
|
||||
|
|
@ -118,7 +118,7 @@ export interface BatchJobData {
|
|||
batchIndex: number;
|
||||
totalBatches: number;
|
||||
itemCount: number;
|
||||
totalDelayHours: number; // Total time to distribute all batches
|
||||
totalDelayHours: number; // Total time to distribute all batches
|
||||
}
|
||||
|
||||
export interface HandlerInitializer {
|
||||
|
|
@ -134,9 +134,9 @@ export interface RateLimitConfig {
|
|||
|
||||
export interface RateLimitRule {
|
||||
level: 'global' | 'queue' | 'handler' | 'operation';
|
||||
queueName?: string; // For queue-level limits
|
||||
handler?: string; // For handler-level limits
|
||||
operation?: string; // For operation-level limits (most specific)
|
||||
queueName?: string; // For queue-level limits
|
||||
handler?: string; // For handler-level limits
|
||||
operation?: string; // For operation-level limits (most specific)
|
||||
config: RateLimitConfig;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ import type { RedisConfig } from './types';
|
|||
*/
|
||||
export function getRedisConnection(config: RedisConfig) {
|
||||
const isTest = process.env.NODE_ENV === 'test' || process.env['BUNIT'] === '1';
|
||||
|
||||
|
||||
return {
|
||||
host: config.host,
|
||||
port: config.port,
|
||||
|
|
|
|||
|
|
@ -1,355 +1,364 @@
|
|||
import { describe, test, expect, beforeEach, afterEach } from 'bun:test';
|
||||
import { QueueManager, Queue, handlerRegistry, processItems } from '../src';
|
||||
|
||||
// Suppress Redis connection errors in tests
|
||||
process.on('unhandledRejection', (reason, promise) => {
|
||||
if (reason && typeof reason === 'object' && 'message' in reason) {
|
||||
const message = (reason as Error).message;
|
||||
if (message.includes('Connection is closed') ||
|
||||
message.includes('Connection is in monitoring mode')) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
console.error('Unhandled Rejection at:', promise, 'reason:', reason);
|
||||
});
|
||||
|
||||
describe('Batch Processor', () => {
|
||||
let queueManager: QueueManager;
|
||||
let queue: Queue;
|
||||
let queueName: string;
|
||||
|
||||
const redisConfig = {
|
||||
host: 'localhost',
|
||||
port: 6379,
|
||||
password: '',
|
||||
db: 0,
|
||||
};
|
||||
|
||||
|
||||
beforeEach(async () => {
|
||||
// Clear handler registry
|
||||
handlerRegistry.clear();
|
||||
|
||||
// Register test handler
|
||||
handlerRegistry.register('batch-test', {
|
||||
'process-item': async (payload) => {
|
||||
return { processed: true, data: payload };
|
||||
},
|
||||
'generic': async (payload) => {
|
||||
return { processed: true, data: payload };
|
||||
},
|
||||
'process-batch-items': async (_batchData) => {
|
||||
// This is called by the batch processor internally
|
||||
return { batchProcessed: true };
|
||||
},
|
||||
});
|
||||
|
||||
// Use unique queue name per test to avoid conflicts
|
||||
queueName = `batch-test-queue-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
|
||||
|
||||
// Reset and initialize singleton QueueManager for tests
|
||||
await QueueManager.reset();
|
||||
queueManager = QueueManager.initialize({
|
||||
redis: redisConfig,
|
||||
defaultQueueOptions: {
|
||||
workers: 0, // No workers in tests
|
||||
concurrency: 5,
|
||||
},
|
||||
});
|
||||
|
||||
// Get queue using the new getQueue() method (batch cache is now auto-initialized)
|
||||
queue = queueManager.getQueue(queueName);
|
||||
// Note: Batch cache is now automatically initialized when getting the queue
|
||||
|
||||
// Ensure completely clean state - wait for queue to be ready first
|
||||
await queue.getBullQueue().waitUntilReady();
|
||||
|
||||
// Clear all job states
|
||||
await queue.getBullQueue().drain(true);
|
||||
await queue.getBullQueue().clean(0, 1000, 'completed');
|
||||
await queue.getBullQueue().clean(0, 1000, 'failed');
|
||||
await queue.getBullQueue().clean(0, 1000, 'active');
|
||||
await queue.getBullQueue().clean(0, 1000, 'waiting');
|
||||
await queue.getBullQueue().clean(0, 1000, 'delayed');
|
||||
|
||||
// Add a small delay to ensure cleanup is complete
|
||||
await new Promise(resolve => setTimeout(resolve, 50));
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
try {
|
||||
// Clean up jobs first
|
||||
if (queue) {
|
||||
try {
|
||||
await queue.getBullQueue().drain(true);
|
||||
await queue.getBullQueue().clean(0, 1000, 'completed');
|
||||
await queue.getBullQueue().clean(0, 1000, 'failed');
|
||||
await queue.getBullQueue().clean(0, 1000, 'active');
|
||||
await queue.getBullQueue().clean(0, 1000, 'waiting');
|
||||
await queue.getBullQueue().clean(0, 1000, 'delayed');
|
||||
} catch {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
await queue.close();
|
||||
}
|
||||
|
||||
if (queueManager) {
|
||||
await Promise.race([
|
||||
QueueManager.reset(),
|
||||
new Promise((_, reject) =>
|
||||
setTimeout(() => reject(new Error('Shutdown timeout')), 3000)
|
||||
)
|
||||
]);
|
||||
}
|
||||
} catch (error) {
|
||||
console.warn('Cleanup error:', error.message);
|
||||
} finally {
|
||||
handlerRegistry.clear();
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
}
|
||||
});
|
||||
|
||||
describe('Direct Processing', () => {
|
||||
test('should process items directly without batching', async () => {
|
||||
const items = ['item1', 'item2', 'item3', 'item4', 'item5'];
|
||||
|
||||
const result = await processItems(items, queueName, {
|
||||
totalDelayHours: 0.001, // 3.6 seconds total
|
||||
useBatching: false,
|
||||
handler: 'batch-test',
|
||||
operation: 'process-item',
|
||||
priority: 1,
|
||||
});
|
||||
|
||||
expect(result.mode).toBe('direct');
|
||||
expect(result.totalItems).toBe(5);
|
||||
expect(result.jobsCreated).toBe(5);
|
||||
|
||||
// Verify jobs were created - BullMQ has an issue where job ID "1" doesn't show up in state queries
|
||||
// but exists when queried directly, so we need to check both ways
|
||||
const [delayedJobs, waitingJobs, activeJobs, completedJobs, failedJobs, job1] = await Promise.all([
|
||||
queue.getBullQueue().getJobs(['delayed']),
|
||||
queue.getBullQueue().getJobs(['waiting']),
|
||||
queue.getBullQueue().getJobs(['active']),
|
||||
queue.getBullQueue().getJobs(['completed']),
|
||||
queue.getBullQueue().getJobs(['failed']),
|
||||
queue.getBullQueue().getJob('1'), // Job 1 often doesn't show up in state queries
|
||||
]);
|
||||
|
||||
const jobs = [...delayedJobs, ...waitingJobs, ...activeJobs, ...completedJobs, ...failedJobs];
|
||||
const ourJobs = jobs.filter(j => j.name === 'process-item' && j.data.handler === 'batch-test');
|
||||
|
||||
// Include job 1 if we found it directly but it wasn't in the state queries
|
||||
if (job1 && job1.name === 'process-item' && job1.data.handler === 'batch-test' && !ourJobs.find(j => j.id === '1')) {
|
||||
ourJobs.push(job1);
|
||||
}
|
||||
|
||||
expect(ourJobs.length).toBe(5);
|
||||
|
||||
// Check delays are distributed
|
||||
const delays = ourJobs.map(j => j.opts.delay || 0).sort((a, b) => a - b);
|
||||
expect(delays[0]).toBe(0);
|
||||
expect(delays[4]).toBeGreaterThan(delays[0]);
|
||||
});
|
||||
|
||||
test('should process complex objects directly', async () => {
|
||||
const items = [
|
||||
{ id: 1, name: 'Product A', price: 100 },
|
||||
{ id: 2, name: 'Product B', price: 200 },
|
||||
{ id: 3, name: 'Product C', price: 300 },
|
||||
];
|
||||
|
||||
const result = await processItems(items, queueName, {
|
||||
totalDelayHours: 0.001,
|
||||
useBatching: false,
|
||||
handler: 'batch-test',
|
||||
operation: 'process-item',
|
||||
});
|
||||
|
||||
expect(result.jobsCreated).toBe(3);
|
||||
|
||||
// Check job payloads
|
||||
const jobs = await queue.getBullQueue().getJobs(['waiting', 'delayed']);
|
||||
const ourJobs = jobs.filter(j => j.name === 'process-item' && j.data.handler === 'batch-test');
|
||||
const payloads = ourJobs.map(j => j.data.payload);
|
||||
|
||||
expect(payloads).toContainEqual({ id: 1, name: 'Product A', price: 100 });
|
||||
expect(payloads).toContainEqual({ id: 2, name: 'Product B', price: 200 });
|
||||
expect(payloads).toContainEqual({ id: 3, name: 'Product C', price: 300 });
|
||||
});
|
||||
});
|
||||
|
||||
describe('Batch Processing', () => {
|
||||
test('should process items in batches', async () => {
|
||||
const items = Array.from({ length: 50 }, (_, i) => ({ id: i, value: `item-${i}` }));
|
||||
|
||||
const result = await processItems(items, queueName, {
|
||||
totalDelayHours: 0.001,
|
||||
useBatching: true,
|
||||
batchSize: 10,
|
||||
handler: 'batch-test',
|
||||
operation: 'process-item',
|
||||
});
|
||||
|
||||
expect(result.mode).toBe('batch');
|
||||
expect(result.totalItems).toBe(50);
|
||||
expect(result.batchesCreated).toBe(5); // 50 items / 10 per batch
|
||||
expect(result.jobsCreated).toBe(5); // 5 batch jobs
|
||||
|
||||
// Verify batch jobs were created
|
||||
const jobs = await queue.getBullQueue().getJobs(['delayed', 'waiting']);
|
||||
const batchJobs = jobs.filter(j => j.name === 'process-batch');
|
||||
expect(batchJobs.length).toBe(5);
|
||||
});
|
||||
|
||||
test('should handle different batch sizes', async () => {
|
||||
const items = Array.from({ length: 23 }, (_, i) => i);
|
||||
|
||||
const result = await processItems(items, queueName, {
|
||||
totalDelayHours: 0.001,
|
||||
useBatching: true,
|
||||
batchSize: 7,
|
||||
handler: 'batch-test',
|
||||
operation: 'process-item',
|
||||
});
|
||||
|
||||
expect(result.batchesCreated).toBe(4); // 23/7 = 3.28, rounded up to 4
|
||||
expect(result.jobsCreated).toBe(4);
|
||||
});
|
||||
|
||||
test('should store batch payloads in cache', async () => {
|
||||
const items = [
|
||||
{ type: 'A', data: 'test1' },
|
||||
{ type: 'B', data: 'test2' },
|
||||
];
|
||||
|
||||
const result = await processItems(items, queueName, {
|
||||
totalDelayHours: 0.001,
|
||||
useBatching: true,
|
||||
batchSize: 2,
|
||||
handler: 'batch-test',
|
||||
operation: 'process-item',
|
||||
ttl: 3600, // 1 hour TTL
|
||||
});
|
||||
|
||||
expect(result.jobsCreated).toBe(1);
|
||||
|
||||
// Get the batch job
|
||||
const jobs = await queue.getBullQueue().getJobs(['waiting', 'delayed']);
|
||||
expect(jobs.length).toBe(1);
|
||||
|
||||
const batchJob = jobs[0];
|
||||
expect(batchJob.data.payload.payloadKey).toBeDefined();
|
||||
expect(batchJob.data.payload.itemCount).toBe(2);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Empty and Edge Cases', () => {
|
||||
test('should handle empty item list', async () => {
|
||||
const result = await processItems([], queueName, {
|
||||
totalDelayHours: 1,
|
||||
handler: 'batch-test',
|
||||
operation: 'process-item',
|
||||
});
|
||||
|
||||
expect(result.totalItems).toBe(0);
|
||||
expect(result.jobsCreated).toBe(0);
|
||||
expect(result.duration).toBeDefined();
|
||||
});
|
||||
|
||||
test('should handle single item', async () => {
|
||||
const result = await processItems(['single-item'], queueName, {
|
||||
totalDelayHours: 0.001,
|
||||
handler: 'batch-test',
|
||||
operation: 'process-item',
|
||||
});
|
||||
|
||||
expect(result.totalItems).toBe(1);
|
||||
expect(result.jobsCreated).toBe(1);
|
||||
});
|
||||
|
||||
test('should handle large batch with delays', async () => {
|
||||
const items = Array.from({ length: 100 }, (_, i) => ({ index: i }));
|
||||
|
||||
const result = await processItems(items, queueName, {
|
||||
totalDelayHours: 0.01, // 36 seconds total
|
||||
useBatching: true,
|
||||
batchSize: 25,
|
||||
handler: 'batch-test',
|
||||
operation: 'process-item',
|
||||
});
|
||||
|
||||
expect(result.batchesCreated).toBe(4); // 100/25
|
||||
expect(result.jobsCreated).toBe(4);
|
||||
|
||||
// Check delays are distributed
|
||||
const jobs = await queue.getBullQueue().getJobs(['delayed', 'waiting']);
|
||||
const delays = jobs.map(j => j.opts.delay || 0).sort((a, b) => a - b);
|
||||
|
||||
expect(delays[0]).toBe(0); // First batch has no delay
|
||||
expect(delays[3]).toBeGreaterThan(0); // Last batch has delay
|
||||
});
|
||||
});
|
||||
|
||||
describe('Job Options', () => {
|
||||
test('should respect custom job options', async () => {
|
||||
const items = ['a', 'b', 'c'];
|
||||
|
||||
await processItems(items, queueName, {
|
||||
totalDelayHours: 0,
|
||||
handler: 'batch-test',
|
||||
operation: 'process-item',
|
||||
priority: 5,
|
||||
retries: 10,
|
||||
removeOnComplete: 100,
|
||||
removeOnFail: 50,
|
||||
});
|
||||
|
||||
// Check all states including job ID "1" specifically (as it often doesn't show up in state queries)
|
||||
const [waitingJobs, delayedJobs, job1, job2, job3] = await Promise.all([
|
||||
queue.getBullQueue().getJobs(['waiting']),
|
||||
queue.getBullQueue().getJobs(['delayed']),
|
||||
queue.getBullQueue().getJob('1'),
|
||||
queue.getBullQueue().getJob('2'),
|
||||
queue.getBullQueue().getJob('3'),
|
||||
]);
|
||||
|
||||
const jobs = [...waitingJobs, ...delayedJobs];
|
||||
// Add any missing jobs that exist but don't show up in state queries
|
||||
[job1, job2, job3].forEach(job => {
|
||||
if (job && !jobs.find(j => j.id === job.id)) {
|
||||
jobs.push(job);
|
||||
}
|
||||
});
|
||||
|
||||
expect(jobs.length).toBe(3);
|
||||
|
||||
jobs.forEach(job => {
|
||||
expect(job.opts.priority).toBe(5);
|
||||
expect(job.opts.attempts).toBe(10);
|
||||
expect(job.opts.removeOnComplete).toBe(100);
|
||||
expect(job.opts.removeOnFail).toBe(50);
|
||||
});
|
||||
});
|
||||
|
||||
test('should set handler and operation correctly', async () => {
|
||||
// Register custom handler for this test
|
||||
handlerRegistry.register('custom-handler', {
|
||||
'custom-operation': async (payload) => {
|
||||
return { processed: true, data: payload };
|
||||
},
|
||||
});
|
||||
|
||||
await processItems(['test'], queueName, {
|
||||
totalDelayHours: 0,
|
||||
handler: 'custom-handler',
|
||||
operation: 'custom-operation',
|
||||
});
|
||||
|
||||
const jobs = await queue.getBullQueue().getJobs(['waiting']);
|
||||
expect(jobs.length).toBe(1);
|
||||
expect(jobs[0].data.handler).toBe('custom-handler');
|
||||
expect(jobs[0].data.operation).toBe('custom-operation');
|
||||
});
|
||||
});
|
||||
});
|
||||
import { afterEach, beforeEach, describe, expect, test } from 'bun:test';
|
||||
import { handlerRegistry, processItems, Queue, QueueManager } from '../src';
|
||||
|
||||
// Suppress Redis connection errors in tests
|
||||
process.on('unhandledRejection', (reason, promise) => {
|
||||
if (reason && typeof reason === 'object' && 'message' in reason) {
|
||||
const message = (reason as Error).message;
|
||||
if (
|
||||
message.includes('Connection is closed') ||
|
||||
message.includes('Connection is in monitoring mode')
|
||||
) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
console.error('Unhandled Rejection at:', promise, 'reason:', reason);
|
||||
});
|
||||
|
||||
describe('Batch Processor', () => {
|
||||
let queueManager: QueueManager;
|
||||
let queue: Queue;
|
||||
let queueName: string;
|
||||
|
||||
const redisConfig = {
|
||||
host: 'localhost',
|
||||
port: 6379,
|
||||
password: '',
|
||||
db: 0,
|
||||
};
|
||||
|
||||
beforeEach(async () => {
|
||||
// Clear handler registry
|
||||
handlerRegistry.clear();
|
||||
|
||||
// Register test handler
|
||||
handlerRegistry.register('batch-test', {
|
||||
'process-item': async payload => {
|
||||
return { processed: true, data: payload };
|
||||
},
|
||||
generic: async payload => {
|
||||
return { processed: true, data: payload };
|
||||
},
|
||||
'process-batch-items': async _batchData => {
|
||||
// This is called by the batch processor internally
|
||||
return { batchProcessed: true };
|
||||
},
|
||||
});
|
||||
|
||||
// Use unique queue name per test to avoid conflicts
|
||||
queueName = `batch-test-queue-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
|
||||
|
||||
// Reset and initialize singleton QueueManager for tests
|
||||
await QueueManager.reset();
|
||||
queueManager = QueueManager.initialize({
|
||||
redis: redisConfig,
|
||||
defaultQueueOptions: {
|
||||
workers: 0, // No workers in tests
|
||||
concurrency: 5,
|
||||
},
|
||||
});
|
||||
|
||||
// Get queue using the new getQueue() method (batch cache is now auto-initialized)
|
||||
queue = queueManager.getQueue(queueName);
|
||||
// Note: Batch cache is now automatically initialized when getting the queue
|
||||
|
||||
// Ensure completely clean state - wait for queue to be ready first
|
||||
await queue.getBullQueue().waitUntilReady();
|
||||
|
||||
// Clear all job states
|
||||
await queue.getBullQueue().drain(true);
|
||||
await queue.getBullQueue().clean(0, 1000, 'completed');
|
||||
await queue.getBullQueue().clean(0, 1000, 'failed');
|
||||
await queue.getBullQueue().clean(0, 1000, 'active');
|
||||
await queue.getBullQueue().clean(0, 1000, 'waiting');
|
||||
await queue.getBullQueue().clean(0, 1000, 'delayed');
|
||||
|
||||
// Add a small delay to ensure cleanup is complete
|
||||
await new Promise(resolve => setTimeout(resolve, 50));
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
try {
|
||||
// Clean up jobs first
|
||||
if (queue) {
|
||||
try {
|
||||
await queue.getBullQueue().drain(true);
|
||||
await queue.getBullQueue().clean(0, 1000, 'completed');
|
||||
await queue.getBullQueue().clean(0, 1000, 'failed');
|
||||
await queue.getBullQueue().clean(0, 1000, 'active');
|
||||
await queue.getBullQueue().clean(0, 1000, 'waiting');
|
||||
await queue.getBullQueue().clean(0, 1000, 'delayed');
|
||||
} catch {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
await queue.close();
|
||||
}
|
||||
|
||||
if (queueManager) {
|
||||
await Promise.race([
|
||||
QueueManager.reset(),
|
||||
new Promise((_, reject) => setTimeout(() => reject(new Error('Shutdown timeout')), 3000)),
|
||||
]);
|
||||
}
|
||||
} catch (error) {
|
||||
console.warn('Cleanup error:', error.message);
|
||||
} finally {
|
||||
handlerRegistry.clear();
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
}
|
||||
});
|
||||
|
||||
describe('Direct Processing', () => {
|
||||
test('should process items directly without batching', async () => {
|
||||
const items = ['item1', 'item2', 'item3', 'item4', 'item5'];
|
||||
|
||||
const result = await processItems(items, queueName, {
|
||||
totalDelayHours: 0.001, // 3.6 seconds total
|
||||
useBatching: false,
|
||||
handler: 'batch-test',
|
||||
operation: 'process-item',
|
||||
priority: 1,
|
||||
});
|
||||
|
||||
expect(result.mode).toBe('direct');
|
||||
expect(result.totalItems).toBe(5);
|
||||
expect(result.jobsCreated).toBe(5);
|
||||
|
||||
// Verify jobs were created - BullMQ has an issue where job ID "1" doesn't show up in state queries
|
||||
// but exists when queried directly, so we need to check both ways
|
||||
const [delayedJobs, waitingJobs, activeJobs, completedJobs, failedJobs, job1] =
|
||||
await Promise.all([
|
||||
queue.getBullQueue().getJobs(['delayed']),
|
||||
queue.getBullQueue().getJobs(['waiting']),
|
||||
queue.getBullQueue().getJobs(['active']),
|
||||
queue.getBullQueue().getJobs(['completed']),
|
||||
queue.getBullQueue().getJobs(['failed']),
|
||||
queue.getBullQueue().getJob('1'), // Job 1 often doesn't show up in state queries
|
||||
]);
|
||||
|
||||
const jobs = [...delayedJobs, ...waitingJobs, ...activeJobs, ...completedJobs, ...failedJobs];
|
||||
const ourJobs = jobs.filter(
|
||||
j => j.name === 'process-item' && j.data.handler === 'batch-test'
|
||||
);
|
||||
|
||||
// Include job 1 if we found it directly but it wasn't in the state queries
|
||||
if (
|
||||
job1 &&
|
||||
job1.name === 'process-item' &&
|
||||
job1.data.handler === 'batch-test' &&
|
||||
!ourJobs.find(j => j.id === '1')
|
||||
) {
|
||||
ourJobs.push(job1);
|
||||
}
|
||||
|
||||
expect(ourJobs.length).toBe(5);
|
||||
|
||||
// Check delays are distributed
|
||||
const delays = ourJobs.map(j => j.opts.delay || 0).sort((a, b) => a - b);
|
||||
expect(delays[0]).toBe(0);
|
||||
expect(delays[4]).toBeGreaterThan(delays[0]);
|
||||
});
|
||||
|
||||
test('should process complex objects directly', async () => {
|
||||
const items = [
|
||||
{ id: 1, name: 'Product A', price: 100 },
|
||||
{ id: 2, name: 'Product B', price: 200 },
|
||||
{ id: 3, name: 'Product C', price: 300 },
|
||||
];
|
||||
|
||||
const result = await processItems(items, queueName, {
|
||||
totalDelayHours: 0.001,
|
||||
useBatching: false,
|
||||
handler: 'batch-test',
|
||||
operation: 'process-item',
|
||||
});
|
||||
|
||||
expect(result.jobsCreated).toBe(3);
|
||||
|
||||
// Check job payloads
|
||||
const jobs = await queue.getBullQueue().getJobs(['waiting', 'delayed']);
|
||||
const ourJobs = jobs.filter(
|
||||
j => j.name === 'process-item' && j.data.handler === 'batch-test'
|
||||
);
|
||||
const payloads = ourJobs.map(j => j.data.payload);
|
||||
|
||||
expect(payloads).toContainEqual({ id: 1, name: 'Product A', price: 100 });
|
||||
expect(payloads).toContainEqual({ id: 2, name: 'Product B', price: 200 });
|
||||
expect(payloads).toContainEqual({ id: 3, name: 'Product C', price: 300 });
|
||||
});
|
||||
});
|
||||
|
||||
describe('Batch Processing', () => {
|
||||
test('should process items in batches', async () => {
|
||||
const items = Array.from({ length: 50 }, (_, i) => ({ id: i, value: `item-${i}` }));
|
||||
|
||||
const result = await processItems(items, queueName, {
|
||||
totalDelayHours: 0.001,
|
||||
useBatching: true,
|
||||
batchSize: 10,
|
||||
handler: 'batch-test',
|
||||
operation: 'process-item',
|
||||
});
|
||||
|
||||
expect(result.mode).toBe('batch');
|
||||
expect(result.totalItems).toBe(50);
|
||||
expect(result.batchesCreated).toBe(5); // 50 items / 10 per batch
|
||||
expect(result.jobsCreated).toBe(5); // 5 batch jobs
|
||||
|
||||
// Verify batch jobs were created
|
||||
const jobs = await queue.getBullQueue().getJobs(['delayed', 'waiting']);
|
||||
const batchJobs = jobs.filter(j => j.name === 'process-batch');
|
||||
expect(batchJobs.length).toBe(5);
|
||||
});
|
||||
|
||||
test('should handle different batch sizes', async () => {
|
||||
const items = Array.from({ length: 23 }, (_, i) => i);
|
||||
|
||||
const result = await processItems(items, queueName, {
|
||||
totalDelayHours: 0.001,
|
||||
useBatching: true,
|
||||
batchSize: 7,
|
||||
handler: 'batch-test',
|
||||
operation: 'process-item',
|
||||
});
|
||||
|
||||
expect(result.batchesCreated).toBe(4); // 23/7 = 3.28, rounded up to 4
|
||||
expect(result.jobsCreated).toBe(4);
|
||||
});
|
||||
|
||||
test('should store batch payloads in cache', async () => {
|
||||
const items = [
|
||||
{ type: 'A', data: 'test1' },
|
||||
{ type: 'B', data: 'test2' },
|
||||
];
|
||||
|
||||
const result = await processItems(items, queueName, {
|
||||
totalDelayHours: 0.001,
|
||||
useBatching: true,
|
||||
batchSize: 2,
|
||||
handler: 'batch-test',
|
||||
operation: 'process-item',
|
||||
ttl: 3600, // 1 hour TTL
|
||||
});
|
||||
|
||||
expect(result.jobsCreated).toBe(1);
|
||||
|
||||
// Get the batch job
|
||||
const jobs = await queue.getBullQueue().getJobs(['waiting', 'delayed']);
|
||||
expect(jobs.length).toBe(1);
|
||||
|
||||
const batchJob = jobs[0];
|
||||
expect(batchJob.data.payload.payloadKey).toBeDefined();
|
||||
expect(batchJob.data.payload.itemCount).toBe(2);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Empty and Edge Cases', () => {
|
||||
test('should handle empty item list', async () => {
|
||||
const result = await processItems([], queueName, {
|
||||
totalDelayHours: 1,
|
||||
handler: 'batch-test',
|
||||
operation: 'process-item',
|
||||
});
|
||||
|
||||
expect(result.totalItems).toBe(0);
|
||||
expect(result.jobsCreated).toBe(0);
|
||||
expect(result.duration).toBeDefined();
|
||||
});
|
||||
|
||||
test('should handle single item', async () => {
|
||||
const result = await processItems(['single-item'], queueName, {
|
||||
totalDelayHours: 0.001,
|
||||
handler: 'batch-test',
|
||||
operation: 'process-item',
|
||||
});
|
||||
|
||||
expect(result.totalItems).toBe(1);
|
||||
expect(result.jobsCreated).toBe(1);
|
||||
});
|
||||
|
||||
test('should handle large batch with delays', async () => {
|
||||
const items = Array.from({ length: 100 }, (_, i) => ({ index: i }));
|
||||
|
||||
const result = await processItems(items, queueName, {
|
||||
totalDelayHours: 0.01, // 36 seconds total
|
||||
useBatching: true,
|
||||
batchSize: 25,
|
||||
handler: 'batch-test',
|
||||
operation: 'process-item',
|
||||
});
|
||||
|
||||
expect(result.batchesCreated).toBe(4); // 100/25
|
||||
expect(result.jobsCreated).toBe(4);
|
||||
|
||||
// Check delays are distributed
|
||||
const jobs = await queue.getBullQueue().getJobs(['delayed', 'waiting']);
|
||||
const delays = jobs.map(j => j.opts.delay || 0).sort((a, b) => a - b);
|
||||
|
||||
expect(delays[0]).toBe(0); // First batch has no delay
|
||||
expect(delays[3]).toBeGreaterThan(0); // Last batch has delay
|
||||
});
|
||||
});
|
||||
|
||||
describe('Job Options', () => {
|
||||
test('should respect custom job options', async () => {
|
||||
const items = ['a', 'b', 'c'];
|
||||
|
||||
await processItems(items, queueName, {
|
||||
totalDelayHours: 0,
|
||||
handler: 'batch-test',
|
||||
operation: 'process-item',
|
||||
priority: 5,
|
||||
retries: 10,
|
||||
removeOnComplete: 100,
|
||||
removeOnFail: 50,
|
||||
});
|
||||
|
||||
// Check all states including job ID "1" specifically (as it often doesn't show up in state queries)
|
||||
const [waitingJobs, delayedJobs, job1, job2, job3] = await Promise.all([
|
||||
queue.getBullQueue().getJobs(['waiting']),
|
||||
queue.getBullQueue().getJobs(['delayed']),
|
||||
queue.getBullQueue().getJob('1'),
|
||||
queue.getBullQueue().getJob('2'),
|
||||
queue.getBullQueue().getJob('3'),
|
||||
]);
|
||||
|
||||
const jobs = [...waitingJobs, ...delayedJobs];
|
||||
// Add any missing jobs that exist but don't show up in state queries
|
||||
[job1, job2, job3].forEach(job => {
|
||||
if (job && !jobs.find(j => j.id === job.id)) {
|
||||
jobs.push(job);
|
||||
}
|
||||
});
|
||||
|
||||
expect(jobs.length).toBe(3);
|
||||
|
||||
jobs.forEach(job => {
|
||||
expect(job.opts.priority).toBe(5);
|
||||
expect(job.opts.attempts).toBe(10);
|
||||
expect(job.opts.removeOnComplete).toBe(100);
|
||||
expect(job.opts.removeOnFail).toBe(50);
|
||||
});
|
||||
});
|
||||
|
||||
test('should set handler and operation correctly', async () => {
|
||||
// Register custom handler for this test
|
||||
handlerRegistry.register('custom-handler', {
|
||||
'custom-operation': async payload => {
|
||||
return { processed: true, data: payload };
|
||||
},
|
||||
});
|
||||
|
||||
await processItems(['test'], queueName, {
|
||||
totalDelayHours: 0,
|
||||
handler: 'custom-handler',
|
||||
operation: 'custom-operation',
|
||||
});
|
||||
|
||||
const jobs = await queue.getBullQueue().getJobs(['waiting']);
|
||||
expect(jobs.length).toBe(1);
|
||||
expect(jobs[0].data.handler).toBe('custom-handler');
|
||||
expect(jobs[0].data.operation).toBe('custom-operation');
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@ -1,357 +1,379 @@
|
|||
import { describe, test, expect, beforeEach, afterEach } from 'bun:test';
|
||||
import { Queue, Worker } from 'bullmq';
|
||||
import { DeadLetterQueueHandler } from '../src/dlq-handler';
|
||||
import { getRedisConnection } from '../src/utils';
|
||||
|
||||
// Suppress Redis connection errors in tests
|
||||
process.on('unhandledRejection', (reason, promise) => {
|
||||
if (reason && typeof reason === 'object' && 'message' in reason) {
|
||||
const message = (reason as Error).message;
|
||||
if (message.includes('Connection is closed') ||
|
||||
message.includes('Connection is in monitoring mode')) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
console.error('Unhandled Rejection at:', promise, 'reason:', reason);
|
||||
});
|
||||
|
||||
describe('DeadLetterQueueHandler', () => {
|
||||
let mainQueue: Queue;
|
||||
let dlqHandler: DeadLetterQueueHandler;
|
||||
let worker: Worker;
|
||||
let connection: any;
|
||||
|
||||
const redisConfig = {
|
||||
host: 'localhost',
|
||||
port: 6379,
|
||||
password: '',
|
||||
db: 0,
|
||||
};
|
||||
|
||||
beforeEach(async () => {
|
||||
connection = getRedisConnection(redisConfig);
|
||||
|
||||
// Create main queue
|
||||
mainQueue = new Queue('test-queue', { connection });
|
||||
|
||||
// Create DLQ handler
|
||||
dlqHandler = new DeadLetterQueueHandler(mainQueue, connection, {
|
||||
maxRetries: 3,
|
||||
retryDelay: 100,
|
||||
alertThreshold: 5,
|
||||
cleanupAge: 24,
|
||||
});
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
try {
|
||||
if (worker) {
|
||||
await worker.close();
|
||||
}
|
||||
await dlqHandler.shutdown();
|
||||
await mainQueue.close();
|
||||
} catch {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
await new Promise(resolve => setTimeout(resolve, 50));
|
||||
});
|
||||
|
||||
describe('Failed Job Handling', () => {
|
||||
test('should move job to DLQ after max retries', async () => {
|
||||
let attemptCount = 0;
|
||||
|
||||
// Create worker that always fails
|
||||
worker = new Worker('test-queue', async () => {
|
||||
attemptCount++;
|
||||
throw new Error('Job failed');
|
||||
}, {
|
||||
connection,
|
||||
autorun: false,
|
||||
});
|
||||
|
||||
// Add job with limited attempts
|
||||
const _job = await mainQueue.add('failing-job', { test: true }, {
|
||||
attempts: 3,
|
||||
backoff: { type: 'fixed', delay: 50 },
|
||||
});
|
||||
|
||||
// Process job manually
|
||||
await worker.run();
|
||||
|
||||
// Wait for retries
|
||||
await new Promise(resolve => setTimeout(resolve, 300));
|
||||
|
||||
// Job should have failed 3 times
|
||||
expect(attemptCount).toBe(3);
|
||||
|
||||
// Check if job was moved to DLQ
|
||||
const dlqStats = await dlqHandler.getStats();
|
||||
expect(dlqStats.total).toBe(1);
|
||||
expect(dlqStats.byJobName['failing-job']).toBe(1);
|
||||
});
|
||||
|
||||
test('should track failure count correctly', async () => {
|
||||
const job = await mainQueue.add('test-job', { data: 'test' });
|
||||
const error = new Error('Test error');
|
||||
|
||||
// Simulate multiple failures
|
||||
await dlqHandler.handleFailedJob(job, error);
|
||||
await dlqHandler.handleFailedJob(job, error);
|
||||
|
||||
// On third failure with max attempts reached, should move to DLQ
|
||||
job.attemptsMade = 3;
|
||||
job.opts.attempts = 3;
|
||||
await dlqHandler.handleFailedJob(job, error);
|
||||
|
||||
const stats = await dlqHandler.getStats();
|
||||
expect(stats.total).toBe(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe('DLQ Statistics', () => {
|
||||
test('should provide detailed statistics', async () => {
|
||||
// Add some failed jobs to DLQ
|
||||
const dlq = new Queue(`test-queue-dlq`, { connection });
|
||||
|
||||
await dlq.add('failed-job', {
|
||||
originalJob: {
|
||||
id: '1',
|
||||
name: 'job-type-a',
|
||||
data: { test: true },
|
||||
attemptsMade: 3,
|
||||
},
|
||||
error: { message: 'Error 1' },
|
||||
movedToDLQAt: new Date().toISOString(),
|
||||
});
|
||||
|
||||
await dlq.add('failed-job', {
|
||||
originalJob: {
|
||||
id: '2',
|
||||
name: 'job-type-b',
|
||||
data: { test: true },
|
||||
attemptsMade: 3,
|
||||
},
|
||||
error: { message: 'Error 2' },
|
||||
movedToDLQAt: new Date().toISOString(),
|
||||
});
|
||||
|
||||
const stats = await dlqHandler.getStats();
|
||||
expect(stats.total).toBe(2);
|
||||
expect(stats.recent).toBe(2); // Both are recent
|
||||
expect(Object.keys(stats.byJobName).length).toBe(2);
|
||||
expect(stats.oldestJob).toBeDefined();
|
||||
|
||||
await dlq.close();
|
||||
});
|
||||
|
||||
test('should count recent jobs correctly', async () => {
|
||||
const dlq = new Queue(`test-queue-dlq`, { connection });
|
||||
|
||||
// Add old job (25 hours ago)
|
||||
const oldTimestamp = Date.now() - 25 * 60 * 60 * 1000;
|
||||
await dlq.add('failed-job', {
|
||||
originalJob: { id: '1', name: 'old-job' },
|
||||
error: { message: 'Old error' },
|
||||
movedToDLQAt: new Date(oldTimestamp).toISOString(),
|
||||
}, { timestamp: oldTimestamp });
|
||||
|
||||
// Add recent job
|
||||
await dlq.add('failed-job', {
|
||||
originalJob: { id: '2', name: 'recent-job' },
|
||||
error: { message: 'Recent error' },
|
||||
movedToDLQAt: new Date().toISOString(),
|
||||
});
|
||||
|
||||
const stats = await dlqHandler.getStats();
|
||||
expect(stats.total).toBe(2);
|
||||
expect(stats.recent).toBe(1); // Only one is recent
|
||||
|
||||
await dlq.close();
|
||||
});
|
||||
});
|
||||
|
||||
describe('DLQ Retry', () => {
|
||||
test('should retry jobs from DLQ', async () => {
|
||||
const dlq = new Queue(`test-queue-dlq`, { connection });
|
||||
|
||||
// Add failed jobs to DLQ
|
||||
await dlq.add('failed-job', {
|
||||
originalJob: {
|
||||
id: '1',
|
||||
name: 'retry-job',
|
||||
data: { retry: true },
|
||||
opts: { priority: 1 },
|
||||
},
|
||||
error: { message: 'Failed' },
|
||||
movedToDLQAt: new Date().toISOString(),
|
||||
});
|
||||
|
||||
await dlq.add('failed-job', {
|
||||
originalJob: {
|
||||
id: '2',
|
||||
name: 'retry-job-2',
|
||||
data: { retry: true },
|
||||
opts: {},
|
||||
},
|
||||
error: { message: 'Failed' },
|
||||
movedToDLQAt: new Date().toISOString(),
|
||||
});
|
||||
|
||||
// Retry jobs
|
||||
const retriedCount = await dlqHandler.retryDLQJobs(10);
|
||||
expect(retriedCount).toBe(2);
|
||||
|
||||
// Check main queue has the retried jobs
|
||||
const mainQueueJobs = await mainQueue.getWaiting();
|
||||
expect(mainQueueJobs.length).toBe(2);
|
||||
expect(mainQueueJobs[0].name).toBe('retry-job');
|
||||
expect(mainQueueJobs[0].data).toEqual({ retry: true });
|
||||
|
||||
// DLQ should be empty
|
||||
const dlqJobs = await dlq.getCompleted();
|
||||
expect(dlqJobs.length).toBe(0);
|
||||
|
||||
await dlq.close();
|
||||
});
|
||||
|
||||
test('should respect retry limit', async () => {
|
||||
const dlq = new Queue(`test-queue-dlq`, { connection });
|
||||
|
||||
// Add 5 failed jobs
|
||||
for (let i = 0; i < 5; i++) {
|
||||
await dlq.add('failed-job', {
|
||||
originalJob: {
|
||||
id: `${i}`,
|
||||
name: `job-${i}`,
|
||||
data: { index: i },
|
||||
},
|
||||
error: { message: 'Failed' },
|
||||
movedToDLQAt: new Date().toISOString(),
|
||||
});
|
||||
}
|
||||
|
||||
// Retry only 3 jobs
|
||||
const retriedCount = await dlqHandler.retryDLQJobs(3);
|
||||
expect(retriedCount).toBe(3);
|
||||
|
||||
// Check counts
|
||||
const mainQueueJobs = await mainQueue.getWaiting();
|
||||
expect(mainQueueJobs.length).toBe(3);
|
||||
|
||||
const remainingDLQ = await dlq.getCompleted();
|
||||
expect(remainingDLQ.length).toBe(2);
|
||||
|
||||
await dlq.close();
|
||||
});
|
||||
});
|
||||
|
||||
describe('DLQ Cleanup', () => {
|
||||
test('should cleanup old DLQ entries', async () => {
|
||||
const dlq = new Queue(`test-queue-dlq`, { connection });
|
||||
|
||||
// Add old job (25 hours ago)
|
||||
const oldTimestamp = Date.now() - 25 * 60 * 60 * 1000;
|
||||
await dlq.add('failed-job', {
|
||||
originalJob: { id: '1', name: 'old-job' },
|
||||
error: { message: 'Old error' },
|
||||
}, { timestamp: oldTimestamp });
|
||||
|
||||
// Add recent job (1 hour ago)
|
||||
const recentTimestamp = Date.now() - 1 * 60 * 60 * 1000;
|
||||
await dlq.add('failed-job', {
|
||||
originalJob: { id: '2', name: 'recent-job' },
|
||||
error: { message: 'Recent error' },
|
||||
}, { timestamp: recentTimestamp });
|
||||
|
||||
// Run cleanup (24 hour threshold)
|
||||
const removedCount = await dlqHandler.cleanup();
|
||||
expect(removedCount).toBe(1);
|
||||
|
||||
// Check remaining jobs
|
||||
const remaining = await dlq.getCompleted();
|
||||
expect(remaining.length).toBe(1);
|
||||
expect(remaining[0].data.originalJob.name).toBe('recent-job');
|
||||
|
||||
await dlq.close();
|
||||
});
|
||||
});
|
||||
|
||||
describe('Failed Job Inspection', () => {
|
||||
test('should inspect failed jobs', async () => {
|
||||
const dlq = new Queue(`test-queue-dlq`, { connection });
|
||||
|
||||
// Add failed jobs with different error types
|
||||
await dlq.add('failed-job', {
|
||||
originalJob: {
|
||||
id: '1',
|
||||
name: 'network-job',
|
||||
data: { url: 'https://api.example.com' },
|
||||
attemptsMade: 3,
|
||||
},
|
||||
error: {
|
||||
message: 'Network timeout',
|
||||
stack: 'Error: Network timeout\n at ...',
|
||||
name: 'NetworkError',
|
||||
},
|
||||
movedToDLQAt: '2024-01-01T10:00:00Z',
|
||||
});
|
||||
|
||||
await dlq.add('failed-job', {
|
||||
originalJob: {
|
||||
id: '2',
|
||||
name: 'parse-job',
|
||||
data: { input: 'invalid-json' },
|
||||
attemptsMade: 2,
|
||||
},
|
||||
error: {
|
||||
message: 'Invalid JSON',
|
||||
stack: 'SyntaxError: Invalid JSON\n at ...',
|
||||
name: 'SyntaxError',
|
||||
},
|
||||
movedToDLQAt: '2024-01-01T11:00:00Z',
|
||||
});
|
||||
|
||||
const failedJobs = await dlqHandler.inspectFailedJobs(10);
|
||||
expect(failedJobs.length).toBe(2);
|
||||
|
||||
expect(failedJobs[0]).toMatchObject({
|
||||
id: '1',
|
||||
name: 'network-job',
|
||||
data: { url: 'https://api.example.com' },
|
||||
error: {
|
||||
message: 'Network timeout',
|
||||
name: 'NetworkError',
|
||||
},
|
||||
failedAt: '2024-01-01T10:00:00Z',
|
||||
attempts: 3,
|
||||
});
|
||||
|
||||
await dlq.close();
|
||||
});
|
||||
});
|
||||
|
||||
describe('Alert Threshold', () => {
|
||||
test('should detect when alert threshold is exceeded', async () => {
|
||||
const dlq = new Queue(`test-queue-dlq`, { connection });
|
||||
|
||||
// Add jobs to exceed threshold (5)
|
||||
for (let i = 0; i < 6; i++) {
|
||||
await dlq.add('failed-job', {
|
||||
originalJob: {
|
||||
id: `${i}`,
|
||||
name: `job-${i}`,
|
||||
data: { index: i },
|
||||
},
|
||||
error: { message: 'Failed' },
|
||||
movedToDLQAt: new Date().toISOString(),
|
||||
});
|
||||
}
|
||||
|
||||
const stats = await dlqHandler.getStats();
|
||||
expect(stats.total).toBe(6);
|
||||
// In a real implementation, this would trigger alerts
|
||||
|
||||
await dlq.close();
|
||||
});
|
||||
});
|
||||
});
|
||||
import { Queue, Worker } from 'bullmq';
|
||||
import { afterEach, beforeEach, describe, expect, test } from 'bun:test';
|
||||
import { DeadLetterQueueHandler } from '../src/dlq-handler';
|
||||
import { getRedisConnection } from '../src/utils';
|
||||
|
||||
// Suppress Redis connection errors in tests
|
||||
process.on('unhandledRejection', (reason, promise) => {
|
||||
if (reason && typeof reason === 'object' && 'message' in reason) {
|
||||
const message = (reason as Error).message;
|
||||
if (
|
||||
message.includes('Connection is closed') ||
|
||||
message.includes('Connection is in monitoring mode')
|
||||
) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
console.error('Unhandled Rejection at:', promise, 'reason:', reason);
|
||||
});
|
||||
|
||||
describe('DeadLetterQueueHandler', () => {
|
||||
let mainQueue: Queue;
|
||||
let dlqHandler: DeadLetterQueueHandler;
|
||||
let worker: Worker;
|
||||
let connection: any;
|
||||
|
||||
const redisConfig = {
|
||||
host: 'localhost',
|
||||
port: 6379,
|
||||
password: '',
|
||||
db: 0,
|
||||
};
|
||||
|
||||
beforeEach(async () => {
|
||||
connection = getRedisConnection(redisConfig);
|
||||
|
||||
// Create main queue
|
||||
mainQueue = new Queue('test-queue', { connection });
|
||||
|
||||
// Create DLQ handler
|
||||
dlqHandler = new DeadLetterQueueHandler(mainQueue, connection, {
|
||||
maxRetries: 3,
|
||||
retryDelay: 100,
|
||||
alertThreshold: 5,
|
||||
cleanupAge: 24,
|
||||
});
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
try {
|
||||
if (worker) {
|
||||
await worker.close();
|
||||
}
|
||||
await dlqHandler.shutdown();
|
||||
await mainQueue.close();
|
||||
} catch {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
await new Promise(resolve => setTimeout(resolve, 50));
|
||||
});
|
||||
|
||||
describe('Failed Job Handling', () => {
|
||||
test('should move job to DLQ after max retries', async () => {
|
||||
let attemptCount = 0;
|
||||
|
||||
// Create worker that always fails
|
||||
worker = new Worker(
|
||||
'test-queue',
|
||||
async () => {
|
||||
attemptCount++;
|
||||
throw new Error('Job failed');
|
||||
},
|
||||
{
|
||||
connection,
|
||||
autorun: false,
|
||||
}
|
||||
);
|
||||
|
||||
// Add job with limited attempts
|
||||
const _job = await mainQueue.add(
|
||||
'failing-job',
|
||||
{ test: true },
|
||||
{
|
||||
attempts: 3,
|
||||
backoff: { type: 'fixed', delay: 50 },
|
||||
}
|
||||
);
|
||||
|
||||
// Process job manually
|
||||
await worker.run();
|
||||
|
||||
// Wait for retries
|
||||
await new Promise(resolve => setTimeout(resolve, 300));
|
||||
|
||||
// Job should have failed 3 times
|
||||
expect(attemptCount).toBe(3);
|
||||
|
||||
// Check if job was moved to DLQ
|
||||
const dlqStats = await dlqHandler.getStats();
|
||||
expect(dlqStats.total).toBe(1);
|
||||
expect(dlqStats.byJobName['failing-job']).toBe(1);
|
||||
});
|
||||
|
||||
test('should track failure count correctly', async () => {
|
||||
const job = await mainQueue.add('test-job', { data: 'test' });
|
||||
const error = new Error('Test error');
|
||||
|
||||
// Simulate multiple failures
|
||||
await dlqHandler.handleFailedJob(job, error);
|
||||
await dlqHandler.handleFailedJob(job, error);
|
||||
|
||||
// On third failure with max attempts reached, should move to DLQ
|
||||
job.attemptsMade = 3;
|
||||
job.opts.attempts = 3;
|
||||
await dlqHandler.handleFailedJob(job, error);
|
||||
|
||||
const stats = await dlqHandler.getStats();
|
||||
expect(stats.total).toBe(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe('DLQ Statistics', () => {
|
||||
test('should provide detailed statistics', async () => {
|
||||
// Add some failed jobs to DLQ
|
||||
const dlq = new Queue(`test-queue-dlq`, { connection });
|
||||
|
||||
await dlq.add('failed-job', {
|
||||
originalJob: {
|
||||
id: '1',
|
||||
name: 'job-type-a',
|
||||
data: { test: true },
|
||||
attemptsMade: 3,
|
||||
},
|
||||
error: { message: 'Error 1' },
|
||||
movedToDLQAt: new Date().toISOString(),
|
||||
});
|
||||
|
||||
await dlq.add('failed-job', {
|
||||
originalJob: {
|
||||
id: '2',
|
||||
name: 'job-type-b',
|
||||
data: { test: true },
|
||||
attemptsMade: 3,
|
||||
},
|
||||
error: { message: 'Error 2' },
|
||||
movedToDLQAt: new Date().toISOString(),
|
||||
});
|
||||
|
||||
const stats = await dlqHandler.getStats();
|
||||
expect(stats.total).toBe(2);
|
||||
expect(stats.recent).toBe(2); // Both are recent
|
||||
expect(Object.keys(stats.byJobName).length).toBe(2);
|
||||
expect(stats.oldestJob).toBeDefined();
|
||||
|
||||
await dlq.close();
|
||||
});
|
||||
|
||||
test('should count recent jobs correctly', async () => {
|
||||
const dlq = new Queue(`test-queue-dlq`, { connection });
|
||||
|
||||
// Add old job (25 hours ago)
|
||||
const oldTimestamp = Date.now() - 25 * 60 * 60 * 1000;
|
||||
await dlq.add(
|
||||
'failed-job',
|
||||
{
|
||||
originalJob: { id: '1', name: 'old-job' },
|
||||
error: { message: 'Old error' },
|
||||
movedToDLQAt: new Date(oldTimestamp).toISOString(),
|
||||
},
|
||||
{ timestamp: oldTimestamp }
|
||||
);
|
||||
|
||||
// Add recent job
|
||||
await dlq.add('failed-job', {
|
||||
originalJob: { id: '2', name: 'recent-job' },
|
||||
error: { message: 'Recent error' },
|
||||
movedToDLQAt: new Date().toISOString(),
|
||||
});
|
||||
|
||||
const stats = await dlqHandler.getStats();
|
||||
expect(stats.total).toBe(2);
|
||||
expect(stats.recent).toBe(1); // Only one is recent
|
||||
|
||||
await dlq.close();
|
||||
});
|
||||
});
|
||||
|
||||
describe('DLQ Retry', () => {
|
||||
test('should retry jobs from DLQ', async () => {
|
||||
const dlq = new Queue(`test-queue-dlq`, { connection });
|
||||
|
||||
// Add failed jobs to DLQ
|
||||
await dlq.add('failed-job', {
|
||||
originalJob: {
|
||||
id: '1',
|
||||
name: 'retry-job',
|
||||
data: { retry: true },
|
||||
opts: { priority: 1 },
|
||||
},
|
||||
error: { message: 'Failed' },
|
||||
movedToDLQAt: new Date().toISOString(),
|
||||
});
|
||||
|
||||
await dlq.add('failed-job', {
|
||||
originalJob: {
|
||||
id: '2',
|
||||
name: 'retry-job-2',
|
||||
data: { retry: true },
|
||||
opts: {},
|
||||
},
|
||||
error: { message: 'Failed' },
|
||||
movedToDLQAt: new Date().toISOString(),
|
||||
});
|
||||
|
||||
// Retry jobs
|
||||
const retriedCount = await dlqHandler.retryDLQJobs(10);
|
||||
expect(retriedCount).toBe(2);
|
||||
|
||||
// Check main queue has the retried jobs
|
||||
const mainQueueJobs = await mainQueue.getWaiting();
|
||||
expect(mainQueueJobs.length).toBe(2);
|
||||
expect(mainQueueJobs[0].name).toBe('retry-job');
|
||||
expect(mainQueueJobs[0].data).toEqual({ retry: true });
|
||||
|
||||
// DLQ should be empty
|
||||
const dlqJobs = await dlq.getCompleted();
|
||||
expect(dlqJobs.length).toBe(0);
|
||||
|
||||
await dlq.close();
|
||||
});
|
||||
|
||||
test('should respect retry limit', async () => {
|
||||
const dlq = new Queue(`test-queue-dlq`, { connection });
|
||||
|
||||
// Add 5 failed jobs
|
||||
for (let i = 0; i < 5; i++) {
|
||||
await dlq.add('failed-job', {
|
||||
originalJob: {
|
||||
id: `${i}`,
|
||||
name: `job-${i}`,
|
||||
data: { index: i },
|
||||
},
|
||||
error: { message: 'Failed' },
|
||||
movedToDLQAt: new Date().toISOString(),
|
||||
});
|
||||
}
|
||||
|
||||
// Retry only 3 jobs
|
||||
const retriedCount = await dlqHandler.retryDLQJobs(3);
|
||||
expect(retriedCount).toBe(3);
|
||||
|
||||
// Check counts
|
||||
const mainQueueJobs = await mainQueue.getWaiting();
|
||||
expect(mainQueueJobs.length).toBe(3);
|
||||
|
||||
const remainingDLQ = await dlq.getCompleted();
|
||||
expect(remainingDLQ.length).toBe(2);
|
||||
|
||||
await dlq.close();
|
||||
});
|
||||
});
|
||||
|
||||
describe('DLQ Cleanup', () => {
|
||||
test('should cleanup old DLQ entries', async () => {
|
||||
const dlq = new Queue(`test-queue-dlq`, { connection });
|
||||
|
||||
// Add old job (25 hours ago)
|
||||
const oldTimestamp = Date.now() - 25 * 60 * 60 * 1000;
|
||||
await dlq.add(
|
||||
'failed-job',
|
||||
{
|
||||
originalJob: { id: '1', name: 'old-job' },
|
||||
error: { message: 'Old error' },
|
||||
},
|
||||
{ timestamp: oldTimestamp }
|
||||
);
|
||||
|
||||
// Add recent job (1 hour ago)
|
||||
const recentTimestamp = Date.now() - 1 * 60 * 60 * 1000;
|
||||
await dlq.add(
|
||||
'failed-job',
|
||||
{
|
||||
originalJob: { id: '2', name: 'recent-job' },
|
||||
error: { message: 'Recent error' },
|
||||
},
|
||||
{ timestamp: recentTimestamp }
|
||||
);
|
||||
|
||||
// Run cleanup (24 hour threshold)
|
||||
const removedCount = await dlqHandler.cleanup();
|
||||
expect(removedCount).toBe(1);
|
||||
|
||||
// Check remaining jobs
|
||||
const remaining = await dlq.getCompleted();
|
||||
expect(remaining.length).toBe(1);
|
||||
expect(remaining[0].data.originalJob.name).toBe('recent-job');
|
||||
|
||||
await dlq.close();
|
||||
});
|
||||
});
|
||||
|
||||
describe('Failed Job Inspection', () => {
|
||||
test('should inspect failed jobs', async () => {
|
||||
const dlq = new Queue(`test-queue-dlq`, { connection });
|
||||
|
||||
// Add failed jobs with different error types
|
||||
await dlq.add('failed-job', {
|
||||
originalJob: {
|
||||
id: '1',
|
||||
name: 'network-job',
|
||||
data: { url: 'https://api.example.com' },
|
||||
attemptsMade: 3,
|
||||
},
|
||||
error: {
|
||||
message: 'Network timeout',
|
||||
stack: 'Error: Network timeout\n at ...',
|
||||
name: 'NetworkError',
|
||||
},
|
||||
movedToDLQAt: '2024-01-01T10:00:00Z',
|
||||
});
|
||||
|
||||
await dlq.add('failed-job', {
|
||||
originalJob: {
|
||||
id: '2',
|
||||
name: 'parse-job',
|
||||
data: { input: 'invalid-json' },
|
||||
attemptsMade: 2,
|
||||
},
|
||||
error: {
|
||||
message: 'Invalid JSON',
|
||||
stack: 'SyntaxError: Invalid JSON\n at ...',
|
||||
name: 'SyntaxError',
|
||||
},
|
||||
movedToDLQAt: '2024-01-01T11:00:00Z',
|
||||
});
|
||||
|
||||
const failedJobs = await dlqHandler.inspectFailedJobs(10);
|
||||
expect(failedJobs.length).toBe(2);
|
||||
|
||||
expect(failedJobs[0]).toMatchObject({
|
||||
id: '1',
|
||||
name: 'network-job',
|
||||
data: { url: 'https://api.example.com' },
|
||||
error: {
|
||||
message: 'Network timeout',
|
||||
name: 'NetworkError',
|
||||
},
|
||||
failedAt: '2024-01-01T10:00:00Z',
|
||||
attempts: 3,
|
||||
});
|
||||
|
||||
await dlq.close();
|
||||
});
|
||||
});
|
||||
|
||||
describe('Alert Threshold', () => {
|
||||
test('should detect when alert threshold is exceeded', async () => {
|
||||
const dlq = new Queue(`test-queue-dlq`, { connection });
|
||||
|
||||
// Add jobs to exceed threshold (5)
|
||||
for (let i = 0; i < 6; i++) {
|
||||
await dlq.add('failed-job', {
|
||||
originalJob: {
|
||||
id: `${i}`,
|
||||
name: `job-${i}`,
|
||||
data: { index: i },
|
||||
},
|
||||
error: { message: 'Failed' },
|
||||
movedToDLQAt: new Date().toISOString(),
|
||||
});
|
||||
}
|
||||
|
||||
const stats = await dlqHandler.getStats();
|
||||
expect(stats.total).toBe(6);
|
||||
// In a real implementation, this would trigger alerts
|
||||
|
||||
await dlq.close();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@ -1,12 +1,14 @@
|
|||
import { describe, test, expect, beforeEach, afterEach } from 'bun:test';
|
||||
import { QueueManager, handlerRegistry } from '../src';
|
||||
import { afterEach, beforeEach, describe, expect, test } from 'bun:test';
|
||||
import { handlerRegistry, QueueManager } from '../src';
|
||||
|
||||
// Suppress Redis connection errors in tests
|
||||
process.on('unhandledRejection', (reason, promise) => {
|
||||
if (reason && typeof reason === 'object' && 'message' in reason) {
|
||||
const message = (reason as Error).message;
|
||||
if (message.includes('Connection is closed') ||
|
||||
message.includes('Connection is in monitoring mode')) {
|
||||
if (
|
||||
message.includes('Connection is closed') ||
|
||||
message.includes('Connection is in monitoring mode')
|
||||
) {
|
||||
// Suppress these specific Redis errors in tests
|
||||
return;
|
||||
}
|
||||
|
|
@ -34,9 +36,7 @@ describe('QueueManager Integration Tests', () => {
|
|||
try {
|
||||
await Promise.race([
|
||||
queueManager.shutdown(),
|
||||
new Promise((_, reject) =>
|
||||
setTimeout(() => reject(new Error('Shutdown timeout')), 3000)
|
||||
)
|
||||
new Promise((_, reject) => setTimeout(() => reject(new Error('Shutdown timeout')), 3000)),
|
||||
]);
|
||||
} catch (error) {
|
||||
// Ignore shutdown errors in tests
|
||||
|
|
@ -45,10 +45,10 @@ describe('QueueManager Integration Tests', () => {
|
|||
queueManager = null as any;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Clear handler registry to prevent conflicts
|
||||
handlerRegistry.clear();
|
||||
|
||||
|
||||
// Add delay to allow connections to close
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
});
|
||||
|
|
|
|||
|
|
@ -1,371 +1,371 @@
|
|||
import { afterEach, beforeEach, describe, expect, test } from 'bun:test';
|
||||
import { handlerRegistry, QueueManager } from '../src';
|
||||
|
||||
// Suppress Redis connection errors in tests
|
||||
process.on('unhandledRejection', (reason, promise) => {
|
||||
if (reason && typeof reason === 'object' && 'message' in reason) {
|
||||
const message = (reason as Error).message;
|
||||
if (message.includes('Connection is closed') ||
|
||||
message.includes('Connection is in monitoring mode')) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
console.error('Unhandled Rejection at:', promise, 'reason:', reason);
|
||||
});
|
||||
|
||||
describe('QueueManager', () => {
|
||||
let queueManager: QueueManager;
|
||||
|
||||
// Use local Redis/Dragonfly
|
||||
const redisConfig = {
|
||||
host: 'localhost',
|
||||
port: 6379,
|
||||
password: '',
|
||||
db: 0,
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
handlerRegistry.clear();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (queueManager) {
|
||||
try {
|
||||
await Promise.race([
|
||||
queueManager.shutdown(),
|
||||
new Promise((_, reject) =>
|
||||
setTimeout(() => reject(new Error('Shutdown timeout')), 3000)
|
||||
)
|
||||
]);
|
||||
} catch (error) {
|
||||
console.warn('Shutdown error:', error.message);
|
||||
} finally {
|
||||
queueManager = null as any;
|
||||
}
|
||||
}
|
||||
|
||||
handlerRegistry.clear();
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
});
|
||||
|
||||
describe('Basic Operations', () => {
|
||||
test('should initialize queue manager', async () => {
|
||||
queueManager = new QueueManager({
|
||||
queueName: 'test-queue',
|
||||
redis: redisConfig,
|
||||
workers: 1,
|
||||
concurrency: 5,
|
||||
});
|
||||
|
||||
await queueManager.initialize();
|
||||
expect(queueManager.queueName).toBe('test-queue');
|
||||
});
|
||||
|
||||
test('should add and process a job', async () => {
|
||||
let processedPayload: any;
|
||||
|
||||
// Register handler
|
||||
handlerRegistry.register('test-handler', {
|
||||
'test-operation': async payload => {
|
||||
processedPayload = payload;
|
||||
return { success: true, data: payload };
|
||||
},
|
||||
});
|
||||
|
||||
queueManager = new QueueManager({
|
||||
queueName: 'test-queue',
|
||||
redis: redisConfig,
|
||||
workers: 1,
|
||||
});
|
||||
|
||||
await queueManager.initialize();
|
||||
|
||||
// Add job
|
||||
const job = await queueManager.add('test-job', {
|
||||
handler: 'test-handler',
|
||||
operation: 'test-operation',
|
||||
payload: { message: 'Hello, Queue!' },
|
||||
});
|
||||
|
||||
expect(job.name).toBe('test-job');
|
||||
|
||||
// Wait for processing
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
|
||||
expect(processedPayload).toEqual({ message: 'Hello, Queue!' });
|
||||
});
|
||||
|
||||
test('should handle missing handler gracefully', async () => {
|
||||
queueManager = new QueueManager({
|
||||
queueName: 'test-queue',
|
||||
redis: redisConfig,
|
||||
workers: 1,
|
||||
});
|
||||
|
||||
await queueManager.initialize();
|
||||
|
||||
const job = await queueManager.add('test-job', {
|
||||
handler: 'non-existent',
|
||||
operation: 'test-operation',
|
||||
payload: { test: true },
|
||||
});
|
||||
|
||||
// Wait for job to fail
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
|
||||
const failed = await job.isFailed();
|
||||
expect(failed).toBe(true);
|
||||
});
|
||||
|
||||
test('should add multiple jobs in bulk', async () => {
|
||||
let processedCount = 0;
|
||||
|
||||
handlerRegistry.register('bulk-handler', {
|
||||
process: async _payload => {
|
||||
processedCount++;
|
||||
return { processed: true };
|
||||
},
|
||||
});
|
||||
|
||||
queueManager = new QueueManager({
|
||||
queueName: 'test-queue',
|
||||
redis: redisConfig,
|
||||
workers: 2,
|
||||
concurrency: 5,
|
||||
});
|
||||
|
||||
await queueManager.initialize();
|
||||
|
||||
const jobs = await queueManager.addBulk([
|
||||
{
|
||||
name: 'job1',
|
||||
data: { handler: 'bulk-handler', operation: 'process', payload: { id: 1 } },
|
||||
},
|
||||
{
|
||||
name: 'job2',
|
||||
data: { handler: 'bulk-handler', operation: 'process', payload: { id: 2 } },
|
||||
},
|
||||
{
|
||||
name: 'job3',
|
||||
data: { handler: 'bulk-handler', operation: 'process', payload: { id: 3 } },
|
||||
},
|
||||
]);
|
||||
|
||||
expect(jobs.length).toBe(3);
|
||||
|
||||
// Wait for processing
|
||||
await new Promise(resolve => setTimeout(resolve, 200));
|
||||
|
||||
expect(processedCount).toBe(3);
|
||||
});
|
||||
|
||||
test('should get queue statistics', async () => {
|
||||
queueManager = new QueueManager({
|
||||
queueName: 'test-queue',
|
||||
redis: redisConfig,
|
||||
workers: 0, // No workers, jobs will stay in waiting
|
||||
});
|
||||
|
||||
await queueManager.initialize();
|
||||
|
||||
// Add some jobs
|
||||
await queueManager.add('job1', {
|
||||
handler: 'test',
|
||||
operation: 'test',
|
||||
payload: { id: 1 },
|
||||
});
|
||||
|
||||
await queueManager.add('job2', {
|
||||
handler: 'test',
|
||||
operation: 'test',
|
||||
payload: { id: 2 },
|
||||
});
|
||||
|
||||
const stats = await queueManager.getStats();
|
||||
|
||||
expect(stats.waiting).toBe(2);
|
||||
expect(stats.active).toBe(0);
|
||||
expect(stats.completed).toBe(0);
|
||||
expect(stats.failed).toBe(0);
|
||||
});
|
||||
|
||||
test('should pause and resume queue', async () => {
|
||||
let processedCount = 0;
|
||||
|
||||
handlerRegistry.register('pause-test', {
|
||||
process: async () => {
|
||||
processedCount++;
|
||||
return { ok: true };
|
||||
},
|
||||
});
|
||||
|
||||
queueManager = new QueueManager({
|
||||
queueName: 'test-queue',
|
||||
redis: redisConfig,
|
||||
workers: 1,
|
||||
});
|
||||
|
||||
await queueManager.initialize();
|
||||
|
||||
// Pause queue
|
||||
await queueManager.pause();
|
||||
|
||||
// Add job while paused
|
||||
await queueManager.add('job1', {
|
||||
handler: 'pause-test',
|
||||
operation: 'process',
|
||||
payload: {},
|
||||
});
|
||||
|
||||
// Wait a bit - job should not be processed
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
expect(processedCount).toBe(0);
|
||||
|
||||
// Resume queue
|
||||
await queueManager.resume();
|
||||
|
||||
// Wait for processing
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
expect(processedCount).toBe(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Scheduled Jobs', () => {
|
||||
test('should register and process scheduled jobs', async () => {
|
||||
let executionCount = 0;
|
||||
|
||||
handlerRegistry.registerWithSchedule({
|
||||
name: 'scheduled-handler',
|
||||
operations: {
|
||||
'scheduled-task': async _payload => {
|
||||
executionCount++;
|
||||
return { executed: true, timestamp: Date.now() };
|
||||
},
|
||||
},
|
||||
scheduledJobs: [
|
||||
{
|
||||
type: 'test-schedule',
|
||||
operation: 'scheduled-task',
|
||||
payload: { test: true },
|
||||
cronPattern: '*/1 * * * * *', // Every second
|
||||
description: 'Test scheduled job',
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
queueManager = new QueueManager({
|
||||
queueName: 'test-queue',
|
||||
redis: redisConfig,
|
||||
workers: 1,
|
||||
enableScheduledJobs: true,
|
||||
});
|
||||
|
||||
await queueManager.initialize();
|
||||
|
||||
// Wait for scheduled job to execute
|
||||
await new Promise(resolve => setTimeout(resolve, 2500));
|
||||
|
||||
expect(executionCount).toBeGreaterThanOrEqual(2);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Error Handling', () => {
|
||||
test('should handle job errors with retries', async () => {
|
||||
let attemptCount = 0;
|
||||
|
||||
handlerRegistry.register('retry-handler', {
|
||||
'failing-operation': async () => {
|
||||
attemptCount++;
|
||||
if (attemptCount < 3) {
|
||||
throw new Error(`Attempt ${attemptCount} failed`);
|
||||
}
|
||||
return { success: true };
|
||||
},
|
||||
});
|
||||
|
||||
queueManager = new QueueManager({
|
||||
queueName: 'test-queue',
|
||||
redis: redisConfig,
|
||||
workers: 1,
|
||||
defaultJobOptions: {
|
||||
attempts: 3,
|
||||
backoff: {
|
||||
type: 'fixed',
|
||||
delay: 50,
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
await queueManager.initialize();
|
||||
|
||||
const job = await queueManager.add('retry-job', {
|
||||
handler: 'retry-handler',
|
||||
operation: 'failing-operation',
|
||||
payload: {},
|
||||
});
|
||||
|
||||
// Wait for retries
|
||||
await new Promise(resolve => setTimeout(resolve, 500));
|
||||
|
||||
const completed = await job.isCompleted();
|
||||
expect(completed).toBe(true);
|
||||
expect(attemptCount).toBe(3);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Multiple Handlers', () => {
|
||||
test('should handle multiple handlers with different operations', async () => {
|
||||
const results: any[] = [];
|
||||
|
||||
handlerRegistry.register('handler-a', {
|
||||
'operation-1': async payload => {
|
||||
results.push({ handler: 'a', op: '1', payload });
|
||||
return { handler: 'a', op: '1' };
|
||||
},
|
||||
'operation-2': async payload => {
|
||||
results.push({ handler: 'a', op: '2', payload });
|
||||
return { handler: 'a', op: '2' };
|
||||
},
|
||||
});
|
||||
|
||||
handlerRegistry.register('handler-b', {
|
||||
'operation-1': async payload => {
|
||||
results.push({ handler: 'b', op: '1', payload });
|
||||
return { handler: 'b', op: '1' };
|
||||
},
|
||||
});
|
||||
|
||||
queueManager = new QueueManager({
|
||||
queueName: 'test-queue',
|
||||
redis: redisConfig,
|
||||
workers: 2,
|
||||
});
|
||||
|
||||
await queueManager.initialize();
|
||||
|
||||
// Add jobs for different handlers
|
||||
await queueManager.addBulk([
|
||||
{
|
||||
name: 'job1',
|
||||
data: { handler: 'handler-a', operation: 'operation-1', payload: { id: 1 } },
|
||||
},
|
||||
{
|
||||
name: 'job2',
|
||||
data: { handler: 'handler-a', operation: 'operation-2', payload: { id: 2 } },
|
||||
},
|
||||
{
|
||||
name: 'job3',
|
||||
data: { handler: 'handler-b', operation: 'operation-1', payload: { id: 3 } },
|
||||
},
|
||||
]);
|
||||
|
||||
// Wait for processing
|
||||
await new Promise(resolve => setTimeout(resolve, 200));
|
||||
|
||||
expect(results.length).toBe(3);
|
||||
expect(results).toContainEqual({ handler: 'a', op: '1', payload: { id: 1 } });
|
||||
expect(results).toContainEqual({ handler: 'a', op: '2', payload: { id: 2 } });
|
||||
expect(results).toContainEqual({ handler: 'b', op: '1', payload: { id: 3 } });
|
||||
});
|
||||
});
|
||||
});
|
||||
import { afterEach, beforeEach, describe, expect, test } from 'bun:test';
|
||||
import { handlerRegistry, QueueManager } from '../src';
|
||||
|
||||
// Suppress Redis connection errors in tests
|
||||
process.on('unhandledRejection', (reason, promise) => {
|
||||
if (reason && typeof reason === 'object' && 'message' in reason) {
|
||||
const message = (reason as Error).message;
|
||||
if (
|
||||
message.includes('Connection is closed') ||
|
||||
message.includes('Connection is in monitoring mode')
|
||||
) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
console.error('Unhandled Rejection at:', promise, 'reason:', reason);
|
||||
});
|
||||
|
||||
describe('QueueManager', () => {
|
||||
let queueManager: QueueManager;
|
||||
|
||||
// Use local Redis/Dragonfly
|
||||
const redisConfig = {
|
||||
host: 'localhost',
|
||||
port: 6379,
|
||||
password: '',
|
||||
db: 0,
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
handlerRegistry.clear();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (queueManager) {
|
||||
try {
|
||||
await Promise.race([
|
||||
queueManager.shutdown(),
|
||||
new Promise((_, reject) => setTimeout(() => reject(new Error('Shutdown timeout')), 3000)),
|
||||
]);
|
||||
} catch (error) {
|
||||
console.warn('Shutdown error:', error.message);
|
||||
} finally {
|
||||
queueManager = null as any;
|
||||
}
|
||||
}
|
||||
|
||||
handlerRegistry.clear();
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
});
|
||||
|
||||
describe('Basic Operations', () => {
|
||||
test('should initialize queue manager', async () => {
|
||||
queueManager = new QueueManager({
|
||||
queueName: 'test-queue',
|
||||
redis: redisConfig,
|
||||
workers: 1,
|
||||
concurrency: 5,
|
||||
});
|
||||
|
||||
await queueManager.initialize();
|
||||
expect(queueManager.queueName).toBe('test-queue');
|
||||
});
|
||||
|
||||
test('should add and process a job', async () => {
|
||||
let processedPayload: any;
|
||||
|
||||
// Register handler
|
||||
handlerRegistry.register('test-handler', {
|
||||
'test-operation': async payload => {
|
||||
processedPayload = payload;
|
||||
return { success: true, data: payload };
|
||||
},
|
||||
});
|
||||
|
||||
queueManager = new QueueManager({
|
||||
queueName: 'test-queue',
|
||||
redis: redisConfig,
|
||||
workers: 1,
|
||||
});
|
||||
|
||||
await queueManager.initialize();
|
||||
|
||||
// Add job
|
||||
const job = await queueManager.add('test-job', {
|
||||
handler: 'test-handler',
|
||||
operation: 'test-operation',
|
||||
payload: { message: 'Hello, Queue!' },
|
||||
});
|
||||
|
||||
expect(job.name).toBe('test-job');
|
||||
|
||||
// Wait for processing
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
|
||||
expect(processedPayload).toEqual({ message: 'Hello, Queue!' });
|
||||
});
|
||||
|
||||
test('should handle missing handler gracefully', async () => {
|
||||
queueManager = new QueueManager({
|
||||
queueName: 'test-queue',
|
||||
redis: redisConfig,
|
||||
workers: 1,
|
||||
});
|
||||
|
||||
await queueManager.initialize();
|
||||
|
||||
const job = await queueManager.add('test-job', {
|
||||
handler: 'non-existent',
|
||||
operation: 'test-operation',
|
||||
payload: { test: true },
|
||||
});
|
||||
|
||||
// Wait for job to fail
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
|
||||
const failed = await job.isFailed();
|
||||
expect(failed).toBe(true);
|
||||
});
|
||||
|
||||
test('should add multiple jobs in bulk', async () => {
|
||||
let processedCount = 0;
|
||||
|
||||
handlerRegistry.register('bulk-handler', {
|
||||
process: async _payload => {
|
||||
processedCount++;
|
||||
return { processed: true };
|
||||
},
|
||||
});
|
||||
|
||||
queueManager = new QueueManager({
|
||||
queueName: 'test-queue',
|
||||
redis: redisConfig,
|
||||
workers: 2,
|
||||
concurrency: 5,
|
||||
});
|
||||
|
||||
await queueManager.initialize();
|
||||
|
||||
const jobs = await queueManager.addBulk([
|
||||
{
|
||||
name: 'job1',
|
||||
data: { handler: 'bulk-handler', operation: 'process', payload: { id: 1 } },
|
||||
},
|
||||
{
|
||||
name: 'job2',
|
||||
data: { handler: 'bulk-handler', operation: 'process', payload: { id: 2 } },
|
||||
},
|
||||
{
|
||||
name: 'job3',
|
||||
data: { handler: 'bulk-handler', operation: 'process', payload: { id: 3 } },
|
||||
},
|
||||
]);
|
||||
|
||||
expect(jobs.length).toBe(3);
|
||||
|
||||
// Wait for processing
|
||||
await new Promise(resolve => setTimeout(resolve, 200));
|
||||
|
||||
expect(processedCount).toBe(3);
|
||||
});
|
||||
|
||||
test('should get queue statistics', async () => {
|
||||
queueManager = new QueueManager({
|
||||
queueName: 'test-queue',
|
||||
redis: redisConfig,
|
||||
workers: 0, // No workers, jobs will stay in waiting
|
||||
});
|
||||
|
||||
await queueManager.initialize();
|
||||
|
||||
// Add some jobs
|
||||
await queueManager.add('job1', {
|
||||
handler: 'test',
|
||||
operation: 'test',
|
||||
payload: { id: 1 },
|
||||
});
|
||||
|
||||
await queueManager.add('job2', {
|
||||
handler: 'test',
|
||||
operation: 'test',
|
||||
payload: { id: 2 },
|
||||
});
|
||||
|
||||
const stats = await queueManager.getStats();
|
||||
|
||||
expect(stats.waiting).toBe(2);
|
||||
expect(stats.active).toBe(0);
|
||||
expect(stats.completed).toBe(0);
|
||||
expect(stats.failed).toBe(0);
|
||||
});
|
||||
|
||||
test('should pause and resume queue', async () => {
|
||||
let processedCount = 0;
|
||||
|
||||
handlerRegistry.register('pause-test', {
|
||||
process: async () => {
|
||||
processedCount++;
|
||||
return { ok: true };
|
||||
},
|
||||
});
|
||||
|
||||
queueManager = new QueueManager({
|
||||
queueName: 'test-queue',
|
||||
redis: redisConfig,
|
||||
workers: 1,
|
||||
});
|
||||
|
||||
await queueManager.initialize();
|
||||
|
||||
// Pause queue
|
||||
await queueManager.pause();
|
||||
|
||||
// Add job while paused
|
||||
await queueManager.add('job1', {
|
||||
handler: 'pause-test',
|
||||
operation: 'process',
|
||||
payload: {},
|
||||
});
|
||||
|
||||
// Wait a bit - job should not be processed
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
expect(processedCount).toBe(0);
|
||||
|
||||
// Resume queue
|
||||
await queueManager.resume();
|
||||
|
||||
// Wait for processing
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
expect(processedCount).toBe(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Scheduled Jobs', () => {
|
||||
test('should register and process scheduled jobs', async () => {
|
||||
let executionCount = 0;
|
||||
|
||||
handlerRegistry.registerWithSchedule({
|
||||
name: 'scheduled-handler',
|
||||
operations: {
|
||||
'scheduled-task': async _payload => {
|
||||
executionCount++;
|
||||
return { executed: true, timestamp: Date.now() };
|
||||
},
|
||||
},
|
||||
scheduledJobs: [
|
||||
{
|
||||
type: 'test-schedule',
|
||||
operation: 'scheduled-task',
|
||||
payload: { test: true },
|
||||
cronPattern: '*/1 * * * * *', // Every second
|
||||
description: 'Test scheduled job',
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
queueManager = new QueueManager({
|
||||
queueName: 'test-queue',
|
||||
redis: redisConfig,
|
||||
workers: 1,
|
||||
enableScheduledJobs: true,
|
||||
});
|
||||
|
||||
await queueManager.initialize();
|
||||
|
||||
// Wait for scheduled job to execute
|
||||
await new Promise(resolve => setTimeout(resolve, 2500));
|
||||
|
||||
expect(executionCount).toBeGreaterThanOrEqual(2);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Error Handling', () => {
|
||||
test('should handle job errors with retries', async () => {
|
||||
let attemptCount = 0;
|
||||
|
||||
handlerRegistry.register('retry-handler', {
|
||||
'failing-operation': async () => {
|
||||
attemptCount++;
|
||||
if (attemptCount < 3) {
|
||||
throw new Error(`Attempt ${attemptCount} failed`);
|
||||
}
|
||||
return { success: true };
|
||||
},
|
||||
});
|
||||
|
||||
queueManager = new QueueManager({
|
||||
queueName: 'test-queue',
|
||||
redis: redisConfig,
|
||||
workers: 1,
|
||||
defaultJobOptions: {
|
||||
attempts: 3,
|
||||
backoff: {
|
||||
type: 'fixed',
|
||||
delay: 50,
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
await queueManager.initialize();
|
||||
|
||||
const job = await queueManager.add('retry-job', {
|
||||
handler: 'retry-handler',
|
||||
operation: 'failing-operation',
|
||||
payload: {},
|
||||
});
|
||||
|
||||
// Wait for retries
|
||||
await new Promise(resolve => setTimeout(resolve, 500));
|
||||
|
||||
const completed = await job.isCompleted();
|
||||
expect(completed).toBe(true);
|
||||
expect(attemptCount).toBe(3);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Multiple Handlers', () => {
|
||||
test('should handle multiple handlers with different operations', async () => {
|
||||
const results: any[] = [];
|
||||
|
||||
handlerRegistry.register('handler-a', {
|
||||
'operation-1': async payload => {
|
||||
results.push({ handler: 'a', op: '1', payload });
|
||||
return { handler: 'a', op: '1' };
|
||||
},
|
||||
'operation-2': async payload => {
|
||||
results.push({ handler: 'a', op: '2', payload });
|
||||
return { handler: 'a', op: '2' };
|
||||
},
|
||||
});
|
||||
|
||||
handlerRegistry.register('handler-b', {
|
||||
'operation-1': async payload => {
|
||||
results.push({ handler: 'b', op: '1', payload });
|
||||
return { handler: 'b', op: '1' };
|
||||
},
|
||||
});
|
||||
|
||||
queueManager = new QueueManager({
|
||||
queueName: 'test-queue',
|
||||
redis: redisConfig,
|
||||
workers: 2,
|
||||
});
|
||||
|
||||
await queueManager.initialize();
|
||||
|
||||
// Add jobs for different handlers
|
||||
await queueManager.addBulk([
|
||||
{
|
||||
name: 'job1',
|
||||
data: { handler: 'handler-a', operation: 'operation-1', payload: { id: 1 } },
|
||||
},
|
||||
{
|
||||
name: 'job2',
|
||||
data: { handler: 'handler-a', operation: 'operation-2', payload: { id: 2 } },
|
||||
},
|
||||
{
|
||||
name: 'job3',
|
||||
data: { handler: 'handler-b', operation: 'operation-1', payload: { id: 3 } },
|
||||
},
|
||||
]);
|
||||
|
||||
// Wait for processing
|
||||
await new Promise(resolve => setTimeout(resolve, 200));
|
||||
|
||||
expect(results.length).toBe(3);
|
||||
expect(results).toContainEqual({ handler: 'a', op: '1', payload: { id: 1 } });
|
||||
expect(results).toContainEqual({ handler: 'a', op: '2', payload: { id: 2 } });
|
||||
expect(results).toContainEqual({ handler: 'b', op: '1', payload: { id: 3 } });
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@ -1,303 +1,327 @@
|
|||
import { describe, test, expect, beforeEach, afterEach } from 'bun:test';
|
||||
import { Queue, QueueEvents, Worker } from 'bullmq';
|
||||
import { QueueMetricsCollector } from '../src/queue-metrics';
|
||||
import { getRedisConnection } from '../src/utils';
|
||||
|
||||
// Suppress Redis connection errors in tests
|
||||
process.on('unhandledRejection', (reason, promise) => {
|
||||
if (reason && typeof reason === 'object' && 'message' in reason) {
|
||||
const message = (reason as Error).message;
|
||||
if (message.includes('Connection is closed') ||
|
||||
message.includes('Connection is in monitoring mode')) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
console.error('Unhandled Rejection at:', promise, 'reason:', reason);
|
||||
});
|
||||
|
||||
describe('QueueMetricsCollector', () => {
|
||||
let queue: Queue;
|
||||
let queueEvents: QueueEvents;
|
||||
let metricsCollector: QueueMetricsCollector;
|
||||
let worker: Worker;
|
||||
let connection: any;
|
||||
|
||||
const redisConfig = {
|
||||
host: 'localhost',
|
||||
port: 6379,
|
||||
password: '',
|
||||
db: 0,
|
||||
};
|
||||
|
||||
beforeEach(async () => {
|
||||
connection = getRedisConnection(redisConfig);
|
||||
|
||||
// Create queue and events
|
||||
queue = new Queue('metrics-test-queue', { connection });
|
||||
queueEvents = new QueueEvents('metrics-test-queue', { connection });
|
||||
|
||||
// Create metrics collector
|
||||
metricsCollector = new QueueMetricsCollector(queue, queueEvents);
|
||||
|
||||
// Wait for connections
|
||||
await queue.waitUntilReady();
|
||||
await queueEvents.waitUntilReady();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
try {
|
||||
if (worker) {
|
||||
await worker.close();
|
||||
}
|
||||
await queueEvents.close();
|
||||
await queue.close();
|
||||
} catch {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
await new Promise(resolve => setTimeout(resolve, 50));
|
||||
});
|
||||
|
||||
describe('Job Count Metrics', () => {
|
||||
test('should collect basic job counts', async () => {
|
||||
// Add jobs in different states
|
||||
await queue.add('waiting-job', { test: true });
|
||||
await queue.add('delayed-job', { test: true }, { delay: 60000 });
|
||||
|
||||
const metrics = await metricsCollector.collect();
|
||||
|
||||
expect(metrics.waiting).toBe(1);
|
||||
expect(metrics.delayed).toBe(1);
|
||||
expect(metrics.active).toBe(0);
|
||||
expect(metrics.completed).toBe(0);
|
||||
expect(metrics.failed).toBe(0);
|
||||
});
|
||||
|
||||
test('should track completed and failed jobs', async () => {
|
||||
let jobCount = 0;
|
||||
|
||||
// Create worker that alternates between success and failure
|
||||
worker = new Worker('metrics-test-queue', async () => {
|
||||
jobCount++;
|
||||
if (jobCount % 2 === 0) {
|
||||
throw new Error('Test failure');
|
||||
}
|
||||
return { success: true };
|
||||
}, { connection });
|
||||
|
||||
// Add jobs
|
||||
await queue.add('job1', { test: 1 });
|
||||
await queue.add('job2', { test: 2 });
|
||||
await queue.add('job3', { test: 3 });
|
||||
await queue.add('job4', { test: 4 });
|
||||
|
||||
// Wait for processing
|
||||
await new Promise(resolve => setTimeout(resolve, 200));
|
||||
|
||||
const metrics = await metricsCollector.collect();
|
||||
|
||||
expect(metrics.completed).toBe(2);
|
||||
expect(metrics.failed).toBe(2);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Processing Time Metrics', () => {
|
||||
test('should track processing times', async () => {
|
||||
const processingTimes = [50, 100, 150, 200, 250];
|
||||
let jobIndex = 0;
|
||||
|
||||
// Create worker with variable processing times
|
||||
worker = new Worker('metrics-test-queue', async () => {
|
||||
const delay = processingTimes[jobIndex++] || 100;
|
||||
await new Promise(resolve => setTimeout(resolve, delay));
|
||||
return { processed: true };
|
||||
}, { connection });
|
||||
|
||||
// Add jobs
|
||||
for (let i = 0; i < processingTimes.length; i++) {
|
||||
await queue.add(`job${i}`, { index: i });
|
||||
}
|
||||
|
||||
// Wait for processing
|
||||
await new Promise(resolve => setTimeout(resolve, 1500));
|
||||
|
||||
const metrics = await metricsCollector.collect();
|
||||
|
||||
expect(metrics.processingTime.avg).toBeGreaterThan(0);
|
||||
expect(metrics.processingTime.min).toBeGreaterThanOrEqual(50);
|
||||
expect(metrics.processingTime.max).toBeLessThanOrEqual(300);
|
||||
expect(metrics.processingTime.p95).toBeGreaterThan(metrics.processingTime.avg);
|
||||
});
|
||||
|
||||
test('should handle empty processing times', async () => {
|
||||
const metrics = await metricsCollector.collect();
|
||||
|
||||
expect(metrics.processingTime).toEqual({
|
||||
avg: 0,
|
||||
min: 0,
|
||||
max: 0,
|
||||
p95: 0,
|
||||
p99: 0,
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('Throughput Metrics', () => {
|
||||
test('should calculate throughput correctly', async () => {
|
||||
// Create fast worker
|
||||
worker = new Worker('metrics-test-queue', async () => {
|
||||
return { success: true };
|
||||
}, { connection, concurrency: 5 });
|
||||
|
||||
// Add multiple jobs
|
||||
const jobPromises = [];
|
||||
for (let i = 0; i < 10; i++) {
|
||||
jobPromises.push(queue.add(`job${i}`, { index: i }));
|
||||
}
|
||||
await Promise.all(jobPromises);
|
||||
|
||||
// Wait for processing
|
||||
await new Promise(resolve => setTimeout(resolve, 500));
|
||||
|
||||
const metrics = await metricsCollector.collect();
|
||||
|
||||
expect(metrics.throughput.completedPerMinute).toBeGreaterThan(0);
|
||||
expect(metrics.throughput.totalPerMinute).toBe(
|
||||
metrics.throughput.completedPerMinute + metrics.throughput.failedPerMinute
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Queue Health', () => {
|
||||
test('should report healthy queue', async () => {
|
||||
const metrics = await metricsCollector.collect();
|
||||
|
||||
expect(metrics.isHealthy).toBe(true);
|
||||
expect(metrics.healthIssues).toEqual([]);
|
||||
});
|
||||
|
||||
test('should detect high failure rate', async () => {
|
||||
// Create worker that always fails
|
||||
worker = new Worker('metrics-test-queue', async () => {
|
||||
throw new Error('Always fails');
|
||||
}, { connection });
|
||||
|
||||
// Add jobs
|
||||
for (let i = 0; i < 10; i++) {
|
||||
await queue.add(`job${i}`, { index: i });
|
||||
}
|
||||
|
||||
// Wait for failures
|
||||
await new Promise(resolve => setTimeout(resolve, 500));
|
||||
|
||||
const metrics = await metricsCollector.collect();
|
||||
|
||||
expect(metrics.isHealthy).toBe(false);
|
||||
expect(metrics.healthIssues).toContain(
|
||||
expect.stringMatching(/High failure rate/)
|
||||
);
|
||||
});
|
||||
|
||||
test('should detect large queue backlog', async () => {
|
||||
// Add many jobs without workers
|
||||
for (let i = 0; i < 1001; i++) {
|
||||
await queue.add(`job${i}`, { index: i });
|
||||
}
|
||||
|
||||
const metrics = await metricsCollector.collect();
|
||||
|
||||
expect(metrics.isHealthy).toBe(false);
|
||||
expect(metrics.healthIssues).toContain(
|
||||
expect.stringMatching(/Large queue backlog/)
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Oldest Waiting Job', () => {
|
||||
test('should track oldest waiting job', async () => {
|
||||
const beforeAdd = Date.now();
|
||||
|
||||
// Add jobs with delays
|
||||
await queue.add('old-job', { test: true });
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
await queue.add('new-job', { test: true });
|
||||
|
||||
const metrics = await metricsCollector.collect();
|
||||
|
||||
expect(metrics.oldestWaitingJob).toBeDefined();
|
||||
expect(metrics.oldestWaitingJob!.getTime()).toBeGreaterThanOrEqual(beforeAdd);
|
||||
});
|
||||
|
||||
test('should return null when no waiting jobs', async () => {
|
||||
// Create worker that processes immediately
|
||||
worker = new Worker('metrics-test-queue', async () => {
|
||||
return { success: true };
|
||||
}, { connection });
|
||||
|
||||
const metrics = await metricsCollector.collect();
|
||||
expect(metrics.oldestWaitingJob).toBe(null);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Metrics Report', () => {
|
||||
test('should generate formatted report', async () => {
|
||||
// Add some jobs
|
||||
await queue.add('job1', { test: true });
|
||||
await queue.add('job2', { test: true }, { delay: 5000 });
|
||||
|
||||
const report = await metricsCollector.getReport();
|
||||
|
||||
expect(report).toContain('Queue Metrics Report');
|
||||
expect(report).toContain('Status:');
|
||||
expect(report).toContain('Job Counts:');
|
||||
expect(report).toContain('Performance:');
|
||||
expect(report).toContain('Throughput:');
|
||||
expect(report).toContain('Waiting: 1');
|
||||
expect(report).toContain('Delayed: 1');
|
||||
});
|
||||
|
||||
test('should include health issues in report', async () => {
|
||||
// Add many jobs to trigger health issue
|
||||
for (let i = 0; i < 1001; i++) {
|
||||
await queue.add(`job${i}`, { index: i });
|
||||
}
|
||||
|
||||
const report = await metricsCollector.getReport();
|
||||
|
||||
expect(report).toContain('Issues Detected');
|
||||
expect(report).toContain('Health Issues:');
|
||||
expect(report).toContain('Large queue backlog');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Prometheus Metrics', () => {
|
||||
test('should export metrics in Prometheus format', async () => {
|
||||
// Add some jobs and process them
|
||||
worker = new Worker('metrics-test-queue', async () => {
|
||||
await new Promise(resolve => setTimeout(resolve, 50));
|
||||
return { success: true };
|
||||
}, { connection });
|
||||
|
||||
await queue.add('job1', { test: true });
|
||||
await queue.add('job2', { test: true });
|
||||
|
||||
// Wait for processing
|
||||
await new Promise(resolve => setTimeout(resolve, 200));
|
||||
|
||||
const prometheusMetrics = await metricsCollector.getPrometheusMetrics();
|
||||
|
||||
// Check format
|
||||
expect(prometheusMetrics).toContain('# HELP queue_jobs_total');
|
||||
expect(prometheusMetrics).toContain('# TYPE queue_jobs_total gauge');
|
||||
expect(prometheusMetrics).toContain('queue_jobs_total{queue="metrics-test-queue",status="completed"}');
|
||||
|
||||
expect(prometheusMetrics).toContain('# HELP queue_processing_time_seconds');
|
||||
expect(prometheusMetrics).toContain('# TYPE queue_processing_time_seconds summary');
|
||||
|
||||
expect(prometheusMetrics).toContain('# HELP queue_throughput_per_minute');
|
||||
expect(prometheusMetrics).toContain('# TYPE queue_throughput_per_minute gauge');
|
||||
|
||||
expect(prometheusMetrics).toContain('# HELP queue_health');
|
||||
expect(prometheusMetrics).toContain('# TYPE queue_health gauge');
|
||||
});
|
||||
});
|
||||
});
|
||||
import { Queue, QueueEvents, Worker } from 'bullmq';
|
||||
import { afterEach, beforeEach, describe, expect, test } from 'bun:test';
|
||||
import { QueueMetricsCollector } from '../src/queue-metrics';
|
||||
import { getRedisConnection } from '../src/utils';
|
||||
|
||||
// Suppress Redis connection errors in tests
|
||||
process.on('unhandledRejection', (reason, promise) => {
|
||||
if (reason && typeof reason === 'object' && 'message' in reason) {
|
||||
const message = (reason as Error).message;
|
||||
if (
|
||||
message.includes('Connection is closed') ||
|
||||
message.includes('Connection is in monitoring mode')
|
||||
) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
console.error('Unhandled Rejection at:', promise, 'reason:', reason);
|
||||
});
|
||||
|
||||
describe('QueueMetricsCollector', () => {
|
||||
let queue: Queue;
|
||||
let queueEvents: QueueEvents;
|
||||
let metricsCollector: QueueMetricsCollector;
|
||||
let worker: Worker;
|
||||
let connection: any;
|
||||
|
||||
const redisConfig = {
|
||||
host: 'localhost',
|
||||
port: 6379,
|
||||
password: '',
|
||||
db: 0,
|
||||
};
|
||||
|
||||
beforeEach(async () => {
|
||||
connection = getRedisConnection(redisConfig);
|
||||
|
||||
// Create queue and events
|
||||
queue = new Queue('metrics-test-queue', { connection });
|
||||
queueEvents = new QueueEvents('metrics-test-queue', { connection });
|
||||
|
||||
// Create metrics collector
|
||||
metricsCollector = new QueueMetricsCollector(queue, queueEvents);
|
||||
|
||||
// Wait for connections
|
||||
await queue.waitUntilReady();
|
||||
await queueEvents.waitUntilReady();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
try {
|
||||
if (worker) {
|
||||
await worker.close();
|
||||
}
|
||||
await queueEvents.close();
|
||||
await queue.close();
|
||||
} catch {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
await new Promise(resolve => setTimeout(resolve, 50));
|
||||
});
|
||||
|
||||
describe('Job Count Metrics', () => {
|
||||
test('should collect basic job counts', async () => {
|
||||
// Add jobs in different states
|
||||
await queue.add('waiting-job', { test: true });
|
||||
await queue.add('delayed-job', { test: true }, { delay: 60000 });
|
||||
|
||||
const metrics = await metricsCollector.collect();
|
||||
|
||||
expect(metrics.waiting).toBe(1);
|
||||
expect(metrics.delayed).toBe(1);
|
||||
expect(metrics.active).toBe(0);
|
||||
expect(metrics.completed).toBe(0);
|
||||
expect(metrics.failed).toBe(0);
|
||||
});
|
||||
|
||||
test('should track completed and failed jobs', async () => {
|
||||
let jobCount = 0;
|
||||
|
||||
// Create worker that alternates between success and failure
|
||||
worker = new Worker(
|
||||
'metrics-test-queue',
|
||||
async () => {
|
||||
jobCount++;
|
||||
if (jobCount % 2 === 0) {
|
||||
throw new Error('Test failure');
|
||||
}
|
||||
return { success: true };
|
||||
},
|
||||
{ connection }
|
||||
);
|
||||
|
||||
// Add jobs
|
||||
await queue.add('job1', { test: 1 });
|
||||
await queue.add('job2', { test: 2 });
|
||||
await queue.add('job3', { test: 3 });
|
||||
await queue.add('job4', { test: 4 });
|
||||
|
||||
// Wait for processing
|
||||
await new Promise(resolve => setTimeout(resolve, 200));
|
||||
|
||||
const metrics = await metricsCollector.collect();
|
||||
|
||||
expect(metrics.completed).toBe(2);
|
||||
expect(metrics.failed).toBe(2);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Processing Time Metrics', () => {
|
||||
test('should track processing times', async () => {
|
||||
const processingTimes = [50, 100, 150, 200, 250];
|
||||
let jobIndex = 0;
|
||||
|
||||
// Create worker with variable processing times
|
||||
worker = new Worker(
|
||||
'metrics-test-queue',
|
||||
async () => {
|
||||
const delay = processingTimes[jobIndex++] || 100;
|
||||
await new Promise(resolve => setTimeout(resolve, delay));
|
||||
return { processed: true };
|
||||
},
|
||||
{ connection }
|
||||
);
|
||||
|
||||
// Add jobs
|
||||
for (let i = 0; i < processingTimes.length; i++) {
|
||||
await queue.add(`job${i}`, { index: i });
|
||||
}
|
||||
|
||||
// Wait for processing
|
||||
await new Promise(resolve => setTimeout(resolve, 1500));
|
||||
|
||||
const metrics = await metricsCollector.collect();
|
||||
|
||||
expect(metrics.processingTime.avg).toBeGreaterThan(0);
|
||||
expect(metrics.processingTime.min).toBeGreaterThanOrEqual(50);
|
||||
expect(metrics.processingTime.max).toBeLessThanOrEqual(300);
|
||||
expect(metrics.processingTime.p95).toBeGreaterThan(metrics.processingTime.avg);
|
||||
});
|
||||
|
||||
test('should handle empty processing times', async () => {
|
||||
const metrics = await metricsCollector.collect();
|
||||
|
||||
expect(metrics.processingTime).toEqual({
|
||||
avg: 0,
|
||||
min: 0,
|
||||
max: 0,
|
||||
p95: 0,
|
||||
p99: 0,
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('Throughput Metrics', () => {
|
||||
test('should calculate throughput correctly', async () => {
|
||||
// Create fast worker
|
||||
worker = new Worker(
|
||||
'metrics-test-queue',
|
||||
async () => {
|
||||
return { success: true };
|
||||
},
|
||||
{ connection, concurrency: 5 }
|
||||
);
|
||||
|
||||
// Add multiple jobs
|
||||
const jobPromises = [];
|
||||
for (let i = 0; i < 10; i++) {
|
||||
jobPromises.push(queue.add(`job${i}`, { index: i }));
|
||||
}
|
||||
await Promise.all(jobPromises);
|
||||
|
||||
// Wait for processing
|
||||
await new Promise(resolve => setTimeout(resolve, 500));
|
||||
|
||||
const metrics = await metricsCollector.collect();
|
||||
|
||||
expect(metrics.throughput.completedPerMinute).toBeGreaterThan(0);
|
||||
expect(metrics.throughput.totalPerMinute).toBe(
|
||||
metrics.throughput.completedPerMinute + metrics.throughput.failedPerMinute
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Queue Health', () => {
|
||||
test('should report healthy queue', async () => {
|
||||
const metrics = await metricsCollector.collect();
|
||||
|
||||
expect(metrics.isHealthy).toBe(true);
|
||||
expect(metrics.healthIssues).toEqual([]);
|
||||
});
|
||||
|
||||
test('should detect high failure rate', async () => {
|
||||
// Create worker that always fails
|
||||
worker = new Worker(
|
||||
'metrics-test-queue',
|
||||
async () => {
|
||||
throw new Error('Always fails');
|
||||
},
|
||||
{ connection }
|
||||
);
|
||||
|
||||
// Add jobs
|
||||
for (let i = 0; i < 10; i++) {
|
||||
await queue.add(`job${i}`, { index: i });
|
||||
}
|
||||
|
||||
// Wait for failures
|
||||
await new Promise(resolve => setTimeout(resolve, 500));
|
||||
|
||||
const metrics = await metricsCollector.collect();
|
||||
|
||||
expect(metrics.isHealthy).toBe(false);
|
||||
expect(metrics.healthIssues).toContain(expect.stringMatching(/High failure rate/));
|
||||
});
|
||||
|
||||
test('should detect large queue backlog', async () => {
|
||||
// Add many jobs without workers
|
||||
for (let i = 0; i < 1001; i++) {
|
||||
await queue.add(`job${i}`, { index: i });
|
||||
}
|
||||
|
||||
const metrics = await metricsCollector.collect();
|
||||
|
||||
expect(metrics.isHealthy).toBe(false);
|
||||
expect(metrics.healthIssues).toContain(expect.stringMatching(/Large queue backlog/));
|
||||
});
|
||||
});
|
||||
|
||||
describe('Oldest Waiting Job', () => {
|
||||
test('should track oldest waiting job', async () => {
|
||||
const beforeAdd = Date.now();
|
||||
|
||||
// Add jobs with delays
|
||||
await queue.add('old-job', { test: true });
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
await queue.add('new-job', { test: true });
|
||||
|
||||
const metrics = await metricsCollector.collect();
|
||||
|
||||
expect(metrics.oldestWaitingJob).toBeDefined();
|
||||
expect(metrics.oldestWaitingJob!.getTime()).toBeGreaterThanOrEqual(beforeAdd);
|
||||
});
|
||||
|
||||
test('should return null when no waiting jobs', async () => {
|
||||
// Create worker that processes immediately
|
||||
worker = new Worker(
|
||||
'metrics-test-queue',
|
||||
async () => {
|
||||
return { success: true };
|
||||
},
|
||||
{ connection }
|
||||
);
|
||||
|
||||
const metrics = await metricsCollector.collect();
|
||||
expect(metrics.oldestWaitingJob).toBe(null);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Metrics Report', () => {
|
||||
test('should generate formatted report', async () => {
|
||||
// Add some jobs
|
||||
await queue.add('job1', { test: true });
|
||||
await queue.add('job2', { test: true }, { delay: 5000 });
|
||||
|
||||
const report = await metricsCollector.getReport();
|
||||
|
||||
expect(report).toContain('Queue Metrics Report');
|
||||
expect(report).toContain('Status:');
|
||||
expect(report).toContain('Job Counts:');
|
||||
expect(report).toContain('Performance:');
|
||||
expect(report).toContain('Throughput:');
|
||||
expect(report).toContain('Waiting: 1');
|
||||
expect(report).toContain('Delayed: 1');
|
||||
});
|
||||
|
||||
test('should include health issues in report', async () => {
|
||||
// Add many jobs to trigger health issue
|
||||
for (let i = 0; i < 1001; i++) {
|
||||
await queue.add(`job${i}`, { index: i });
|
||||
}
|
||||
|
||||
const report = await metricsCollector.getReport();
|
||||
|
||||
expect(report).toContain('Issues Detected');
|
||||
expect(report).toContain('Health Issues:');
|
||||
expect(report).toContain('Large queue backlog');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Prometheus Metrics', () => {
|
||||
test('should export metrics in Prometheus format', async () => {
|
||||
// Add some jobs and process them
|
||||
worker = new Worker(
|
||||
'metrics-test-queue',
|
||||
async () => {
|
||||
await new Promise(resolve => setTimeout(resolve, 50));
|
||||
return { success: true };
|
||||
},
|
||||
{ connection }
|
||||
);
|
||||
|
||||
await queue.add('job1', { test: true });
|
||||
await queue.add('job2', { test: true });
|
||||
|
||||
// Wait for processing
|
||||
await new Promise(resolve => setTimeout(resolve, 200));
|
||||
|
||||
const prometheusMetrics = await metricsCollector.getPrometheusMetrics();
|
||||
|
||||
// Check format
|
||||
expect(prometheusMetrics).toContain('# HELP queue_jobs_total');
|
||||
expect(prometheusMetrics).toContain('# TYPE queue_jobs_total gauge');
|
||||
expect(prometheusMetrics).toContain(
|
||||
'queue_jobs_total{queue="metrics-test-queue",status="completed"}'
|
||||
);
|
||||
|
||||
expect(prometheusMetrics).toContain('# HELP queue_processing_time_seconds');
|
||||
expect(prometheusMetrics).toContain('# TYPE queue_processing_time_seconds summary');
|
||||
|
||||
expect(prometheusMetrics).toContain('# HELP queue_throughput_per_minute');
|
||||
expect(prometheusMetrics).toContain('# TYPE queue_throughput_per_minute gauge');
|
||||
|
||||
expect(prometheusMetrics).toContain('# HELP queue_health');
|
||||
expect(prometheusMetrics).toContain('# TYPE queue_health gauge');
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@ -1,81 +1,81 @@
|
|||
import { describe, test, expect, beforeEach, afterEach } from 'bun:test';
|
||||
import { QueueManager, handlerRegistry } from '../src';
|
||||
|
||||
describe('QueueManager Simple Tests', () => {
|
||||
let queueManager: QueueManager;
|
||||
|
||||
// Assumes Redis is running locally on default port
|
||||
const redisConfig = {
|
||||
host: 'localhost',
|
||||
port: 6379,
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
handlerRegistry.clear();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (queueManager) {
|
||||
try {
|
||||
await queueManager.shutdown();
|
||||
} catch {
|
||||
// Ignore errors during cleanup
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
test('should create queue manager instance', () => {
|
||||
queueManager = new QueueManager({
|
||||
queueName: 'test-queue',
|
||||
redis: redisConfig,
|
||||
});
|
||||
|
||||
expect(queueManager.queueName).toBe('test-queue');
|
||||
});
|
||||
|
||||
test('should handle missing Redis gracefully', async () => {
|
||||
// Use a port that's likely not running Redis
|
||||
queueManager = new QueueManager({
|
||||
queueName: 'test-queue',
|
||||
redis: {
|
||||
host: 'localhost',
|
||||
port: 9999,
|
||||
},
|
||||
});
|
||||
|
||||
await expect(queueManager.initialize()).rejects.toThrow();
|
||||
});
|
||||
|
||||
test('handler registry should work', () => {
|
||||
const testHandler = async (payload: any) => {
|
||||
return { success: true, payload };
|
||||
};
|
||||
|
||||
handlerRegistry.register('test-handler', {
|
||||
'test-op': testHandler,
|
||||
});
|
||||
|
||||
const handler = handlerRegistry.getHandler('test-handler', 'test-op');
|
||||
expect(handler).toBe(testHandler);
|
||||
});
|
||||
|
||||
test('handler registry should return null for missing handler', () => {
|
||||
const handler = handlerRegistry.getHandler('missing', 'op');
|
||||
expect(handler).toBe(null);
|
||||
});
|
||||
|
||||
test('should get handler statistics', () => {
|
||||
handlerRegistry.register('handler1', {
|
||||
'op1': async () => ({}),
|
||||
'op2': async () => ({}),
|
||||
});
|
||||
|
||||
handlerRegistry.register('handler2', {
|
||||
'op1': async () => ({}),
|
||||
});
|
||||
|
||||
const stats = handlerRegistry.getStats();
|
||||
expect(stats.handlers).toBe(2);
|
||||
expect(stats.totalOperations).toBe(3);
|
||||
});
|
||||
});
|
||||
import { afterEach, beforeEach, describe, expect, test } from 'bun:test';
|
||||
import { handlerRegistry, QueueManager } from '../src';
|
||||
|
||||
describe('QueueManager Simple Tests', () => {
|
||||
let queueManager: QueueManager;
|
||||
|
||||
// Assumes Redis is running locally on default port
|
||||
const redisConfig = {
|
||||
host: 'localhost',
|
||||
port: 6379,
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
handlerRegistry.clear();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (queueManager) {
|
||||
try {
|
||||
await queueManager.shutdown();
|
||||
} catch {
|
||||
// Ignore errors during cleanup
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
test('should create queue manager instance', () => {
|
||||
queueManager = new QueueManager({
|
||||
queueName: 'test-queue',
|
||||
redis: redisConfig,
|
||||
});
|
||||
|
||||
expect(queueManager.queueName).toBe('test-queue');
|
||||
});
|
||||
|
||||
test('should handle missing Redis gracefully', async () => {
|
||||
// Use a port that's likely not running Redis
|
||||
queueManager = new QueueManager({
|
||||
queueName: 'test-queue',
|
||||
redis: {
|
||||
host: 'localhost',
|
||||
port: 9999,
|
||||
},
|
||||
});
|
||||
|
||||
await expect(queueManager.initialize()).rejects.toThrow();
|
||||
});
|
||||
|
||||
test('handler registry should work', () => {
|
||||
const testHandler = async (payload: any) => {
|
||||
return { success: true, payload };
|
||||
};
|
||||
|
||||
handlerRegistry.register('test-handler', {
|
||||
'test-op': testHandler,
|
||||
});
|
||||
|
||||
const handler = handlerRegistry.getHandler('test-handler', 'test-op');
|
||||
expect(handler).toBe(testHandler);
|
||||
});
|
||||
|
||||
test('handler registry should return null for missing handler', () => {
|
||||
const handler = handlerRegistry.getHandler('missing', 'op');
|
||||
expect(handler).toBe(null);
|
||||
});
|
||||
|
||||
test('should get handler statistics', () => {
|
||||
handlerRegistry.register('handler1', {
|
||||
op1: async () => ({}),
|
||||
op2: async () => ({}),
|
||||
});
|
||||
|
||||
handlerRegistry.register('handler2', {
|
||||
op1: async () => ({}),
|
||||
});
|
||||
|
||||
const stats = handlerRegistry.getStats();
|
||||
expect(stats.handlers).toBe(2);
|
||||
expect(stats.totalOperations).toBe(3);
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@ -1,309 +1,311 @@
|
|||
import { describe, test, expect, beforeEach, afterEach } from 'bun:test';
|
||||
import { QueueRateLimiter } from '../src/rate-limiter';
|
||||
import { getRedisConnection } from '../src/utils';
|
||||
import Redis from 'ioredis';
|
||||
|
||||
// Suppress Redis connection errors in tests
|
||||
process.on('unhandledRejection', (reason, promise) => {
|
||||
if (reason && typeof reason === 'object' && 'message' in reason) {
|
||||
const message = (reason as Error).message;
|
||||
if (message.includes('Connection is closed') ||
|
||||
message.includes('Connection is in monitoring mode')) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
console.error('Unhandled Rejection at:', promise, 'reason:', reason);
|
||||
});
|
||||
|
||||
describe('QueueRateLimiter', () => {
|
||||
let redisClient: Redis;
|
||||
let rateLimiter: QueueRateLimiter;
|
||||
|
||||
const redisConfig = {
|
||||
host: 'localhost',
|
||||
port: 6379,
|
||||
password: '',
|
||||
db: 0,
|
||||
};
|
||||
|
||||
beforeEach(async () => {
|
||||
// Create Redis client
|
||||
redisClient = new Redis(getRedisConnection(redisConfig));
|
||||
|
||||
// Clear Redis keys for tests
|
||||
try {
|
||||
const keys = await redisClient.keys('rl:*');
|
||||
if (keys.length > 0) {
|
||||
await redisClient.del(...keys);
|
||||
}
|
||||
} catch {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
rateLimiter = new QueueRateLimiter(redisClient);
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (redisClient) {
|
||||
try {
|
||||
await redisClient.quit();
|
||||
} catch {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
}
|
||||
await new Promise(resolve => setTimeout(resolve, 50));
|
||||
});
|
||||
|
||||
describe('Rate Limit Rules', () => {
|
||||
test('should add and enforce global rate limit', async () => {
|
||||
rateLimiter.addRule({
|
||||
level: 'global',
|
||||
config: {
|
||||
points: 5,
|
||||
duration: 1, // 1 second
|
||||
},
|
||||
});
|
||||
|
||||
// Consume 5 points
|
||||
for (let i = 0; i < 5; i++) {
|
||||
const result = await rateLimiter.checkLimit('any-handler', 'any-operation');
|
||||
expect(result.allowed).toBe(true);
|
||||
}
|
||||
|
||||
// 6th request should be blocked
|
||||
const blocked = await rateLimiter.checkLimit('any-handler', 'any-operation');
|
||||
expect(blocked.allowed).toBe(false);
|
||||
expect(blocked.retryAfter).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
test('should add and enforce handler-level rate limit', async () => {
|
||||
rateLimiter.addRule({
|
||||
level: 'handler',
|
||||
handler: 'api-handler',
|
||||
config: {
|
||||
points: 3,
|
||||
duration: 1,
|
||||
},
|
||||
});
|
||||
|
||||
// api-handler should be limited
|
||||
for (let i = 0; i < 3; i++) {
|
||||
const result = await rateLimiter.checkLimit('api-handler', 'any-operation');
|
||||
expect(result.allowed).toBe(true);
|
||||
}
|
||||
|
||||
const blocked = await rateLimiter.checkLimit('api-handler', 'any-operation');
|
||||
expect(blocked.allowed).toBe(false);
|
||||
|
||||
// Other handlers should not be limited
|
||||
const otherHandler = await rateLimiter.checkLimit('other-handler', 'any-operation');
|
||||
expect(otherHandler.allowed).toBe(true);
|
||||
});
|
||||
|
||||
test('should add and enforce operation-level rate limit', async () => {
|
||||
rateLimiter.addRule({
|
||||
level: 'operation',
|
||||
handler: 'data-handler',
|
||||
operation: 'fetch-prices',
|
||||
config: {
|
||||
points: 2,
|
||||
duration: 1,
|
||||
},
|
||||
});
|
||||
|
||||
// Specific operation should be limited
|
||||
for (let i = 0; i < 2; i++) {
|
||||
const result = await rateLimiter.checkLimit('data-handler', 'fetch-prices');
|
||||
expect(result.allowed).toBe(true);
|
||||
}
|
||||
|
||||
const blocked = await rateLimiter.checkLimit('data-handler', 'fetch-prices');
|
||||
expect(blocked.allowed).toBe(false);
|
||||
|
||||
// Other operations on same handler should work
|
||||
const otherOp = await rateLimiter.checkLimit('data-handler', 'fetch-volume');
|
||||
expect(otherOp.allowed).toBe(true);
|
||||
});
|
||||
|
||||
test('should enforce multiple rate limits (most restrictive wins)', async () => {
|
||||
// Global: 10/sec
|
||||
rateLimiter.addRule({
|
||||
level: 'global',
|
||||
config: { points: 10, duration: 1 },
|
||||
});
|
||||
|
||||
// Handler: 5/sec
|
||||
rateLimiter.addRule({
|
||||
level: 'handler',
|
||||
handler: 'test-handler',
|
||||
config: { points: 5, duration: 1 },
|
||||
});
|
||||
|
||||
// Operation: 2/sec
|
||||
rateLimiter.addRule({
|
||||
level: 'operation',
|
||||
handler: 'test-handler',
|
||||
operation: 'test-op',
|
||||
config: { points: 2, duration: 1 },
|
||||
});
|
||||
|
||||
// Should be limited by operation level (most restrictive)
|
||||
for (let i = 0; i < 2; i++) {
|
||||
const result = await rateLimiter.checkLimit('test-handler', 'test-op');
|
||||
expect(result.allowed).toBe(true);
|
||||
}
|
||||
|
||||
const blocked = await rateLimiter.checkLimit('test-handler', 'test-op');
|
||||
expect(blocked.allowed).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Rate Limit Status', () => {
|
||||
test('should get rate limit status', async () => {
|
||||
rateLimiter.addRule({
|
||||
level: 'handler',
|
||||
handler: 'status-test',
|
||||
config: { points: 10, duration: 60 },
|
||||
});
|
||||
|
||||
// Consume some points
|
||||
await rateLimiter.checkLimit('status-test', 'operation');
|
||||
await rateLimiter.checkLimit('status-test', 'operation');
|
||||
|
||||
const status = await rateLimiter.getStatus('status-test', 'operation');
|
||||
expect(status.handler).toBe('status-test');
|
||||
expect(status.operation).toBe('operation');
|
||||
expect(status.limits.length).toBe(1);
|
||||
expect(status.limits[0].points).toBe(10);
|
||||
expect(status.limits[0].remaining).toBe(8);
|
||||
});
|
||||
|
||||
test('should show multiple applicable limits in status', async () => {
|
||||
rateLimiter.addRule({
|
||||
level: 'global',
|
||||
config: { points: 100, duration: 60 },
|
||||
});
|
||||
|
||||
rateLimiter.addRule({
|
||||
level: 'handler',
|
||||
handler: 'multi-test',
|
||||
config: { points: 50, duration: 60 },
|
||||
});
|
||||
|
||||
const status = await rateLimiter.getStatus('multi-test', 'operation');
|
||||
expect(status.limits.length).toBe(2);
|
||||
|
||||
const globalLimit = status.limits.find(l => l.level === 'global');
|
||||
const handlerLimit = status.limits.find(l => l.level === 'handler');
|
||||
|
||||
expect(globalLimit?.points).toBe(100);
|
||||
expect(handlerLimit?.points).toBe(50);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Rate Limit Management', () => {
|
||||
test('should reset rate limits', async () => {
|
||||
rateLimiter.addRule({
|
||||
level: 'handler',
|
||||
handler: 'reset-test',
|
||||
config: { points: 1, duration: 60 },
|
||||
});
|
||||
|
||||
// Consume the limit
|
||||
await rateLimiter.checkLimit('reset-test', 'operation');
|
||||
const blocked = await rateLimiter.checkLimit('reset-test', 'operation');
|
||||
expect(blocked.allowed).toBe(false);
|
||||
|
||||
// Reset limits
|
||||
await rateLimiter.reset('reset-test');
|
||||
|
||||
// Should be allowed again
|
||||
const afterReset = await rateLimiter.checkLimit('reset-test', 'operation');
|
||||
expect(afterReset.allowed).toBe(true);
|
||||
});
|
||||
|
||||
test('should get all rules', async () => {
|
||||
rateLimiter.addRule({
|
||||
level: 'global',
|
||||
config: { points: 100, duration: 60 },
|
||||
});
|
||||
|
||||
rateLimiter.addRule({
|
||||
level: 'handler',
|
||||
handler: 'test',
|
||||
config: { points: 50, duration: 60 },
|
||||
});
|
||||
|
||||
const rules = rateLimiter.getRules();
|
||||
expect(rules.length).toBe(2);
|
||||
expect(rules[0].level).toBe('global');
|
||||
expect(rules[1].level).toBe('handler');
|
||||
});
|
||||
|
||||
test('should remove specific rule', async () => {
|
||||
rateLimiter.addRule({
|
||||
level: 'handler',
|
||||
handler: 'remove-test',
|
||||
config: { points: 1, duration: 1 },
|
||||
});
|
||||
|
||||
// Verify rule exists
|
||||
await rateLimiter.checkLimit('remove-test', 'op');
|
||||
const blocked = await rateLimiter.checkLimit('remove-test', 'op');
|
||||
expect(blocked.allowed).toBe(false);
|
||||
|
||||
// Remove rule
|
||||
const removed = rateLimiter.removeRule('handler', 'remove-test');
|
||||
expect(removed).toBe(true);
|
||||
|
||||
// Should not be limited anymore
|
||||
const afterRemove = await rateLimiter.checkLimit('remove-test', 'op');
|
||||
expect(afterRemove.allowed).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Block Duration', () => {
|
||||
test('should block for specified duration after limit exceeded', async () => {
|
||||
rateLimiter.addRule({
|
||||
level: 'handler',
|
||||
handler: 'block-test',
|
||||
config: {
|
||||
points: 1,
|
||||
duration: 1,
|
||||
blockDuration: 2, // Block for 2 seconds
|
||||
},
|
||||
});
|
||||
|
||||
// Consume limit
|
||||
await rateLimiter.checkLimit('block-test', 'op');
|
||||
|
||||
// Should be blocked
|
||||
const blocked = await rateLimiter.checkLimit('block-test', 'op');
|
||||
expect(blocked.allowed).toBe(false);
|
||||
expect(blocked.retryAfter).toBeGreaterThanOrEqual(1000); // At least 1 second
|
||||
});
|
||||
});
|
||||
|
||||
describe('Error Handling', () => {
|
||||
test('should allow requests when rate limiter fails', async () => {
|
||||
// Create a rate limiter with invalid redis client
|
||||
const badRedis = new Redis({
|
||||
host: 'invalid-host',
|
||||
port: 9999,
|
||||
retryStrategy: () => null, // Disable retries
|
||||
});
|
||||
|
||||
const failingLimiter = new QueueRateLimiter(badRedis);
|
||||
|
||||
failingLimiter.addRule({
|
||||
level: 'global',
|
||||
config: { points: 1, duration: 1 },
|
||||
});
|
||||
|
||||
// Should allow even though Redis is not available
|
||||
const result = await failingLimiter.checkLimit('test', 'test');
|
||||
expect(result.allowed).toBe(true);
|
||||
|
||||
badRedis.disconnect();
|
||||
});
|
||||
});
|
||||
});
|
||||
import { afterEach, beforeEach, describe, expect, test } from 'bun:test';
|
||||
import Redis from 'ioredis';
|
||||
import { QueueRateLimiter } from '../src/rate-limiter';
|
||||
import { getRedisConnection } from '../src/utils';
|
||||
|
||||
// Suppress Redis connection errors in tests
|
||||
process.on('unhandledRejection', (reason, promise) => {
|
||||
if (reason && typeof reason === 'object' && 'message' in reason) {
|
||||
const message = (reason as Error).message;
|
||||
if (
|
||||
message.includes('Connection is closed') ||
|
||||
message.includes('Connection is in monitoring mode')
|
||||
) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
console.error('Unhandled Rejection at:', promise, 'reason:', reason);
|
||||
});
|
||||
|
||||
describe('QueueRateLimiter', () => {
|
||||
let redisClient: Redis;
|
||||
let rateLimiter: QueueRateLimiter;
|
||||
|
||||
const redisConfig = {
|
||||
host: 'localhost',
|
||||
port: 6379,
|
||||
password: '',
|
||||
db: 0,
|
||||
};
|
||||
|
||||
beforeEach(async () => {
|
||||
// Create Redis client
|
||||
redisClient = new Redis(getRedisConnection(redisConfig));
|
||||
|
||||
// Clear Redis keys for tests
|
||||
try {
|
||||
const keys = await redisClient.keys('rl:*');
|
||||
if (keys.length > 0) {
|
||||
await redisClient.del(...keys);
|
||||
}
|
||||
} catch {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
rateLimiter = new QueueRateLimiter(redisClient);
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (redisClient) {
|
||||
try {
|
||||
await redisClient.quit();
|
||||
} catch {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
}
|
||||
await new Promise(resolve => setTimeout(resolve, 50));
|
||||
});
|
||||
|
||||
describe('Rate Limit Rules', () => {
|
||||
test('should add and enforce global rate limit', async () => {
|
||||
rateLimiter.addRule({
|
||||
level: 'global',
|
||||
config: {
|
||||
points: 5,
|
||||
duration: 1, // 1 second
|
||||
},
|
||||
});
|
||||
|
||||
// Consume 5 points
|
||||
for (let i = 0; i < 5; i++) {
|
||||
const result = await rateLimiter.checkLimit('any-handler', 'any-operation');
|
||||
expect(result.allowed).toBe(true);
|
||||
}
|
||||
|
||||
// 6th request should be blocked
|
||||
const blocked = await rateLimiter.checkLimit('any-handler', 'any-operation');
|
||||
expect(blocked.allowed).toBe(false);
|
||||
expect(blocked.retryAfter).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
test('should add and enforce handler-level rate limit', async () => {
|
||||
rateLimiter.addRule({
|
||||
level: 'handler',
|
||||
handler: 'api-handler',
|
||||
config: {
|
||||
points: 3,
|
||||
duration: 1,
|
||||
},
|
||||
});
|
||||
|
||||
// api-handler should be limited
|
||||
for (let i = 0; i < 3; i++) {
|
||||
const result = await rateLimiter.checkLimit('api-handler', 'any-operation');
|
||||
expect(result.allowed).toBe(true);
|
||||
}
|
||||
|
||||
const blocked = await rateLimiter.checkLimit('api-handler', 'any-operation');
|
||||
expect(blocked.allowed).toBe(false);
|
||||
|
||||
// Other handlers should not be limited
|
||||
const otherHandler = await rateLimiter.checkLimit('other-handler', 'any-operation');
|
||||
expect(otherHandler.allowed).toBe(true);
|
||||
});
|
||||
|
||||
test('should add and enforce operation-level rate limit', async () => {
|
||||
rateLimiter.addRule({
|
||||
level: 'operation',
|
||||
handler: 'data-handler',
|
||||
operation: 'fetch-prices',
|
||||
config: {
|
||||
points: 2,
|
||||
duration: 1,
|
||||
},
|
||||
});
|
||||
|
||||
// Specific operation should be limited
|
||||
for (let i = 0; i < 2; i++) {
|
||||
const result = await rateLimiter.checkLimit('data-handler', 'fetch-prices');
|
||||
expect(result.allowed).toBe(true);
|
||||
}
|
||||
|
||||
const blocked = await rateLimiter.checkLimit('data-handler', 'fetch-prices');
|
||||
expect(blocked.allowed).toBe(false);
|
||||
|
||||
// Other operations on same handler should work
|
||||
const otherOp = await rateLimiter.checkLimit('data-handler', 'fetch-volume');
|
||||
expect(otherOp.allowed).toBe(true);
|
||||
});
|
||||
|
||||
test('should enforce multiple rate limits (most restrictive wins)', async () => {
|
||||
// Global: 10/sec
|
||||
rateLimiter.addRule({
|
||||
level: 'global',
|
||||
config: { points: 10, duration: 1 },
|
||||
});
|
||||
|
||||
// Handler: 5/sec
|
||||
rateLimiter.addRule({
|
||||
level: 'handler',
|
||||
handler: 'test-handler',
|
||||
config: { points: 5, duration: 1 },
|
||||
});
|
||||
|
||||
// Operation: 2/sec
|
||||
rateLimiter.addRule({
|
||||
level: 'operation',
|
||||
handler: 'test-handler',
|
||||
operation: 'test-op',
|
||||
config: { points: 2, duration: 1 },
|
||||
});
|
||||
|
||||
// Should be limited by operation level (most restrictive)
|
||||
for (let i = 0; i < 2; i++) {
|
||||
const result = await rateLimiter.checkLimit('test-handler', 'test-op');
|
||||
expect(result.allowed).toBe(true);
|
||||
}
|
||||
|
||||
const blocked = await rateLimiter.checkLimit('test-handler', 'test-op');
|
||||
expect(blocked.allowed).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Rate Limit Status', () => {
|
||||
test('should get rate limit status', async () => {
|
||||
rateLimiter.addRule({
|
||||
level: 'handler',
|
||||
handler: 'status-test',
|
||||
config: { points: 10, duration: 60 },
|
||||
});
|
||||
|
||||
// Consume some points
|
||||
await rateLimiter.checkLimit('status-test', 'operation');
|
||||
await rateLimiter.checkLimit('status-test', 'operation');
|
||||
|
||||
const status = await rateLimiter.getStatus('status-test', 'operation');
|
||||
expect(status.handler).toBe('status-test');
|
||||
expect(status.operation).toBe('operation');
|
||||
expect(status.limits.length).toBe(1);
|
||||
expect(status.limits[0].points).toBe(10);
|
||||
expect(status.limits[0].remaining).toBe(8);
|
||||
});
|
||||
|
||||
test('should show multiple applicable limits in status', async () => {
|
||||
rateLimiter.addRule({
|
||||
level: 'global',
|
||||
config: { points: 100, duration: 60 },
|
||||
});
|
||||
|
||||
rateLimiter.addRule({
|
||||
level: 'handler',
|
||||
handler: 'multi-test',
|
||||
config: { points: 50, duration: 60 },
|
||||
});
|
||||
|
||||
const status = await rateLimiter.getStatus('multi-test', 'operation');
|
||||
expect(status.limits.length).toBe(2);
|
||||
|
||||
const globalLimit = status.limits.find(l => l.level === 'global');
|
||||
const handlerLimit = status.limits.find(l => l.level === 'handler');
|
||||
|
||||
expect(globalLimit?.points).toBe(100);
|
||||
expect(handlerLimit?.points).toBe(50);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Rate Limit Management', () => {
|
||||
test('should reset rate limits', async () => {
|
||||
rateLimiter.addRule({
|
||||
level: 'handler',
|
||||
handler: 'reset-test',
|
||||
config: { points: 1, duration: 60 },
|
||||
});
|
||||
|
||||
// Consume the limit
|
||||
await rateLimiter.checkLimit('reset-test', 'operation');
|
||||
const blocked = await rateLimiter.checkLimit('reset-test', 'operation');
|
||||
expect(blocked.allowed).toBe(false);
|
||||
|
||||
// Reset limits
|
||||
await rateLimiter.reset('reset-test');
|
||||
|
||||
// Should be allowed again
|
||||
const afterReset = await rateLimiter.checkLimit('reset-test', 'operation');
|
||||
expect(afterReset.allowed).toBe(true);
|
||||
});
|
||||
|
||||
test('should get all rules', async () => {
|
||||
rateLimiter.addRule({
|
||||
level: 'global',
|
||||
config: { points: 100, duration: 60 },
|
||||
});
|
||||
|
||||
rateLimiter.addRule({
|
||||
level: 'handler',
|
||||
handler: 'test',
|
||||
config: { points: 50, duration: 60 },
|
||||
});
|
||||
|
||||
const rules = rateLimiter.getRules();
|
||||
expect(rules.length).toBe(2);
|
||||
expect(rules[0].level).toBe('global');
|
||||
expect(rules[1].level).toBe('handler');
|
||||
});
|
||||
|
||||
test('should remove specific rule', async () => {
|
||||
rateLimiter.addRule({
|
||||
level: 'handler',
|
||||
handler: 'remove-test',
|
||||
config: { points: 1, duration: 1 },
|
||||
});
|
||||
|
||||
// Verify rule exists
|
||||
await rateLimiter.checkLimit('remove-test', 'op');
|
||||
const blocked = await rateLimiter.checkLimit('remove-test', 'op');
|
||||
expect(blocked.allowed).toBe(false);
|
||||
|
||||
// Remove rule
|
||||
const removed = rateLimiter.removeRule('handler', 'remove-test');
|
||||
expect(removed).toBe(true);
|
||||
|
||||
// Should not be limited anymore
|
||||
const afterRemove = await rateLimiter.checkLimit('remove-test', 'op');
|
||||
expect(afterRemove.allowed).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Block Duration', () => {
|
||||
test('should block for specified duration after limit exceeded', async () => {
|
||||
rateLimiter.addRule({
|
||||
level: 'handler',
|
||||
handler: 'block-test',
|
||||
config: {
|
||||
points: 1,
|
||||
duration: 1,
|
||||
blockDuration: 2, // Block for 2 seconds
|
||||
},
|
||||
});
|
||||
|
||||
// Consume limit
|
||||
await rateLimiter.checkLimit('block-test', 'op');
|
||||
|
||||
// Should be blocked
|
||||
const blocked = await rateLimiter.checkLimit('block-test', 'op');
|
||||
expect(blocked.allowed).toBe(false);
|
||||
expect(blocked.retryAfter).toBeGreaterThanOrEqual(1000); // At least 1 second
|
||||
});
|
||||
});
|
||||
|
||||
describe('Error Handling', () => {
|
||||
test('should allow requests when rate limiter fails', async () => {
|
||||
// Create a rate limiter with invalid redis client
|
||||
const badRedis = new Redis({
|
||||
host: 'invalid-host',
|
||||
port: 9999,
|
||||
retryStrategy: () => null, // Disable retries
|
||||
});
|
||||
|
||||
const failingLimiter = new QueueRateLimiter(badRedis);
|
||||
|
||||
failingLimiter.addRule({
|
||||
level: 'global',
|
||||
config: { points: 1, duration: 1 },
|
||||
});
|
||||
|
||||
// Should allow even though Redis is not available
|
||||
const result = await failingLimiter.checkLimit('test', 'test');
|
||||
expect(result.allowed).toBe(true);
|
||||
|
||||
badRedis.disconnect();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@ -9,7 +9,12 @@ import type { ShutdownResult } from './types';
|
|||
|
||||
// Core shutdown classes and types
|
||||
export { Shutdown } from './shutdown';
|
||||
export type { ShutdownCallback, ShutdownOptions, ShutdownResult, PrioritizedShutdownCallback } from './types';
|
||||
export type {
|
||||
ShutdownCallback,
|
||||
ShutdownOptions,
|
||||
ShutdownResult,
|
||||
PrioritizedShutdownCallback,
|
||||
} from './types';
|
||||
|
||||
// Global singleton instance
|
||||
let globalInstance: Shutdown | null = null;
|
||||
|
|
@ -31,7 +36,11 @@ function getGlobalInstance(): Shutdown {
|
|||
/**
|
||||
* Register a cleanup callback that will be executed during shutdown
|
||||
*/
|
||||
export function onShutdown(callback: () => Promise<void> | void, priority?: number, name?: string): void {
|
||||
export function onShutdown(
|
||||
callback: () => Promise<void> | void,
|
||||
priority?: number,
|
||||
name?: string
|
||||
): void {
|
||||
getGlobalInstance().onShutdown(callback, priority, name);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -8,7 +8,12 @@
|
|||
* - Platform-specific signal support (Windows/Unix)
|
||||
*/
|
||||
|
||||
import type { PrioritizedShutdownCallback, ShutdownCallback, ShutdownOptions, ShutdownResult } from './types';
|
||||
import type {
|
||||
PrioritizedShutdownCallback,
|
||||
ShutdownCallback,
|
||||
ShutdownOptions,
|
||||
ShutdownResult,
|
||||
} from './types';
|
||||
|
||||
// Global flag that works across all processes/workers
|
||||
declare global {
|
||||
|
|
|
|||
|
|
@ -6,6 +6,5 @@
|
|||
"composite": true
|
||||
},
|
||||
"include": ["src/**/*"],
|
||||
"references": [
|
||||
]
|
||||
"references": []
|
||||
}
|
||||
|
|
|
|||
|
|
@ -20,4 +20,4 @@
|
|||
"typescript": "^5.3.0",
|
||||
"bun-types": "^1.2.15"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -37,25 +37,25 @@ export type {
|
|||
HasClose,
|
||||
HasOHLC,
|
||||
HasVolume,
|
||||
HasTimestamp
|
||||
HasTimestamp,
|
||||
} from '@stock-bot/types';
|
||||
|
||||
// Export working calculation functions
|
||||
export * from './basic-calculations';
|
||||
|
||||
// Export working technical indicators (building one by one)
|
||||
export {
|
||||
sma,
|
||||
ema,
|
||||
rsi,
|
||||
macd,
|
||||
bollingerBands,
|
||||
atr,
|
||||
obv,
|
||||
stochastic,
|
||||
williamsR,
|
||||
cci,
|
||||
mfi,
|
||||
export {
|
||||
sma,
|
||||
ema,
|
||||
rsi,
|
||||
macd,
|
||||
bollingerBands,
|
||||
atr,
|
||||
obv,
|
||||
stochastic,
|
||||
williamsR,
|
||||
cci,
|
||||
mfi,
|
||||
vwma,
|
||||
momentum,
|
||||
roc,
|
||||
|
|
@ -80,7 +80,7 @@ export {
|
|||
balanceOfPower,
|
||||
trix,
|
||||
massIndex,
|
||||
coppockCurve
|
||||
coppockCurve,
|
||||
} from './technical-indicators';
|
||||
export * from './risk-metrics';
|
||||
export * from './performance-metrics';
|
||||
|
|
|
|||
|
|
@ -1,3 +1,5 @@
|
|||
import { ulcerIndex } from './risk-metrics';
|
||||
|
||||
/**
|
||||
* Performance Metrics and Analysis
|
||||
* Comprehensive performance measurement tools for trading strategies and portfolios
|
||||
|
|
@ -18,7 +20,6 @@ export interface PortfolioMetrics {
|
|||
alpha: number;
|
||||
volatility: number;
|
||||
}
|
||||
import { ulcerIndex } from './risk-metrics';
|
||||
|
||||
export interface TradePerformance {
|
||||
totalTrades: number;
|
||||
|
|
@ -156,8 +157,10 @@ export function analyzeDrawdowns(
|
|||
}
|
||||
|
||||
const first = equityCurve[0];
|
||||
if (!first) {return { maxDrawdown: 0, maxDrawdownDuration: 0, averageDrawdown: 0, drawdownPeriods: [] };}
|
||||
|
||||
if (!first) {
|
||||
return { maxDrawdown: 0, maxDrawdownDuration: 0, averageDrawdown: 0, drawdownPeriods: [] };
|
||||
}
|
||||
|
||||
let peak = first.value;
|
||||
let peakDate = first.date;
|
||||
let maxDrawdown = 0;
|
||||
|
|
@ -175,18 +178,21 @@ export function analyzeDrawdowns(
|
|||
|
||||
for (let i = 1; i < equityCurve.length; i++) {
|
||||
const current = equityCurve[i];
|
||||
if (!current) {continue;}
|
||||
if (!current) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (current.value > peak) {
|
||||
// New peak - end any current drawdown
|
||||
if (currentDrawdownStart) {
|
||||
const prev = equityCurve[i - 1];
|
||||
if (!prev) {continue;}
|
||||
|
||||
if (!prev) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const drawdownMagnitude = (peak - prev.value) / peak;
|
||||
const duration = Math.floor(
|
||||
(prev.date.getTime() - currentDrawdownStart.getTime()) /
|
||||
(1000 * 60 * 60 * 24)
|
||||
(prev.date.getTime() - currentDrawdownStart.getTime()) / (1000 * 60 * 60 * 24)
|
||||
);
|
||||
|
||||
drawdownPeriods.push({
|
||||
|
|
@ -217,8 +223,10 @@ export function analyzeDrawdowns(
|
|||
// Handle ongoing drawdown
|
||||
if (currentDrawdownStart) {
|
||||
const lastPoint = equityCurve[equityCurve.length - 1];
|
||||
if (!lastPoint) {return { maxDrawdown, maxDrawdownDuration, averageDrawdown: 0, drawdownPeriods };}
|
||||
|
||||
if (!lastPoint) {
|
||||
return { maxDrawdown, maxDrawdownDuration, averageDrawdown: 0, drawdownPeriods };
|
||||
}
|
||||
|
||||
const drawdownMagnitude = (peak - lastPoint.value) / peak;
|
||||
const duration = Math.floor(
|
||||
(lastPoint.date.getTime() - currentDrawdownStart.getTime()) / (1000 * 60 * 60 * 24)
|
||||
|
|
@ -378,8 +386,10 @@ export function strategyPerformanceAttribution(
|
|||
for (let i = 0; i < sectorWeights.length; i++) {
|
||||
const portfolioWeight = sectorWeights[i];
|
||||
const sectorReturn = sectorReturns[i];
|
||||
if (portfolioWeight === undefined || sectorReturn === undefined) {continue;}
|
||||
|
||||
if (portfolioWeight === undefined || sectorReturn === undefined) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const benchmarkWeight = 1 / sectorWeights.length; // Assuming equal benchmark weights
|
||||
|
||||
// Allocation effect: (portfolio weight - benchmark weight) * (benchmark sector return - benchmark return)
|
||||
|
|
@ -483,16 +493,31 @@ export function calculateStrategyMetrics(
|
|||
for (let i = 1; i < equityCurve.length; i++) {
|
||||
const current = equityCurve[i];
|
||||
const previous = equityCurve[i - 1];
|
||||
if (!current || !previous) {continue;}
|
||||
|
||||
if (!current || !previous) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const ret = (current.value - previous.value) / previous.value;
|
||||
returns.push(ret);
|
||||
}
|
||||
|
||||
const lastPoint = equityCurve[equityCurve.length - 1];
|
||||
const firstPoint = equityCurve[0];
|
||||
if (!lastPoint || !firstPoint) {return { totalValue: 0, totalReturn: 0, totalReturnPercent: 0, dailyReturn: 0, dailyReturnPercent: 0, maxDrawdown: 0, sharpeRatio: 0, beta: 0, alpha: 0, volatility: 0 };}
|
||||
|
||||
if (!lastPoint || !firstPoint) {
|
||||
return {
|
||||
totalValue: 0,
|
||||
totalReturn: 0,
|
||||
totalReturnPercent: 0,
|
||||
dailyReturn: 0,
|
||||
dailyReturnPercent: 0,
|
||||
maxDrawdown: 0,
|
||||
sharpeRatio: 0,
|
||||
beta: 0,
|
||||
alpha: 0,
|
||||
volatility: 0,
|
||||
};
|
||||
}
|
||||
|
||||
const totalValue = lastPoint.value;
|
||||
const totalReturn = totalValue - firstPoint.value;
|
||||
const totalReturnPercent = (totalReturn / firstPoint.value) * 100;
|
||||
|
|
@ -562,12 +587,10 @@ export function informationRatio(portfolioReturns: number[], benchmarkReturns: n
|
|||
throw new Error('Portfolio and benchmark returns must have the same length.');
|
||||
}
|
||||
|
||||
const excessReturns = portfolioReturns.map(
|
||||
(portfolioReturn, index) => {
|
||||
const benchmark = benchmarkReturns[index];
|
||||
return benchmark !== undefined ? portfolioReturn - benchmark : 0;
|
||||
}
|
||||
);
|
||||
const excessReturns = portfolioReturns.map((portfolioReturn, index) => {
|
||||
const benchmark = benchmarkReturns[index];
|
||||
return benchmark !== undefined ? portfolioReturn - benchmark : 0;
|
||||
});
|
||||
const trackingError = calculateVolatility(excessReturns);
|
||||
const avgExcessReturn = excessReturns.reduce((sum, ret) => sum + ret, 0) / excessReturns.length;
|
||||
|
||||
|
|
@ -602,8 +625,10 @@ export function captureRatio(
|
|||
for (let i = 0; i < portfolioReturns.length; i++) {
|
||||
const benchmarkReturn = benchmarkReturns[i];
|
||||
const portfolioReturn = portfolioReturns[i];
|
||||
if (benchmarkReturn === undefined || portfolioReturn === undefined) {continue;}
|
||||
|
||||
if (benchmarkReturn === undefined || portfolioReturn === undefined) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (benchmarkReturn > 0) {
|
||||
upCapture += portfolioReturn;
|
||||
upMarketPeriods++;
|
||||
|
|
@ -733,17 +758,21 @@ export function timeWeightedRateOfReturn(
|
|||
if (cashFlows.length < 2) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
const first = cashFlows[0];
|
||||
if (!first) {return 0;}
|
||||
|
||||
if (!first) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
let totalReturn = 1;
|
||||
let previousValue = first.value;
|
||||
|
||||
for (let i = 1; i < cashFlows.length; i++) {
|
||||
const current = cashFlows[i];
|
||||
if (!current) {continue;}
|
||||
|
||||
if (!current) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const periodReturn =
|
||||
(current.value - previousValue - current.amount) / (previousValue + current.amount);
|
||||
totalReturn *= 1 + periodReturn;
|
||||
|
|
@ -762,10 +791,12 @@ export function moneyWeightedRateOfReturn(
|
|||
if (cashFlows.length === 0) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
const first = cashFlows[0];
|
||||
if (!first) {return 0;}
|
||||
|
||||
if (!first) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Approximate MWRR using Internal Rate of Return (IRR)
|
||||
// This requires a numerical method or library for accurate IRR calculation
|
||||
// This is a simplified example and may not be accurate for all cases
|
||||
|
|
@ -826,8 +857,10 @@ function calculateBeta(portfolioReturns: number[], marketReturns: number[]): num
|
|||
for (let i = 0; i < portfolioReturns.length; i++) {
|
||||
const portfolioReturn = portfolioReturns[i];
|
||||
const marketReturn = marketReturns[i];
|
||||
if (portfolioReturn === undefined || marketReturn === undefined) {continue;}
|
||||
|
||||
if (portfolioReturn === undefined || marketReturn === undefined) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const portfolioDiff = portfolioReturn - portfolioMean;
|
||||
const marketDiff = marketReturn - marketMean;
|
||||
|
||||
|
|
|
|||
|
|
@ -71,14 +71,18 @@ export function maxDrawdown(equityCurve: number[]): number {
|
|||
|
||||
let maxDD = 0;
|
||||
const first = equityCurve[0];
|
||||
if (first === undefined) {return 0;}
|
||||
|
||||
if (first === undefined) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
let peak = first;
|
||||
|
||||
for (let i = 1; i < equityCurve.length; i++) {
|
||||
const current = equityCurve[i];
|
||||
if (current === undefined) {continue;}
|
||||
|
||||
if (current === undefined) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (current > peak) {
|
||||
peak = current;
|
||||
} else {
|
||||
|
|
@ -150,8 +154,10 @@ export function beta(portfolioReturns: number[], marketReturns: number[]): numbe
|
|||
for (let i = 0; i < n; i++) {
|
||||
const portfolioReturn = portfolioReturns[i];
|
||||
const marketReturn = marketReturns[i];
|
||||
if (portfolioReturn === undefined || marketReturn === undefined) {continue;}
|
||||
|
||||
if (portfolioReturn === undefined || marketReturn === undefined) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const portfolioDiff = portfolioReturn - portfolioMean;
|
||||
const marketDiff = marketReturn - marketMean;
|
||||
|
||||
|
|
@ -187,12 +193,13 @@ export function treynorRatio(
|
|||
riskFreeRate: number = 0
|
||||
): number {
|
||||
const portfolioBeta = beta(portfolioReturns, marketReturns);
|
||||
|
||||
|
||||
if (portfolioBeta === 0) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
const portfolioMean = portfolioReturns.reduce((sum, ret) => sum + ret, 0) / portfolioReturns.length;
|
||||
|
||||
const portfolioMean =
|
||||
portfolioReturns.reduce((sum, ret) => sum + ret, 0) / portfolioReturns.length;
|
||||
return (portfolioMean - riskFreeRate) / portfolioBeta;
|
||||
}
|
||||
|
||||
|
|
@ -412,7 +419,9 @@ export function riskContribution(
|
|||
for (let i = 0; i < n; i++) {
|
||||
let marginalContribution = 0;
|
||||
const row = covarianceMatrix[i];
|
||||
if (!row) {continue;}
|
||||
if (!row) {
|
||||
continue;
|
||||
}
|
||||
|
||||
for (let j = 0; j < n; j++) {
|
||||
const weight = weights[j];
|
||||
|
|
@ -442,8 +451,10 @@ export function ulcerIndex(equityCurve: Array<{ value: number; date: Date }>): n
|
|||
|
||||
let sumSquaredDrawdown = 0;
|
||||
const first = equityCurve[0];
|
||||
if (!first) {return 0;}
|
||||
|
||||
if (!first) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
let peak = first.value;
|
||||
|
||||
for (const point of equityCurve) {
|
||||
|
|
|
|||
|
|
@ -540,7 +540,9 @@ export function adx(
|
|||
for (let i = 1; i < ohlcv.length; i++) {
|
||||
const current = ohlcv[i];
|
||||
const previous = ohlcv[i - 1];
|
||||
if (!current || !previous) {continue;}
|
||||
if (!current || !previous) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// True Range
|
||||
const tr = Math.max(
|
||||
|
|
@ -575,8 +577,10 @@ export function adx(
|
|||
const atr = atrValues[i];
|
||||
const plusDMSmoothed = smoothedPlusDM[i];
|
||||
const minusDMSmoothed = smoothedMinusDM[i];
|
||||
if (atr === undefined || plusDMSmoothed === undefined || minusDMSmoothed === undefined) {continue;}
|
||||
|
||||
if (atr === undefined || plusDMSmoothed === undefined || minusDMSmoothed === undefined) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const diPlus = atr > 0 ? (plusDMSmoothed / atr) * 100 : 0;
|
||||
const diMinus = atr > 0 ? (minusDMSmoothed / atr) * 100 : 0;
|
||||
|
||||
|
|
@ -602,17 +606,15 @@ export function adx(
|
|||
/**
|
||||
* Parabolic SAR
|
||||
*/
|
||||
export function parabolicSAR(
|
||||
ohlcv: OHLCV[],
|
||||
step: number = 0.02,
|
||||
maxStep: number = 0.2
|
||||
): number[] {
|
||||
export function parabolicSAR(ohlcv: OHLCV[], step: number = 0.02, maxStep: number = 0.2): number[] {
|
||||
if (ohlcv.length < 2) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const first = ohlcv[0];
|
||||
if (!first) {return [];}
|
||||
if (!first) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const result: number[] = [];
|
||||
let trend = 1; // 1 for uptrend, -1 for downtrend
|
||||
|
|
@ -625,7 +627,9 @@ export function parabolicSAR(
|
|||
for (let i = 1; i < ohlcv.length; i++) {
|
||||
const curr = ohlcv[i];
|
||||
const prev = ohlcv[i - 1];
|
||||
if (!curr || !prev) {continue;}
|
||||
if (!curr || !prev) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Calculate new SAR
|
||||
sar = sar + acceleration * (extremePoint - sar);
|
||||
|
|
@ -834,32 +838,37 @@ export function ultimateOscillator(
|
|||
// Calculate BP and TR
|
||||
for (let i = 0; i < ohlcv.length; i++) {
|
||||
const current = ohlcv[i]!;
|
||||
|
||||
|
||||
if (i === 0) {
|
||||
bp.push(current.close - current.low);
|
||||
tr.push(current.high - current.low);
|
||||
} else {
|
||||
const previous = ohlcv[i - 1]!;
|
||||
bp.push(current.close - Math.min(current.low, previous.close));
|
||||
tr.push(Math.max(
|
||||
current.high - current.low,
|
||||
Math.abs(current.high - previous.close),
|
||||
Math.abs(current.low - previous.close)
|
||||
));
|
||||
tr.push(
|
||||
Math.max(
|
||||
current.high - current.low,
|
||||
Math.abs(current.high - previous.close),
|
||||
Math.abs(current.low - previous.close)
|
||||
)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
const result: number[] = [];
|
||||
|
||||
for (let i = Math.max(period1, period2, period3) - 1; i < ohlcv.length; i++) {
|
||||
const avg1 = bp.slice(i - period1 + 1, i + 1).reduce((a, b) => a + b, 0) /
|
||||
tr.slice(i - period1 + 1, i + 1).reduce((a, b) => a + b, 0);
|
||||
const avg2 = bp.slice(i - period2 + 1, i + 1).reduce((a, b) => a + b, 0) /
|
||||
tr.slice(i - period2 + 1, i + 1).reduce((a, b) => a + b, 0);
|
||||
const avg3 = bp.slice(i - period3 + 1, i + 1).reduce((a, b) => a + b, 0) /
|
||||
tr.slice(i - period3 + 1, i + 1).reduce((a, b) => a + b, 0);
|
||||
const avg1 =
|
||||
bp.slice(i - period1 + 1, i + 1).reduce((a, b) => a + b, 0) /
|
||||
tr.slice(i - period1 + 1, i + 1).reduce((a, b) => a + b, 0);
|
||||
const avg2 =
|
||||
bp.slice(i - period2 + 1, i + 1).reduce((a, b) => a + b, 0) /
|
||||
tr.slice(i - period2 + 1, i + 1).reduce((a, b) => a + b, 0);
|
||||
const avg3 =
|
||||
bp.slice(i - period3 + 1, i + 1).reduce((a, b) => a + b, 0) /
|
||||
tr.slice(i - period3 + 1, i + 1).reduce((a, b) => a + b, 0);
|
||||
|
||||
const uo = 100 * ((4 * avg1) + (2 * avg2) + avg3) / (4 + 2 + 1);
|
||||
const uo = (100 * (4 * avg1 + 2 * avg2 + avg3)) / (4 + 2 + 1);
|
||||
result.push(uo);
|
||||
}
|
||||
|
||||
|
|
@ -880,7 +889,7 @@ export function easeOfMovement(ohlcv: OHLCV[], period: number = 14): number[] {
|
|||
const current = ohlcv[i]!;
|
||||
const previous = ohlcv[i - 1]!;
|
||||
|
||||
const distance = ((current.high + current.low) / 2) - ((previous.high + previous.low) / 2);
|
||||
const distance = (current.high + current.low) / 2 - (previous.high + previous.low) / 2;
|
||||
const boxHeight = current.high - current.low;
|
||||
const volume = current.volume;
|
||||
|
||||
|
|
@ -1028,7 +1037,14 @@ export function klingerVolumeOscillator(
|
|||
const prevTypicalPrice = (previous.high + previous.low + previous.close) / 3;
|
||||
|
||||
const trend = typicalPrice > prevTypicalPrice ? 1 : -1;
|
||||
const vf = current.volume * trend * Math.abs((2 * ((current.close - current.low) - (current.high - current.close))) / (current.high - current.low)) * 100;
|
||||
const vf =
|
||||
current.volume *
|
||||
trend *
|
||||
Math.abs(
|
||||
(2 * (current.close - current.low - (current.high - current.close))) /
|
||||
(current.high - current.low)
|
||||
) *
|
||||
100;
|
||||
|
||||
volumeForce.push(vf);
|
||||
}
|
||||
|
|
@ -1137,7 +1153,7 @@ export function stochasticRSI(
|
|||
smoothD: number = 3
|
||||
): { k: number[]; d: number[] } {
|
||||
const rsiValues = rsi(prices, rsiPeriod);
|
||||
|
||||
|
||||
if (rsiValues.length < stochPeriod) {
|
||||
return { k: [], d: [] };
|
||||
}
|
||||
|
|
@ -1266,17 +1282,17 @@ export function massIndex(ohlcv: OHLCV[], period: number = 25): number[] {
|
|||
|
||||
// Calculate high-low ranges
|
||||
const ranges = ohlcv.map(candle => candle.high - candle.low);
|
||||
|
||||
|
||||
// Calculate 9-period EMA of ranges
|
||||
const ema9 = ema(ranges, 9);
|
||||
|
||||
|
||||
// Calculate 9-period EMA of the EMA (double smoothing)
|
||||
const emaEma9 = ema(ema9, 9);
|
||||
|
||||
// Calculate ratio
|
||||
const ratios: number[] = [];
|
||||
const minLength = Math.min(ema9.length, emaEma9.length);
|
||||
|
||||
|
||||
for (let i = 0; i < minLength; i++) {
|
||||
const singleEMA = ema9[i];
|
||||
const doubleEMA = emaEma9[i];
|
||||
|
|
@ -1299,9 +1315,9 @@ export function massIndex(ohlcv: OHLCV[], period: number = 25): number[] {
|
|||
* Coppock Curve
|
||||
*/
|
||||
export function coppockCurve(
|
||||
prices: number[],
|
||||
shortROC: number = 11,
|
||||
longROC: number = 14,
|
||||
prices: number[],
|
||||
shortROC: number = 11,
|
||||
longROC: number = 14,
|
||||
wma: number = 10
|
||||
): number[] {
|
||||
const roc1 = roc(prices, shortROC);
|
||||
|
|
|
|||
|
|
@ -1,96 +1,94 @@
|
|||
/**
|
||||
* Enhanced fetch wrapper with proxy support and automatic debug logging
|
||||
* Drop-in replacement for native fetch with additional features
|
||||
*/
|
||||
|
||||
export interface BunRequestInit extends RequestInit {
|
||||
proxy?: string;
|
||||
}
|
||||
|
||||
export interface FetchOptions extends RequestInit {
|
||||
logger?: any;
|
||||
proxy?: string | null;
|
||||
timeout?: number;
|
||||
}
|
||||
|
||||
export async function fetch(
|
||||
input: RequestInfo | URL,
|
||||
options?: FetchOptions
|
||||
): Promise<Response> {
|
||||
const logger = options?.logger || console;
|
||||
const url = typeof input === 'string' ? input : input instanceof URL ? input.href : (input as Request).url;
|
||||
|
||||
// Build request options
|
||||
const requestOptions: RequestInit = {
|
||||
method: options?.method || 'GET',
|
||||
headers: options?.headers || {},
|
||||
body: options?.body,
|
||||
signal: options?.signal,
|
||||
credentials: options?.credentials,
|
||||
cache: options?.cache,
|
||||
redirect: options?.redirect,
|
||||
referrer: options?.referrer,
|
||||
referrerPolicy: options?.referrerPolicy,
|
||||
integrity: options?.integrity,
|
||||
keepalive: options?.keepalive,
|
||||
mode: options?.mode,
|
||||
};
|
||||
// Handle proxy for Bun
|
||||
if (options?.proxy) {
|
||||
// Bun supports proxy via fetch options
|
||||
(requestOptions as BunRequestInit).proxy = options.proxy;
|
||||
}
|
||||
|
||||
// Handle timeout
|
||||
if (options?.timeout) {
|
||||
const controller = new AbortController();
|
||||
const timeoutId = setTimeout(() => controller.abort(), options.timeout);
|
||||
requestOptions.signal = controller.signal;
|
||||
|
||||
try {
|
||||
const response = await performFetch(input, requestOptions, logger, url);
|
||||
clearTimeout(timeoutId);
|
||||
return response;
|
||||
} catch (error) {
|
||||
clearTimeout(timeoutId);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
return performFetch(input, requestOptions, logger, url);
|
||||
}
|
||||
|
||||
async function performFetch(
|
||||
input: RequestInfo | URL,
|
||||
requestOptions: RequestInit,
|
||||
logger: any,
|
||||
url: string
|
||||
): Promise<Response> {
|
||||
logger.debug('HTTP request', {
|
||||
method: requestOptions.method,
|
||||
url,
|
||||
headers: requestOptions.headers,
|
||||
proxy: (requestOptions as BunRequestInit).proxy || null
|
||||
});
|
||||
|
||||
try {
|
||||
const response = await globalThis.fetch(input, requestOptions);
|
||||
|
||||
logger.debug('HTTP response', {
|
||||
url,
|
||||
status: response.status,
|
||||
statusText: response.statusText,
|
||||
ok: response.ok,
|
||||
headers: Object.fromEntries(response.headers.entries())
|
||||
});
|
||||
|
||||
return response;
|
||||
} catch (error) {
|
||||
logger.debug('HTTP error', {
|
||||
url,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
name: error instanceof Error ? error.name : 'Unknown'
|
||||
});
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Enhanced fetch wrapper with proxy support and automatic debug logging
|
||||
* Drop-in replacement for native fetch with additional features
|
||||
*/
|
||||
|
||||
export interface BunRequestInit extends RequestInit {
|
||||
proxy?: string;
|
||||
}
|
||||
|
||||
export interface FetchOptions extends RequestInit {
|
||||
logger?: any;
|
||||
proxy?: string | null;
|
||||
timeout?: number;
|
||||
}
|
||||
|
||||
export async function fetch(input: RequestInfo | URL, options?: FetchOptions): Promise<Response> {
|
||||
const logger = options?.logger || console;
|
||||
const url =
|
||||
typeof input === 'string' ? input : input instanceof URL ? input.href : (input as Request).url;
|
||||
|
||||
// Build request options
|
||||
const requestOptions: RequestInit = {
|
||||
method: options?.method || 'GET',
|
||||
headers: options?.headers || {},
|
||||
body: options?.body,
|
||||
signal: options?.signal,
|
||||
credentials: options?.credentials,
|
||||
cache: options?.cache,
|
||||
redirect: options?.redirect,
|
||||
referrer: options?.referrer,
|
||||
referrerPolicy: options?.referrerPolicy,
|
||||
integrity: options?.integrity,
|
||||
keepalive: options?.keepalive,
|
||||
mode: options?.mode,
|
||||
};
|
||||
// Handle proxy for Bun
|
||||
if (options?.proxy) {
|
||||
// Bun supports proxy via fetch options
|
||||
(requestOptions as BunRequestInit).proxy = options.proxy;
|
||||
}
|
||||
|
||||
// Handle timeout
|
||||
if (options?.timeout) {
|
||||
const controller = new AbortController();
|
||||
const timeoutId = setTimeout(() => controller.abort(), options.timeout);
|
||||
requestOptions.signal = controller.signal;
|
||||
|
||||
try {
|
||||
const response = await performFetch(input, requestOptions, logger, url);
|
||||
clearTimeout(timeoutId);
|
||||
return response;
|
||||
} catch (error) {
|
||||
clearTimeout(timeoutId);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
return performFetch(input, requestOptions, logger, url);
|
||||
}
|
||||
|
||||
async function performFetch(
|
||||
input: RequestInfo | URL,
|
||||
requestOptions: RequestInit,
|
||||
logger: any,
|
||||
url: string
|
||||
): Promise<Response> {
|
||||
logger.debug('HTTP request', {
|
||||
method: requestOptions.method,
|
||||
url,
|
||||
headers: requestOptions.headers,
|
||||
proxy: (requestOptions as BunRequestInit).proxy || null,
|
||||
});
|
||||
|
||||
try {
|
||||
const response = await globalThis.fetch(input, requestOptions);
|
||||
|
||||
logger.debug('HTTP response', {
|
||||
url,
|
||||
status: response.status,
|
||||
statusText: response.statusText,
|
||||
ok: response.ok,
|
||||
headers: Object.fromEntries(response.headers.entries()),
|
||||
});
|
||||
|
||||
return response;
|
||||
} catch (error) {
|
||||
logger.debug('HTTP error', {
|
||||
url,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
name: error instanceof Error ? error.name : 'Unknown',
|
||||
});
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@
|
|||
* These functions demonstrate how to use generic types with OHLCV data
|
||||
*/
|
||||
|
||||
import type { OHLCV, HasClose, HasOHLC, HasVolume } from '@stock-bot/types';
|
||||
import type { HasClose, HasOHLC, HasVolume, OHLCV } from '@stock-bot/types';
|
||||
|
||||
/**
|
||||
* Extract close prices from any data structure that has a close field
|
||||
|
|
@ -16,7 +16,9 @@ export function extractCloses<T extends HasClose>(data: T[]): number[] {
|
|||
/**
|
||||
* Extract OHLC prices from any data structure that has OHLC fields
|
||||
*/
|
||||
export function extractOHLC<T extends HasOHLC>(data: T[]): {
|
||||
export function extractOHLC<T extends HasOHLC>(
|
||||
data: T[]
|
||||
): {
|
||||
opens: number[];
|
||||
highs: number[];
|
||||
lows: number[];
|
||||
|
|
@ -43,12 +45,12 @@ export function extractVolumes<T extends HasVolume>(data: T[]): number[] {
|
|||
export function calculateSMA<T extends HasClose>(data: T[], period: number): number[] {
|
||||
const closes = extractCloses(data);
|
||||
const result: number[] = [];
|
||||
|
||||
|
||||
for (let i = period - 1; i < closes.length; i++) {
|
||||
const sum = closes.slice(i - period + 1, i + 1).reduce((a, b) => a + b, 0);
|
||||
result.push(sum / period);
|
||||
}
|
||||
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
|
@ -64,7 +66,7 @@ export function calculateTypicalPrice<T extends HasOHLC>(data: T[]): number[] {
|
|||
*/
|
||||
export function calculateTrueRange<T extends HasOHLC>(data: T[]): number[] {
|
||||
const result: number[] = [];
|
||||
|
||||
|
||||
for (let i = 0; i < data.length; i++) {
|
||||
if (i === 0) {
|
||||
result.push(data[i]!.high - data[i]!.low);
|
||||
|
|
@ -79,7 +81,7 @@ export function calculateTrueRange<T extends HasOHLC>(data: T[]): number[] {
|
|||
result.push(tr);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
|
@ -89,7 +91,7 @@ export function calculateTrueRange<T extends HasOHLC>(data: T[]): number[] {
|
|||
export function calculateReturns<T extends HasClose>(data: T[]): number[] {
|
||||
const closes = extractCloses(data);
|
||||
const returns: number[] = [];
|
||||
|
||||
|
||||
for (let i = 1; i < closes.length; i++) {
|
||||
const current = closes[i]!;
|
||||
const previous = closes[i - 1]!;
|
||||
|
|
@ -99,7 +101,7 @@ export function calculateReturns<T extends HasClose>(data: T[]): number[] {
|
|||
returns.push(0);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return returns;
|
||||
}
|
||||
|
||||
|
|
@ -109,7 +111,7 @@ export function calculateReturns<T extends HasClose>(data: T[]): number[] {
|
|||
export function calculateLogReturns<T extends HasClose>(data: T[]): number[] {
|
||||
const closes = extractCloses(data);
|
||||
const logReturns: number[] = [];
|
||||
|
||||
|
||||
for (let i = 1; i < closes.length; i++) {
|
||||
const current = closes[i]!;
|
||||
const previous = closes[i - 1]!;
|
||||
|
|
@ -119,7 +121,7 @@ export function calculateLogReturns<T extends HasClose>(data: T[]): number[] {
|
|||
logReturns.push(0);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return logReturns;
|
||||
}
|
||||
|
||||
|
|
@ -130,19 +132,19 @@ export function calculateVWAP<T extends HasOHLC & HasVolume>(data: T[]): number[
|
|||
const result: number[] = [];
|
||||
let cumulativeVolumePrice = 0;
|
||||
let cumulativeVolume = 0;
|
||||
|
||||
|
||||
for (const item of data) {
|
||||
const typicalPrice = (item.high + item.low + item.close) / 3;
|
||||
cumulativeVolumePrice += typicalPrice * item.volume;
|
||||
cumulativeVolume += item.volume;
|
||||
|
||||
|
||||
if (cumulativeVolume > 0) {
|
||||
result.push(cumulativeVolumePrice / cumulativeVolume);
|
||||
} else {
|
||||
result.push(typicalPrice);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
|
@ -156,11 +158,7 @@ export function filterBySymbol(data: OHLCV[], symbol: string): OHLCV[] {
|
|||
/**
|
||||
* Filter OHLCV data by time range
|
||||
*/
|
||||
export function filterByTimeRange(
|
||||
data: OHLCV[],
|
||||
startTime: number,
|
||||
endTime: number
|
||||
): OHLCV[] {
|
||||
export function filterByTimeRange(data: OHLCV[], startTime: number, endTime: number): OHLCV[] {
|
||||
return data.filter(item => item.timestamp >= startTime && item.timestamp <= endTime);
|
||||
}
|
||||
|
||||
|
|
@ -169,14 +167,14 @@ export function filterByTimeRange(
|
|||
*/
|
||||
export function groupBySymbol(data: OHLCV[]): Record<string, OHLCV[]> {
|
||||
const grouped: Record<string, OHLCV[]> = {};
|
||||
|
||||
|
||||
for (const item of data) {
|
||||
if (!grouped[item.symbol]) {
|
||||
grouped[item.symbol] = [];
|
||||
}
|
||||
grouped[item.symbol]!.push(item);
|
||||
}
|
||||
|
||||
|
||||
return grouped;
|
||||
}
|
||||
|
||||
|
|
@ -186,6 +184,6 @@ export function groupBySymbol(data: OHLCV[]): Record<string, OHLCV[]> {
|
|||
export function convertTimestamps(data: OHLCV[]): Array<OHLCV & { date: Date }> {
|
||||
return data.map(item => ({
|
||||
...item,
|
||||
date: new Date(item.timestamp)
|
||||
date: new Date(item.timestamp),
|
||||
}));
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,30 +1,30 @@
|
|||
/**
|
||||
* User Agent utility for generating random user agents
|
||||
*/
|
||||
|
||||
// Simple list of common user agents to avoid external dependency
|
||||
const USER_AGENTS = [
|
||||
// Chrome on Windows
|
||||
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
|
||||
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
|
||||
// Chrome on Mac
|
||||
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
|
||||
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
|
||||
// Firefox on Windows
|
||||
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:120.0) Gecko/20100101 Firefox/120.0',
|
||||
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:119.0) Gecko/20100101 Firefox/119.0',
|
||||
// Firefox on Mac
|
||||
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15) Gecko/20100101 Firefox/120.0',
|
||||
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15) Gecko/20100101 Firefox/119.0',
|
||||
// Safari on Mac
|
||||
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.1 Safari/605.1.15',
|
||||
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.0 Safari/605.1.15',
|
||||
// Edge on Windows
|
||||
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0',
|
||||
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0',
|
||||
];
|
||||
|
||||
export function getRandomUserAgent(): string {
|
||||
const index = Math.floor(Math.random() * USER_AGENTS.length);
|
||||
return USER_AGENTS[index]!;
|
||||
}
|
||||
/**
|
||||
* User Agent utility for generating random user agents
|
||||
*/
|
||||
|
||||
// Simple list of common user agents to avoid external dependency
|
||||
const USER_AGENTS = [
|
||||
// Chrome on Windows
|
||||
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
|
||||
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
|
||||
// Chrome on Mac
|
||||
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
|
||||
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
|
||||
// Firefox on Windows
|
||||
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:120.0) Gecko/20100101 Firefox/120.0',
|
||||
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:119.0) Gecko/20100101 Firefox/119.0',
|
||||
// Firefox on Mac
|
||||
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15) Gecko/20100101 Firefox/120.0',
|
||||
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15) Gecko/20100101 Firefox/119.0',
|
||||
// Safari on Mac
|
||||
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.1 Safari/605.1.15',
|
||||
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.0 Safari/605.1.15',
|
||||
// Edge on Windows
|
||||
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0',
|
||||
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0',
|
||||
];
|
||||
|
||||
export function getRandomUserAgent(): string {
|
||||
const index = Math.floor(Math.random() * USER_AGENTS.length);
|
||||
return USER_AGENTS[index]!;
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue