Huge refactor to remove dependency hell and add a type-safe container

This commit is contained in:
Boki 2025-06-24 09:37:51 -04:00
parent 28b9822d55
commit 843a7b9b9b
148 changed files with 3603 additions and 2378 deletions

View file

@ -1,23 +1,23 @@
import { NamespacedCache } from './namespaced-cache';
import type { CacheProvider } from './types';
/**
* Factory function to create namespaced caches
* Provides a clean API for services to get their own namespaced cache
*/
export function createNamespacedCache(
  cache: CacheProvider | null | undefined,
  namespace: string
): CacheProvider | null {
  // No backing cache means there is nothing to namespace.
  return cache ? new NamespacedCache(cache, namespace) : null;
}
/**
* Type guard to check if cache is available
*/
export function isCacheAvailable(cache: unknown): cache is CacheProvider {
  // `unknown` instead of `any` keeps the guard body itself type-checked;
  // callers are unaffected because `unknown` accepts any argument.
  // `!= null` deliberately matches both null and undefined.
  return cache != null && typeof (cache as { get?: unknown }).get === 'function';
}
import { NamespacedCache } from './namespaced-cache';
import type { CacheProvider } from './types';
/**
* Factory function to create namespaced caches
* Provides a clean API for services to get their own namespaced cache
*/
export function createNamespacedCache(
  cache: CacheProvider | null | undefined,
  namespace: string
): CacheProvider | null {
  if (cache) {
    return new NamespacedCache(cache, namespace);
  }
  // Caller gets null when no backing cache exists and should degrade gracefully.
  return null;
}
/**
* Type guard to check if cache is available
*/
export function isCacheAvailable(cache: unknown): cache is CacheProvider {
  // `unknown` instead of `any` keeps the guard body itself type-checked;
  // callers are unaffected because `unknown` accepts any argument.
  // `!= null` deliberately matches both null and undefined.
  return cache != null && typeof (cache as { get?: unknown }).get === 'function';
}

View file

@ -88,7 +88,7 @@ export class RedisConnectionManager {
};
const redis = new Redis(redisOptions);
// Use the provided logger or fall back to instance logger
const log = logger || this.logger;

View file

@ -1,101 +1,100 @@
import type { CacheProvider } from './types';
/**
* A cache wrapper that automatically prefixes all keys with a namespace
* Used to provide isolated cache spaces for different services
*/
export class NamespacedCache implements CacheProvider {
  private readonly prefix: string;

  constructor(
    private readonly cache: CacheProvider,
    private readonly namespace: string
  ) {
    this.prefix = `cache:${namespace}:`;
  }

  /** Build the fully-qualified key for a caller-supplied key. */
  private scoped(key: string): string {
    return this.prefix + key;
  }

  async get<T = any>(key: string): Promise<T | null> {
    return this.cache.get(this.scoped(key));
  }

  async set<T>(
    key: string,
    value: T,
    options?:
      | number
      | {
          ttl?: number;
          preserveTTL?: boolean;
          onlyIfExists?: boolean;
          onlyIfNotExists?: boolean;
          getOldValue?: boolean;
        }
  ): Promise<T | null> {
    return this.cache.set(this.scoped(key), value, options);
  }

  async del(key: string): Promise<void> {
    return this.cache.del(this.scoped(key));
  }

  async exists(key: string): Promise<boolean> {
    return this.cache.exists(this.scoped(key));
  }

  async keys(pattern: string = '*'): Promise<string[]> {
    const matches = await this.cache.keys(this.scoped(pattern));
    // Strip the namespace prefix so callers see the same keys they wrote.
    return matches.map(full => full.slice(this.prefix.length));
  }

  async clear(): Promise<void> {
    // Only touch keys that belong to this namespace.
    const owned = await this.cache.keys(`${this.prefix}*`);
    if (owned.length === 0) {
      return;
    }
    await Promise.all(owned.map(fullKey => this.cache.del(fullKey)));
  }

  getStats() {
    return this.cache.getStats();
  }

  async health(): Promise<boolean> {
    return this.cache.health();
  }

  isReady(): boolean {
    return this.cache.isReady();
  }

  async waitForReady(timeout?: number): Promise<void> {
    return this.cache.waitForReady(timeout);
  }

  async close(): Promise<void> {
    // Intentionally a no-op: the wrapped provider is owned (and closed) elsewhere.
  }

  getNamespace(): string {
    return this.namespace;
  }

  getFullPrefix(): string {
    return this.prefix;
  }

  /**
   * Read a value by its raw (un-prefixed) key, bypassing the namespace.
   * Returns null when the underlying provider has no `getRaw` support.
   */
  async getRaw<T = unknown>(key: string): Promise<T | null> {
    return this.cache.getRaw ? this.cache.getRaw<T>(key) : null;
  }
}
import type { CacheProvider } from './types';
/**
* A cache wrapper that automatically prefixes all keys with a namespace
* Used to provide isolated cache spaces for different services
*/
export class NamespacedCache implements CacheProvider {
  private readonly prefix: string;

  constructor(
    private readonly cache: CacheProvider,
    private readonly namespace: string
  ) {
    this.prefix = `cache:${namespace}:`;
  }

  async get<T = any>(key: string): Promise<T | null> {
    const namespacedKey = `${this.prefix}${key}`;
    return this.cache.get(namespacedKey);
  }

  async set<T>(
    key: string,
    value: T,
    options?:
      | number
      | {
          ttl?: number;
          preserveTTL?: boolean;
          onlyIfExists?: boolean;
          onlyIfNotExists?: boolean;
          getOldValue?: boolean;
        }
  ): Promise<T | null> {
    const namespacedKey = `${this.prefix}${key}`;
    return this.cache.set(namespacedKey, value, options);
  }

  async del(key: string): Promise<void> {
    const namespacedKey = `${this.prefix}${key}`;
    return this.cache.del(namespacedKey);
  }

  async exists(key: string): Promise<boolean> {
    const namespacedKey = `${this.prefix}${key}`;
    return this.cache.exists(namespacedKey);
  }

  async keys(pattern: string = '*'): Promise<string[]> {
    const raw = await this.cache.keys(`${this.prefix}${pattern}`);
    // Callers work with un-prefixed keys, so peel the prefix back off.
    const width = this.prefix.length;
    return raw.map(entry => entry.substring(width));
  }

  async clear(): Promise<void> {
    // Delete only this namespace's keys; other namespaces are untouched.
    const toRemove = await this.cache.keys(`${this.prefix}*`);
    if (toRemove.length > 0) {
      await Promise.all(toRemove.map(entry => this.cache.del(entry)));
    }
  }

  getStats() {
    return this.cache.getStats();
  }

  async health(): Promise<boolean> {
    return this.cache.health();
  }

  isReady(): boolean {
    return this.cache.isReady();
  }

  async waitForReady(timeout?: number): Promise<void> {
    return this.cache.waitForReady(timeout);
  }

  async close(): Promise<void> {
    // Deliberately empty: this wrapper does not own the underlying connection,
    // so closing is the responsibility of whoever created the wrapped cache.
  }

  getNamespace(): string {
    return this.namespace;
  }

  getFullPrefix(): string {
    return this.prefix;
  }

  /**
   * Fetch a value by its raw Redis key, skipping the namespace prefix.
   * Falls back to null when the wrapped cache does not implement getRaw.
   */
  async getRaw<T = unknown>(key: string): Promise<T | null> {
    if (this.cache.getRaw) {
      return this.cache.getRaw<T>(key);
    }
    return null;
  }
}

View file

@ -1,9 +1,9 @@
import { join } from 'path';
import { z } from 'zod';
import { getLogger } from '@stock-bot/logger';
import { EnvLoader } from './loaders/env.loader';
import { FileLoader } from './loaders/file.loader';
import { ConfigError, ConfigValidationError } from './errors';
import { getLogger } from '@stock-bot/logger';
import type {
ConfigLoader,
ConfigManagerOptions,
@ -82,9 +82,9 @@ export class ConfigManager<T = Record<string, unknown>> {
expected: (err as any).expected,
received: (err as any).received,
}));
this.logger.error('Configuration validation failed:', errorDetails);
throw new ConfigValidationError('Configuration validation failed', error.errors);
}
throw error;

View file

@ -1,10 +1,10 @@
// Import necessary types
import { z } from 'zod';
import { EnvLoader } from './loaders/env.loader';
import { FileLoader } from './loaders/file.loader';
import { ConfigManager } from './config-manager';
import type { BaseAppConfig } from './schemas';
import { baseAppSchema } from './schemas';
import { z } from 'zod';
// Legacy singleton instance for backward compatibility
let configInstance: ConfigManager<BaseAppConfig> | null = null;
@ -56,7 +56,6 @@ function loadCriticalEnvVarsSync(): void {
// Load critical env vars immediately
loadCriticalEnvVarsSync();
/**
* Initialize configuration for a service in a monorepo.
* Automatically loads configs from:
@ -121,8 +120,6 @@ export function getLogConfig() {
return getConfig().log;
}
export function getQueueConfig() {
return getConfig().queue;
}

View file

@ -1,5 +1,5 @@
import { describe, expect, it } from 'bun:test';
import { unifiedAppSchema, toUnifiedConfig, getStandardServiceName } from '../unified-app.schema';
import { getStandardServiceName, toUnifiedConfig, unifiedAppSchema } from '../unified-app.schema';
describe('UnifiedAppConfig', () => {
describe('getStandardServiceName', () => {
@ -74,13 +74,13 @@ describe('UnifiedAppConfig', () => {
};
const result = unifiedAppSchema.parse(config);
// Should have both nested and flat structure
expect(result.postgres).toBeDefined();
expect(result.mongodb).toBeDefined();
expect(result.database?.postgres).toBeDefined();
expect(result.database?.mongodb).toBeDefined();
// Values should match
expect(result.postgres?.host).toBe('localhost');
expect(result.postgres?.port).toBe(5432);
@ -144,7 +144,7 @@ describe('UnifiedAppConfig', () => {
};
const unified = toUnifiedConfig(stockBotConfig);
expect(unified.service.serviceName).toBe('data-ingestion');
expect(unified.redis).toBeDefined();
expect(unified.redis?.host).toBe('localhost');
@ -152,4 +152,4 @@ describe('UnifiedAppConfig', () => {
expect(unified.postgres?.host).toBe('localhost');
});
});
});
});

View file

@ -1,61 +1,63 @@
import { z } from 'zod';
import { environmentSchema } from './base.schema';
import {
postgresConfigSchema,
mongodbConfigSchema,
questdbConfigSchema,
dragonflyConfigSchema
} from './database.schema';
import {
serviceConfigSchema,
loggingConfigSchema,
queueConfigSchema,
httpConfigSchema,
webshareConfigSchema,
browserConfigSchema,
proxyConfigSchema
} from './service.schema';
/**
* Generic base application schema that can be extended by specific apps
*/
export const baseAppSchema = z.object({
  // Basic app info
  name: z.string(),
  version: z.string(),
  environment: environmentSchema.default('development'),
  // Service configuration (name, ports, etc. — see serviceConfigSchema)
  service: serviceConfigSchema,
  // Logging configuration
  log: loggingConfigSchema,
  // Database configuration - apps can choose which databases they need;
  // every entry (and the whole object) is optional.
  database: z.object({
    postgres: postgresConfigSchema.optional(),
    mongodb: mongodbConfigSchema.optional(),
    questdb: questdbConfigSchema.optional(),
    dragonfly: dragonflyConfigSchema.optional(),
  }).optional(),
  // Redis configuration (used for cache and queue); reuses the Dragonfly
  // schema — presumably because Dragonfly is Redis-compatible (confirm).
  redis: dragonflyConfigSchema.optional(),
  // Queue configuration
  queue: queueConfigSchema.optional(),
  // HTTP client configuration
  http: httpConfigSchema.optional(),
  // WebShare proxy configuration
  webshare: webshareConfigSchema.optional(),
  // Browser configuration
  browser: browserConfigSchema.optional(),
  // Proxy manager configuration
  proxy: proxyConfigSchema.optional(),
});
// Inferred TypeScript type for a validated base application config.
export type BaseAppConfig = z.infer<typeof baseAppSchema>;
import { z } from 'zod';
import { environmentSchema } from './base.schema';
import {
dragonflyConfigSchema,
mongodbConfigSchema,
postgresConfigSchema,
questdbConfigSchema,
} from './database.schema';
import {
browserConfigSchema,
httpConfigSchema,
loggingConfigSchema,
proxyConfigSchema,
queueConfigSchema,
serviceConfigSchema,
webshareConfigSchema,
} from './service.schema';
/**
* Generic base application schema that can be extended by specific apps
*/
export const baseAppSchema = z.object({
  // Basic app info
  name: z.string(),
  version: z.string(),
  environment: environmentSchema.default('development'),
  // Service configuration (name, ports, etc. — see serviceConfigSchema)
  service: serviceConfigSchema,
  // Logging configuration
  log: loggingConfigSchema,
  // Database configuration - apps can choose which databases they need;
  // every entry (and the whole object) is optional.
  database: z
    .object({
      postgres: postgresConfigSchema.optional(),
      mongodb: mongodbConfigSchema.optional(),
      questdb: questdbConfigSchema.optional(),
      dragonfly: dragonflyConfigSchema.optional(),
    })
    .optional(),
  // Redis configuration (used for cache and queue); reuses the Dragonfly
  // schema — presumably because Dragonfly is Redis-compatible (confirm).
  redis: dragonflyConfigSchema.optional(),
  // Queue configuration
  queue: queueConfigSchema.optional(),
  // HTTP client configuration
  http: httpConfigSchema.optional(),
  // WebShare proxy configuration
  webshare: webshareConfigSchema.optional(),
  // Browser configuration
  browser: browserConfigSchema.optional(),
  // Proxy manager configuration
  proxy: proxyConfigSchema.optional(),
});
// Inferred TypeScript type for a validated base application config.
export type BaseAppConfig = z.infer<typeof baseAppSchema>;

View file

@ -15,4 +15,3 @@ export type { BaseAppConfig } from './base-app.schema';
// Export unified schema for standardized configuration
export { unifiedAppSchema, toUnifiedConfig, getStandardServiceName } from './unified-app.schema';
export type { UnifiedAppConfig } from './unified-app.schema';

View file

@ -100,8 +100,10 @@ export const proxyConfigSchema = z.object({
enabled: z.boolean().default(false),
cachePrefix: z.string().default('proxy:'),
ttl: z.number().default(3600),
webshare: z.object({
apiKey: z.string(),
apiUrl: z.string().default('https://proxy.webshare.io/api/v2/'),
}).optional(),
webshare: z
.object({
apiKey: z.string(),
apiUrl: z.string().default('https://proxy.webshare.io/api/v2/'),
})
.optional(),
});

View file

@ -1,62 +1,67 @@
import { z } from 'zod';
import { baseAppSchema } from './base-app.schema';
import {
postgresConfigSchema,
mongodbConfigSchema,
questdbConfigSchema,
dragonflyConfigSchema
import {
dragonflyConfigSchema,
mongodbConfigSchema,
postgresConfigSchema,
questdbConfigSchema,
} from './database.schema';
/**
* Unified application configuration schema that provides both nested and flat access
* to database configurations for backward compatibility while maintaining a clean structure
*/
export const unifiedAppSchema = baseAppSchema.extend({
// Flat database configs for DI system (these take precedence)
redis: dragonflyConfigSchema.optional(),
mongodb: mongodbConfigSchema.optional(),
postgres: postgresConfigSchema.optional(),
questdb: questdbConfigSchema.optional(),
}).transform((data) => {
// Ensure service.serviceName is set from service.name if not provided
if (data.service && !data.service.serviceName) {
data.service.serviceName = data.service.name.replace(/([A-Z])/g, '-$1').toLowerCase().replace(/^-/, '');
}
export const unifiedAppSchema = baseAppSchema
.extend({
// Flat database configs for DI system (these take precedence)
redis: dragonflyConfigSchema.optional(),
mongodb: mongodbConfigSchema.optional(),
postgres: postgresConfigSchema.optional(),
questdb: questdbConfigSchema.optional(),
})
.transform(data => {
// Ensure service.serviceName is set from service.name if not provided
if (data.service && !data.service.serviceName) {
data.service.serviceName = data.service.name
.replace(/([A-Z])/g, '-$1')
.toLowerCase()
.replace(/^-/, '');
}
// If flat configs exist, ensure they're also in the nested database object
if (data.redis || data.mongodb || data.postgres || data.questdb) {
data.database = {
...data.database,
dragonfly: data.redis || data.database?.dragonfly,
mongodb: data.mongodb || data.database?.mongodb,
postgres: data.postgres || data.database?.postgres,
questdb: data.questdb || data.database?.questdb,
};
}
// If flat configs exist, ensure they're also in the nested database object
if (data.redis || data.mongodb || data.postgres || data.questdb) {
data.database = {
...data.database,
dragonfly: data.redis || data.database?.dragonfly,
mongodb: data.mongodb || data.database?.mongodb,
postgres: data.postgres || data.database?.postgres,
questdb: data.questdb || data.database?.questdb,
};
}
// If nested configs exist but flat ones don't, copy them to flat structure
if (data.database) {
if (data.database.dragonfly && !data.redis) {
data.redis = data.database.dragonfly;
}
if (data.database.mongodb && !data.mongodb) {
data.mongodb = data.database.mongodb;
}
if (data.database.postgres && !data.postgres) {
data.postgres = data.database.postgres;
}
if (data.database.questdb && !data.questdb) {
// Handle the ilpPort -> influxPort mapping for DI system
const questdbConfig = { ...data.database.questdb };
if ('ilpPort' in questdbConfig && !('influxPort' in questdbConfig)) {
(questdbConfig as any).influxPort = questdbConfig.ilpPort;
// If nested configs exist but flat ones don't, copy them to flat structure
if (data.database) {
if (data.database.dragonfly && !data.redis) {
data.redis = data.database.dragonfly;
}
if (data.database.mongodb && !data.mongodb) {
data.mongodb = data.database.mongodb;
}
if (data.database.postgres && !data.postgres) {
data.postgres = data.database.postgres;
}
if (data.database.questdb && !data.questdb) {
// Handle the ilpPort -> influxPort mapping for DI system
const questdbConfig = { ...data.database.questdb };
if ('ilpPort' in questdbConfig && !('influxPort' in questdbConfig)) {
(questdbConfig as any).influxPort = questdbConfig.ilpPort;
}
data.questdb = questdbConfig;
}
data.questdb = questdbConfig;
}
}
return data;
});
return data;
});
export type UnifiedAppConfig = z.infer<typeof unifiedAppSchema>;
@ -72,5 +77,8 @@ export function toUnifiedConfig(config: any): UnifiedAppConfig {
*/
export function getStandardServiceName(serviceName: string): string {
  // Insert a dash before every capital, lowercase everything, then drop the
  // leading dash produced when the name starts with a capital letter.
  const kebab = serviceName.replace(/([A-Z])/g, '-$1').toLowerCase();
  return kebab.startsWith('-') ? kebab.slice(1) : kebab;
}
return serviceName
.replace(/([A-Z])/g, '-$1')
.toLowerCase()
.replace(/^-/, '');
}

View file

@ -20,6 +20,8 @@
"@stock-bot/queue": "workspace:*",
"@stock-bot/shutdown": "workspace:*",
"@stock-bot/handlers": "workspace:*",
"@stock-bot/handler-registry": "workspace:*",
"glob": "^10.0.0",
"zod": "^3.23.8",
"hono": "^4.0.0",
"awilix": "^12.0.5"

View file

@ -3,16 +3,16 @@
* Creates a decoupled, reusable dependency injection container
*/
import { type AwilixContainer } from 'awilix';
import type { Browser } from '@stock-bot/browser';
import type { CacheProvider } from '@stock-bot/cache';
import type { IServiceContainer } from '@stock-bot/types';
import type { Logger } from '@stock-bot/logger';
import type { MongoDBClient } from '@stock-bot/mongodb';
import type { PostgreSQLClient } from '@stock-bot/postgres';
import type { ProxyManager } from '@stock-bot/proxy';
import type { QuestDBClient } from '@stock-bot/questdb';
import type { QueueManager } from '@stock-bot/queue';
import { type AwilixContainer } from 'awilix';
import type { IServiceContainer } from '@stock-bot/types';
import type { AppConfig } from './config/schemas';
// Re-export for backward compatibility
@ -41,8 +41,6 @@ export interface ServiceDefinitions {
serviceContainer: IServiceContainer;
}
// Export typed container
export type ServiceContainer = AwilixContainer<ServiceDefinitions>;
export type ServiceCradle = ServiceDefinitions;
@ -59,5 +57,3 @@ export interface ServiceContainerOptions {
enableBrowser?: boolean;
enableProxy?: boolean;
}

View file

@ -1,9 +1,9 @@
import { z } from 'zod';
import { redisConfigSchema } from './redis.schema';
import { mongodbConfigSchema } from './mongodb.schema';
import { postgresConfigSchema } from './postgres.schema';
import { questdbConfigSchema } from './questdb.schema';
import { proxyConfigSchema, browserConfigSchema, queueConfigSchema } from './service.schema';
import { redisConfigSchema } from './redis.schema';
import { browserConfigSchema, proxyConfigSchema, queueConfigSchema } from './service.schema';
export const appConfigSchema = z.object({
redis: redisConfigSchema,
@ -13,11 +13,13 @@ export const appConfigSchema = z.object({
proxy: proxyConfigSchema.optional(),
browser: browserConfigSchema.optional(),
queue: queueConfigSchema.optional(),
service: z.object({
name: z.string(),
serviceName: z.string().optional(), // Standard kebab-case service name
port: z.number().optional(),
}).optional(),
service: z
.object({
name: z.string(),
serviceName: z.string().optional(), // Standard kebab-case service name
port: z.number().optional(),
})
.optional(),
});
export type AppConfig = z.infer<typeof appConfigSchema>;
@ -27,4 +29,4 @@ export * from './redis.schema';
export * from './mongodb.schema';
export * from './postgres.schema';
export * from './questdb.schema';
export * from './service.schema';
export * from './service.schema';

View file

@ -1,9 +1,9 @@
import { z } from 'zod';
/** Connection settings for MongoDB. */
export const mongodbConfigSchema = z.object({
  // Lets the client be switched off without deleting the rest of the config.
  enabled: z.boolean().optional().default(true),
  // MongoDB connection URI.
  uri: z.string(),
  // Name of the database to operate on.
  database: z.string(),
});
export type MongoDBConfig = z.infer<typeof mongodbConfigSchema>;
import { z } from 'zod';
/** Connection settings for MongoDB. */
export const mongodbConfigSchema = z.object({
  // Lets the client be switched off without deleting the rest of the config.
  enabled: z.boolean().optional().default(true),
  // MongoDB connection URI.
  uri: z.string(),
  // Name of the database to operate on.
  database: z.string(),
});
export type MongoDBConfig = z.infer<typeof mongodbConfigSchema>;

View file

@ -1,12 +1,12 @@
import { z } from 'zod';
/** Connection settings for PostgreSQL. */
export const postgresConfigSchema = z.object({
  // Lets the client be switched off without deleting the rest of the config.
  enabled: z.boolean().optional().default(true),
  host: z.string().default('localhost'),
  // Standard PostgreSQL port.
  port: z.number().default(5432),
  // database/user/password are required — there are no safe defaults for them.
  database: z.string(),
  user: z.string(),
  password: z.string(),
});
export type PostgresConfig = z.infer<typeof postgresConfigSchema>;
import { z } from 'zod';
/** Connection settings for PostgreSQL. */
export const postgresConfigSchema = z.object({
  // Lets the client be switched off without deleting the rest of the config.
  enabled: z.boolean().optional().default(true),
  host: z.string().default('localhost'),
  // Standard PostgreSQL port.
  port: z.number().default(5432),
  // database/user/password are required — there are no safe defaults for them.
  database: z.string(),
  user: z.string(),
  password: z.string(),
});
export type PostgresConfig = z.infer<typeof postgresConfigSchema>;

View file

@ -1,12 +1,12 @@
import { z } from 'zod';
/** Connection settings for QuestDB, which exposes several protocols on separate ports. */
export const questdbConfigSchema = z.object({
  // Lets the client be switched off without deleting the rest of the config.
  enabled: z.boolean().optional().default(true),
  host: z.string().default('localhost'),
  // HTTP REST / web console port (QuestDB default 9000).
  httpPort: z.number().optional().default(9000),
  // PostgreSQL wire-protocol port (QuestDB default 8812).
  pgPort: z.number().optional().default(8812),
  // InfluxDB line-protocol (ILP) ingestion port (QuestDB default 9009).
  influxPort: z.number().optional().default(9009),
  database: z.string().optional().default('questdb'),
});
export type QuestDBConfig = z.infer<typeof questdbConfigSchema>;
import { z } from 'zod';
/** Connection settings for QuestDB, which exposes several protocols on separate ports. */
export const questdbConfigSchema = z.object({
  // Lets the client be switched off without deleting the rest of the config.
  enabled: z.boolean().optional().default(true),
  host: z.string().default('localhost'),
  // HTTP REST / web console port (QuestDB default 9000).
  httpPort: z.number().optional().default(9000),
  // PostgreSQL wire-protocol port (QuestDB default 8812).
  pgPort: z.number().optional().default(8812),
  // InfluxDB line-protocol (ILP) ingestion port (QuestDB default 9009).
  influxPort: z.number().optional().default(9009),
  database: z.string().optional().default('questdb'),
});
export type QuestDBConfig = z.infer<typeof questdbConfigSchema>;

View file

@ -1,12 +1,12 @@
import { z } from 'zod';
/** Connection settings for Redis (or a Redis-compatible server). */
export const redisConfigSchema = z.object({
  // Lets the client be switched off without deleting the rest of the config.
  enabled: z.boolean().optional().default(true),
  host: z.string().default('localhost'),
  // Standard Redis port.
  port: z.number().default(6379),
  // Optional credentials; omit both for unauthenticated servers.
  password: z.string().optional(),
  username: z.string().optional(),
  // Logical database index to SELECT after connecting.
  db: z.number().optional().default(0),
});
export type RedisConfig = z.infer<typeof redisConfigSchema>;
import { z } from 'zod';
/** Connection settings for Redis (or a Redis-compatible server). */
export const redisConfigSchema = z.object({
  // Lets the client be switched off without deleting the rest of the config.
  enabled: z.boolean().optional().default(true),
  host: z.string().default('localhost'),
  // Standard Redis port.
  port: z.number().default(6379),
  // Optional credentials; omit both for unauthenticated servers.
  password: z.string().optional(),
  username: z.string().optional(),
  // Logical database index to SELECT after connecting.
  db: z.number().optional().default(0),
});
export type RedisConfig = z.infer<typeof redisConfigSchema>;

View file

@ -4,10 +4,12 @@ export const proxyConfigSchema = z.object({
enabled: z.boolean().default(false),
cachePrefix: z.string().optional().default('proxy:'),
ttl: z.number().optional().default(3600),
webshare: z.object({
apiKey: z.string(),
apiUrl: z.string().default('https://proxy.webshare.io/api/v2/'),
}).optional(),
webshare: z
.object({
apiKey: z.string(),
apiUrl: z.string().default('https://proxy.webshare.io/api/v2/'),
})
.optional(),
});
export const browserConfigSchema = z.object({
@ -21,18 +23,23 @@ export const queueConfigSchema = z.object({
concurrency: z.number().optional().default(1),
enableScheduledJobs: z.boolean().optional().default(true),
delayWorkerStart: z.boolean().optional().default(false),
defaultJobOptions: z.object({
attempts: z.number().default(3),
backoff: z.object({
type: z.enum(['exponential', 'fixed']).default('exponential'),
delay: z.number().default(1000),
}).default({}),
removeOnComplete: z.number().default(100),
removeOnFail: z.number().default(50),
timeout: z.number().optional(),
}).optional().default({}),
defaultJobOptions: z
.object({
attempts: z.number().default(3),
backoff: z
.object({
type: z.enum(['exponential', 'fixed']).default('exponential'),
delay: z.number().default(1000),
})
.default({}),
removeOnComplete: z.number().default(100),
removeOnFail: z.number().default(50),
timeout: z.number().optional(),
})
.optional()
.default({}),
});
export type ProxyConfig = z.infer<typeof proxyConfigSchema>;
export type BrowserConfig = z.infer<typeof browserConfigSchema>;
export type QueueConfig = z.infer<typeof queueConfigSchema>;
export type QueueConfig = z.infer<typeof queueConfigSchema>;

View file

@ -1,15 +1,17 @@
import { createContainer, InjectionMode, asFunction, type AwilixContainer } from 'awilix';
import { asClass, asFunction, createContainer, InjectionMode, type AwilixContainer } from 'awilix';
import type { BaseAppConfig as StockBotAppConfig, UnifiedAppConfig } from '@stock-bot/config';
import { appConfigSchema, type AppConfig } from '../config/schemas';
import { toUnifiedConfig } from '@stock-bot/config';
import {
registerCoreServices,
import { HandlerRegistry } from '@stock-bot/handler-registry';
import { appConfigSchema, type AppConfig } from '../config/schemas';
import {
registerApplicationServices,
registerCacheServices,
registerCoreServices,
registerDatabaseServices,
registerApplicationServices
} from '../registrations';
import { HandlerScanner } from '../scanner';
import { ServiceLifecycleManager } from '../utils/lifecycle';
import type { ServiceDefinitions, ContainerBuildOptions } from './types';
import type { ContainerBuildOptions, ServiceDefinitions } from './types';
export class ServiceContainerBuilder {
private config: Partial<AppConfig> = {};
@ -38,7 +40,10 @@ export class ServiceContainerBuilder {
return this;
}
enableService(service: keyof Omit<ContainerBuildOptions, 'skipInitialization' | 'initializationTimeout'>, enabled = true): this {
enableService(
service: keyof Omit<ContainerBuildOptions, 'skipInitialization' | 'initializationTimeout'>,
enabled = true
): this {
this.options[service] = enabled;
return this;
}
@ -51,7 +56,7 @@ export class ServiceContainerBuilder {
async build(): Promise<AwilixContainer<ServiceDefinitions>> {
// Validate and prepare config
const validatedConfig = this.prepareConfig();
// Create container
const container = createContainer<ServiceDefinitions>({
injectionMode: InjectionMode.PROXY,
@ -77,17 +82,19 @@ export class ServiceContainerBuilder {
private applyServiceOptions(config: Partial<AppConfig>): AppConfig {
// Ensure questdb config has the right field names for DI
const questdbConfig = config.questdb ? {
...config.questdb,
influxPort: (config.questdb as any).influxPort || (config.questdb as any).ilpPort || 9009,
} : {
enabled: true,
host: 'localhost',
httpPort: 9000,
pgPort: 8812,
influxPort: 9009,
database: 'questdb',
};
const questdbConfig = config.questdb
? {
...config.questdb,
influxPort: (config.questdb as any).influxPort || (config.questdb as any).ilpPort || 9009,
}
: {
enabled: true,
host: 'localhost',
httpPort: 9000,
pgPort: 8812,
influxPort: 9009,
database: 'questdb',
};
return {
redis: config.redis || {
@ -110,61 +117,88 @@ export class ServiceContainerBuilder {
password: 'postgres',
},
questdb: this.options.enableQuestDB ? questdbConfig : undefined,
proxy: this.options.enableProxy ? (config.proxy || { enabled: false, cachePrefix: 'proxy:', ttl: 3600 }) : undefined,
browser: this.options.enableBrowser ? (config.browser || { headless: true, timeout: 30000 }) : undefined,
queue: this.options.enableQueue ? (config.queue || {
enabled: true,
workers: 1,
concurrency: 1,
enableScheduledJobs: true,
delayWorkerStart: false,
defaultJobOptions: {
attempts: 3,
backoff: { type: 'exponential' as const, delay: 1000 },
removeOnComplete: 100,
removeOnFail: 50,
}
}) : undefined,
proxy: this.options.enableProxy
? config.proxy || { enabled: false, cachePrefix: 'proxy:', ttl: 3600 }
: undefined,
browser: this.options.enableBrowser
? config.browser || { headless: true, timeout: 30000 }
: undefined,
queue: this.options.enableQueue
? config.queue || {
enabled: true,
workers: 1,
concurrency: 1,
enableScheduledJobs: true,
delayWorkerStart: false,
defaultJobOptions: {
attempts: 3,
backoff: { type: 'exponential' as const, delay: 1000 },
removeOnComplete: 100,
removeOnFail: 50,
},
}
: undefined,
service: config.service,
};
}
private registerServices(container: AwilixContainer<ServiceDefinitions>, config: AppConfig): void {
private registerServices(
container: AwilixContainer<ServiceDefinitions>,
config: AppConfig
): void {
// Register handler infrastructure first
container.register({
handlerRegistry: asClass(HandlerRegistry).singleton(),
handlerScanner: asClass(HandlerScanner).singleton(),
});
registerCoreServices(container, config);
registerCacheServices(container, config);
registerDatabaseServices(container, config);
registerApplicationServices(container, config);
// Register service container aggregate
container.register({
serviceContainer: asFunction(({
config: _config, logger, cache, globalCache, proxyManager, browser,
queueManager, mongoClient, postgresClient, questdbClient
}) => ({
logger,
cache,
globalCache,
proxy: proxyManager, // Map proxyManager to proxy
browser,
queue: queueManager, // Map queueManager to queue
mongodb: mongoClient, // Map mongoClient to mongodb
postgres: postgresClient, // Map postgresClient to postgres
questdb: questdbClient, // Map questdbClient to questdb
})).singleton(),
serviceContainer: asFunction(
({
config: _config,
logger,
cache,
globalCache,
proxyManager,
browser,
queueManager,
mongoClient,
postgresClient,
questdbClient,
}) => ({
logger,
cache,
globalCache,
proxy: proxyManager, // Map proxyManager to proxy
browser,
queue: queueManager, // Map queueManager to queue
mongodb: mongoClient, // Map mongoClient to mongodb
postgres: postgresClient, // Map postgresClient to postgres
questdb: questdbClient, // Map questdbClient to questdb
})
).singleton(),
});
}
private transformStockBotConfig(config: UnifiedAppConfig): Partial<AppConfig> {
// Unified config already has flat structure, just extract what we need
// Handle questdb field name mapping
const questdb = config.questdb ? {
enabled: config.questdb.enabled || true,
host: config.questdb.host || 'localhost',
httpPort: config.questdb.httpPort || 9000,
pgPort: config.questdb.pgPort || 8812,
influxPort: (config.questdb as any).influxPort || (config.questdb as any).ilpPort || 9009,
database: config.questdb.database || 'questdb',
} : undefined;
const questdb = config.questdb
? {
enabled: config.questdb.enabled || true,
host: config.questdb.host || 'localhost',
httpPort: config.questdb.httpPort || 9000,
pgPort: config.questdb.pgPort || 8812,
influxPort: (config.questdb as any).influxPort || (config.questdb as any).ilpPort || 9009,
database: config.questdb.database || 'questdb',
}
: undefined;
return {
redis: config.redis,
@ -177,4 +211,4 @@ export class ServiceContainerBuilder {
service: config.service,
};
}
}
}

View file

@ -1,48 +1,54 @@
import type { Browser } from '@stock-bot/browser';
import type { CacheProvider } from '@stock-bot/cache';
import type { IServiceContainer } from '@stock-bot/types';
import type { Logger } from '@stock-bot/logger';
import type { MongoDBClient } from '@stock-bot/mongodb';
import type { PostgreSQLClient } from '@stock-bot/postgres';
import type { ProxyManager } from '@stock-bot/proxy';
import type { QuestDBClient } from '@stock-bot/questdb';
import type { SmartQueueManager } from '@stock-bot/queue';
import type { AppConfig } from '../config/schemas';
/**
 * Every registration resolvable from the DI container, keyed by the
 * name used to inject it.
 */
export interface ServiceDefinitions {
  // Configuration
  config: AppConfig;
  logger: Logger;
  // Core services — nullable entries are presumably null when the matching
  // enable* option below is off; confirm against the registration modules.
  cache: CacheProvider | null;
  globalCache: CacheProvider | null;
  proxyManager: ProxyManager | null;
  browser: Browser;
  queueManager: SmartQueueManager | null;
  // Database clients
  mongoClient: MongoDBClient | null;
  postgresClient: PostgreSQLClient | null;
  questdbClient: QuestDBClient | null;
  // Aggregate service container exposing the services under their public names
  serviceContainer: IServiceContainer;
}

/** Alias used where the DI "cradle" naming reads more naturally. */
export type ServiceCradle = ServiceDefinitions;

/** Per-service feature toggles deciding which services the container wires up. */
export interface ServiceContainerOptions {
  enableQuestDB?: boolean;
  enableMongoDB?: boolean;
  enablePostgres?: boolean;
  enableCache?: boolean;
  enableQueue?: boolean;
  enableBrowser?: boolean;
  enableProxy?: boolean;
}

/** Extra knobs available when actually building a container. */
export interface ContainerBuildOptions extends ServiceContainerOptions {
  // Register services but skip eager initialization when true.
  skipInitialization?: boolean;
  // Timeout for initialization — unit not visible here (likely ms); confirm at call site.
  initializationTimeout?: number;
}
import type { Browser } from '@stock-bot/browser';
import type { CacheProvider } from '@stock-bot/cache';
import type { HandlerRegistry } from '@stock-bot/handler-registry';
import type { Logger } from '@stock-bot/logger';
import type { MongoDBClient } from '@stock-bot/mongodb';
import type { PostgreSQLClient } from '@stock-bot/postgres';
import type { ProxyManager } from '@stock-bot/proxy';
import type { QuestDBClient } from '@stock-bot/questdb';
import type { SmartQueueManager } from '@stock-bot/queue';
import type { IServiceContainer } from '@stock-bot/types';
import type { AppConfig } from '../config/schemas';
import type { HandlerScanner } from '../scanner';
/**
 * All services resolvable from the Awilix container cradle.
 * Nullable entries are registered as `null` when the corresponding feature
 * is disabled in config (the registration modules fall back to `asValue(null)`).
 */
export interface ServiceDefinitions {
  // Configuration
  /** Fully-resolved application configuration. */
  config: AppConfig;
  /** Root logger for the container. */
  logger: Logger;

  // Handler infrastructure
  /** Central registry of handler metadata and operation implementations. */
  handlerRegistry: HandlerRegistry;
  /** Scanner that discovers handler classes and registers them. */
  handlerScanner: HandlerScanner;

  // Core services
  /** Service-scoped cache; null when Redis/caching is disabled. */
  cache: CacheProvider | null;
  /** Cache shared across services; null when caching is disabled. */
  globalCache: CacheProvider | null;
  /** Proxy rotation manager; null when proxying is disabled. */
  proxyManager: ProxyManager | null;
  browser: Browser;
  /** Queue manager; null when queueing or Redis is disabled. */
  queueManager: SmartQueueManager | null;

  // Database clients
  mongoClient: MongoDBClient | null;
  postgresClient: PostgreSQLClient | null;
  questdbClient: QuestDBClient | null;

  // Aggregate service container
  /** Facade aggregating the services above behind IServiceContainer. */
  serviceContainer: IServiceContainer;
}

/** Alias used for the Awilix cradle typing. */
export type ServiceCradle = ServiceDefinitions;

/** Per-feature enable flags consulted when building the container. */
export interface ServiceContainerOptions {
  enableQuestDB?: boolean;
  enableMongoDB?: boolean;
  enablePostgres?: boolean;
  enableCache?: boolean;
  enableQueue?: boolean;
  enableBrowser?: boolean;
  enableProxy?: boolean;
}

/** Options controlling container construction and eager initialization. */
export interface ContainerBuildOptions extends ServiceContainerOptions {
  /** Skip eager service initialization (connect/start) when true. */
  skipInitialization?: boolean;
  /** Per-service initialization timeout in milliseconds (see ServiceLifecycleManager). */
  initializationTimeout?: number;
}

View file

@ -3,10 +3,7 @@ import { NamespacedCache, type CacheProvider } from '@stock-bot/cache';
import type { ServiceDefinitions } from '../container/types';
export class CacheFactory {
static createNamespacedCache(
baseCache: CacheProvider,
namespace: string
): NamespacedCache {
static createNamespacedCache(baseCache: CacheProvider, namespace: string): NamespacedCache {
return new NamespacedCache(baseCache, namespace);
}
@ -15,8 +12,10 @@ export class CacheFactory {
serviceName: string
): CacheProvider | null {
const baseCache = container.cradle.cache;
if (!baseCache) {return null;}
if (!baseCache) {
return null;
}
return this.createNamespacedCache(baseCache, serviceName);
}
@ -25,8 +24,10 @@ export class CacheFactory {
handlerName: string
): CacheProvider | null {
const baseCache = container.cradle.cache;
if (!baseCache) {return null;}
if (!baseCache) {
return null;
}
return this.createNamespacedCache(baseCache, `handler:${handlerName}`);
}
@ -35,10 +36,12 @@ export class CacheFactory {
prefix: string
): CacheProvider | null {
const baseCache = container.cradle.cache;
if (!baseCache) {return null;}
if (!baseCache) {
return null;
}
// Remove 'cache:' prefix if already included
const cleanPrefix = prefix.replace(/^cache:/, '');
return this.createNamespacedCache(baseCache, cleanPrefix);
}
}
}

View file

@ -1 +1 @@
export { CacheFactory } from './cache.factory';
export { CacheFactory } from './cache.factory';

View file

@ -33,3 +33,7 @@ export {
type ServiceApplicationConfig,
type ServiceLifecycleHooks,
} from './service-application';
// Handler scanner
export { HandlerScanner } from './scanner';
export type { HandlerScannerOptions } from './scanner';

View file

@ -12,26 +12,34 @@ export function registerCacheServices(
const { createServiceCache } = require('@stock-bot/queue');
// Get standardized service name from config
const serviceName = config.service?.serviceName || config.service?.name || 'unknown';
// Create service-specific cache that uses the service's Redis DB
return createServiceCache(serviceName, {
host: config.redis.host,
port: config.redis.port,
password: config.redis.password,
db: config.redis.db, // This will be overridden by ServiceCache
}, { logger });
return createServiceCache(
serviceName,
{
host: config.redis.host,
port: config.redis.port,
password: config.redis.password,
db: config.redis.db, // This will be overridden by ServiceCache
},
{ logger }
);
}).singleton(),
// Also provide global cache for shared data
globalCache: asFunction(({ logger }) => {
const { createServiceCache } = require('@stock-bot/queue');
const serviceName = config.service?.serviceName || config.service?.name || 'unknown';
return createServiceCache(serviceName, {
host: config.redis.host,
port: config.redis.port,
password: config.redis.password,
}, { global: true, logger });
return createServiceCache(
serviceName,
{
host: config.redis.host,
port: config.redis.port,
password: config.redis.password,
},
{ global: true, logger }
);
}).singleton(),
});
} else {
@ -40,4 +48,4 @@ export function registerCacheServices(
globalCache: asValue(null),
});
}
}
}

View file

@ -11,4 +11,4 @@ export function registerCoreServices(
config: asValue(config),
logger: asValue(getLogger('di-container')),
});
}
}

View file

@ -1,7 +1,7 @@
import { asFunction, asValue, type AwilixContainer } from 'awilix';
import { MongoDBClient } from '@stock-bot/mongodb';
import { PostgreSQLClient } from '@stock-bot/postgres';
import { QuestDBClient } from '@stock-bot/questdb';
import { asFunction, asValue, type AwilixContainer } from 'awilix';
import type { AppConfig } from '../config/schemas';
import type { ServiceDefinitions } from '../container/types';
@ -14,7 +14,9 @@ export function registerDatabaseServices(
container.register({
mongoClient: asFunction(({ logger }) => {
// Parse MongoDB URI to extract components
const uriMatch = config.mongodb.uri.match(/mongodb:\/\/(?:([^:]+):([^@]+)@)?([^:/]+):(\d+)\/([^?]+)(?:\?authSource=(.+))?/);
const uriMatch = config.mongodb.uri.match(
/mongodb:\/\/(?:([^:]+):([^@]+)@)?([^:/]+):(\d+)\/([^?]+)(?:\?authSource=(.+))?/
);
const mongoConfig = {
host: uriMatch?.[3] || 'localhost',
port: parseInt(uriMatch?.[4] || '27017'),
@ -44,9 +46,9 @@ export function registerDatabaseServices(
username: config.postgres.user,
password: String(config.postgres.password), // Ensure password is a string
};
logger.debug('PostgreSQL config:', {
...pgConfig,
logger.debug('PostgreSQL config:', {
...pgConfig,
password: pgConfig.password ? '***' : 'NO_PASSWORD',
});
return new PostgreSQLClient(pgConfig, logger);
@ -79,4 +81,4 @@ export function registerDatabaseServices(
questdbClient: asValue(null),
});
}
}
}

View file

@ -1,4 +1,4 @@
export { registerCoreServices } from './core.registration';
export { registerCacheServices } from './cache.registration';
export { registerDatabaseServices } from './database.registration';
export { registerApplicationServices } from './service.registration';
export { registerCoreServices } from './core.registration';
export { registerCacheServices } from './cache.registration';
export { registerDatabaseServices } from './database.registration';
export { registerApplicationServices } from './service.registration';

View file

@ -44,9 +44,9 @@ export function registerApplicationServices(
enableMetrics: true,
logger,
});
const proxyManager = new ProxyManager(proxyCache, config.proxy, logger);
// Note: Initialization will be handled by the lifecycle manager
return proxyManager;
}).singleton(),
@ -60,7 +60,7 @@ export function registerApplicationServices(
// Queue Manager
if (config.queue?.enabled && config.redis.enabled) {
container.register({
queueManager: asFunction(({ logger }) => {
queueManager: asFunction(({ logger, handlerRegistry }) => {
const { SmartQueueManager } = require('@stock-bot/queue');
const queueConfig = {
serviceName: config.service?.serviceName || config.service?.name || 'unknown',
@ -79,7 +79,7 @@ export function registerApplicationServices(
delayWorkerStart: config.queue!.delayWorkerStart ?? false,
autoDiscoverHandlers: true,
};
return new SmartQueueManager(queueConfig, logger);
return new SmartQueueManager(queueConfig, handlerRegistry, logger);
}).singleton(),
});
} else {
@ -87,4 +87,4 @@ export function registerApplicationServices(
queueManager: asValue(null),
});
}
}
}

View file

@ -0,0 +1,201 @@
/**
* Handler Scanner
* Discovers and registers handlers with the DI container
*/
import { asClass, type AwilixContainer } from 'awilix';
import { glob } from 'glob';
import type {
HandlerConfiguration,
HandlerMetadata,
HandlerRegistry,
} from '@stock-bot/handler-registry';
import { createJobHandler } from '@stock-bot/handlers';
import { getLogger } from '@stock-bot/logger';
import type { ExecutionContext, IHandler } from '@stock-bot/types';
export interface HandlerScannerOptions {
serviceName?: string;
autoRegister?: boolean;
patterns?: string[];
}
/**
 * Scans the filesystem for decorated handler classes and registers them with
 * both the HandlerRegistry (metadata + operation wiring) and the Awilix DI
 * container (as singletons), so queue workers can resolve and execute them
 * lazily at job time.
 */
export class HandlerScanner {
  private logger = getLogger('handler-scanner');
  // Handler classes keyed by handler name; only successfully registered
  // (non-disabled) handlers are tracked here.
  private discoveredHandlers = new Map<string, any>();

  constructor(
    private registry: HandlerRegistry,
    private container: AwilixContainer,
    private options: HandlerScannerOptions = {}
  ) {}

  /**
   * Scan for handlers matching the given glob patterns.
   * Falls back to `options.patterns` when none are passed. Individual file
   * failures are logged and skipped so one bad module cannot abort the scan.
   */
  async scanHandlers(patterns: string[] = this.options.patterns || []): Promise<void> {
    this.logger.info('Starting handler scan', { patterns });

    for (const pattern of patterns) {
      const files = await glob(pattern, { absolute: true });
      this.logger.debug(`Found ${files.length} files for pattern: ${pattern}`);

      for (const file of files) {
        try {
          await this.scanFile(file);
        } catch (error) {
          this.logger.error('Failed to scan file', { file, error });
        }
      }
    }

    this.logger.info('Handler scan complete', {
      discovered: this.discoveredHandlers.size,
      patterns,
    });
  }

  /**
   * Import a single file and register any handler classes it exports.
   * Import failures are logged, not rethrown.
   */
  private async scanFile(filePath: string): Promise<void> {
    try {
      const module = await import(filePath);
      this.registerHandlersFromModule(module, filePath);
    } catch (error) {
      this.logger.error('Failed to import module', { filePath, error });
    }
  }

  /**
   * Register every export of a module that looks like a decorated handler.
   */
  private registerHandlersFromModule(module: any, filePath: string): void {
    for (const [exportName, exported] of Object.entries(module)) {
      if (this.isHandler(exported)) {
        this.registerHandler(exported, exportName, filePath);
      }
    }
  }

  /**
   * Check whether an exported value is a handler class: it must be a function
   * (class) carrying the `__handlerName` and `__operations` metadata attached
   * by the handler decorators.
   */
  private isHandler(exported: any): boolean {
    if (typeof exported !== 'function') return false;
    const hasHandlerName = !!(exported as any).__handlerName;
    const hasOperations = Array.isArray((exported as any).__operations);
    return hasHandlerName && hasOperations;
  }

  /**
   * Register a handler class with the registry and (optionally) the DI
   * container. Handlers flagged `__disabled` are skipped entirely.
   */
  private registerHandler(HandlerClass: any, exportName: string, filePath: string): void {
    const handlerName = HandlerClass.__handlerName;
    const operations = HandlerClass.__operations || [];
    const schedules = HandlerClass.__schedules || [];
    const isDisabled = HandlerClass.__disabled || false;

    if (isDisabled) {
      this.logger.debug('Skipping disabled handler', { handlerName });
      return;
    }

    // Build metadata describing the handler's operations and schedules.
    const metadata: HandlerMetadata = {
      name: handlerName,
      service: this.options.serviceName,
      operations: operations.map((op: any) => ({
        name: op.name,
        method: op.method,
      })),
      schedules: schedules.map((schedule: any) => ({
        operation: schedule.operation,
        cronPattern: schedule.cronPattern,
        priority: schedule.priority,
        immediately: schedule.immediately,
        description: schedule.description,
      })),
    };

    // Build configuration with operation handlers. Each job handler resolves
    // the handler instance from the container at execution time, deferring
    // construction until a job actually runs.
    const operationHandlers: Record<string, any> = {};
    for (const op of operations) {
      operationHandlers[op.name] = createJobHandler(async payload => {
        const handler = this.container.resolve<IHandler>(handlerName);
        const context: ExecutionContext = {
          type: 'queue',
          metadata: { source: 'queue', timestamp: Date.now() },
        };
        return await handler.execute(op.name, payload, context);
      });
    }

    const configuration: HandlerConfiguration = {
      name: handlerName,
      operations: operationHandlers,
      scheduledJobs: schedules.map((schedule: any) => {
        // Schedules reference the method name; resolve it back to the public
        // operation name when a matching operation exists.
        const operation = operations.find((op: any) => op.method === schedule.operation);
        return {
          type: `${handlerName}-${schedule.operation}`,
          operation: operation?.name || schedule.operation,
          cronPattern: schedule.cronPattern,
          priority: schedule.priority || 5,
          immediately: schedule.immediately || false,
          description: schedule.description || `${handlerName} ${schedule.operation}`,
        };
      }),
    };

    // Register with registry.
    this.registry.register(metadata, configuration);

    // Register with DI container unless auto-registration is disabled.
    if (this.options.autoRegister !== false) {
      this.container.register({
        [handlerName]: asClass(HandlerClass).singleton(),
      });
    }

    // Track discovered handler.
    this.discoveredHandlers.set(handlerName, HandlerClass);

    this.logger.info('Registered handler', {
      handlerName,
      exportName,
      filePath,
      operations: operations.length,
      schedules: schedules.length,
      service: this.options.serviceName,
    });
  }

  /**
   * Get a defensive copy of all discovered handlers (name -> class).
   */
  getDiscoveredHandlers(): Map<string, any> {
    return new Map(this.discoveredHandlers);
  }

  /**
   * Manually register a handler class outside of a filesystem scan.
   *
   * Fix: the temporary `serviceName` override is now restored in a `finally`
   * block, so a failure inside `registerHandler` no longer leaves the scanner
   * permanently attributed to the wrong service.
   */
  registerHandlerClass(HandlerClass: any, options: { serviceName?: string } = {}): void {
    const serviceName = options.serviceName || this.options.serviceName;
    const originalServiceName = this.options.serviceName;

    // Temporarily override service name if provided.
    if (serviceName) {
      this.options.serviceName = serviceName;
    }
    try {
      this.registerHandler(HandlerClass, HandlerClass.name, 'manual');
    } finally {
      // Always restore the original service name, even on failure.
      this.options.serviceName = originalServiceName;
    }
  }
}

View file

@ -0,0 +1,2 @@
export { HandlerScanner } from './handler-scanner';
export type { HandlerScannerOptions } from './handler-scanner';

View file

@ -5,12 +5,14 @@
import { Hono } from 'hono';
import { cors } from 'hono/cors';
import { getLogger, setLoggerConfig, shutdownLoggers, type Logger } from '@stock-bot/logger';
import { Shutdown } from '@stock-bot/shutdown';
import type { BaseAppConfig, UnifiedAppConfig } from '@stock-bot/config';
import { toUnifiedConfig } from '@stock-bot/config';
import { getLogger, setLoggerConfig, shutdownLoggers, type Logger } from '@stock-bot/logger';
import { Shutdown } from '@stock-bot/shutdown';
import type { IServiceContainer } from '@stock-bot/types';
import type { ServiceContainer } from './awilix-container';
import type { HandlerRegistry } from '@stock-bot/handler-registry';
import type { ServiceDefinitions } from './container/types';
import type { AwilixContainer } from 'awilix';
/**
* Configuration for ServiceApplication
@ -18,26 +20,26 @@ import type { ServiceContainer } from './awilix-container';
export interface ServiceApplicationConfig {
/** Service name for logging and identification */
serviceName: string;
/** CORS configuration - if not provided, uses permissive defaults */
corsConfig?: Parameters<typeof cors>[0];
/** Whether to enable handler initialization */
enableHandlers?: boolean;
/** Whether to enable scheduled job creation */
enableScheduledJobs?: boolean;
/** Custom shutdown timeout in milliseconds */
shutdownTimeout?: number;
/** Service metadata for info endpoint */
serviceMetadata?: {
version?: string;
description?: string;
endpoints?: Record<string, string>;
};
/** Whether to add a basic info endpoint at root */
addInfoEndpoint?: boolean;
}
@ -48,16 +50,16 @@ export interface ServiceApplicationConfig {
export interface ServiceLifecycleHooks {
/** Called after container is created but before routes */
onContainerReady?: (container: IServiceContainer) => Promise<void> | void;
/** Called after app is created but before routes are mounted */
onAppReady?: (app: Hono, container: IServiceContainer) => Promise<void> | void;
/** Called after routes are mounted but before server starts */
onBeforeStart?: (app: Hono, container: IServiceContainer) => Promise<void> | void;
/** Called after successful server startup */
onStarted?: (port: number) => Promise<void> | void;
/** Called during shutdown before cleanup */
onBeforeShutdown?: () => Promise<void> | void;
}
@ -70,13 +72,13 @@ export class ServiceApplication {
private serviceConfig: ServiceApplicationConfig;
private hooks: ServiceLifecycleHooks;
private logger: Logger;
private container: ServiceContainer | null = null;
private container: AwilixContainer<ServiceDefinitions> | null = null;
private serviceContainer: IServiceContainer | null = null;
private app: Hono | null = null;
private server: ReturnType<typeof Bun.serve> | null = null;
private shutdown: Shutdown;
constructor(
config: BaseAppConfig | UnifiedAppConfig,
serviceConfig: ServiceApplicationConfig,
@ -84,12 +86,12 @@ export class ServiceApplication {
) {
// Convert to unified config
this.config = toUnifiedConfig(config);
// Ensure service name is set in config
if (!this.config.service.serviceName) {
this.config.service.serviceName = serviceConfig.serviceName;
}
this.serviceConfig = {
shutdownTimeout: 15000,
enableHandlers: false,
@ -98,17 +100,17 @@ export class ServiceApplication {
...serviceConfig,
};
this.hooks = hooks;
// Initialize logger configuration
this.configureLogger();
this.logger = getLogger(this.serviceConfig.serviceName);
// Initialize shutdown manager
this.shutdown = Shutdown.getInstance({
timeout: this.serviceConfig.shutdownTimeout
this.shutdown = Shutdown.getInstance({
timeout: this.serviceConfig.shutdownTimeout,
});
}
/**
* Configure logger based on application config
*/
@ -123,13 +125,13 @@ export class ServiceApplication {
});
}
}
/**
* Create and configure Hono application with CORS
*/
private createApp(): Hono {
const app = new Hono();
// Add CORS middleware with service-specific or default configuration
const corsConfig = this.serviceConfig.corsConfig || {
origin: '*',
@ -137,9 +139,9 @@ export class ServiceApplication {
allowHeaders: ['Content-Type', 'Authorization'],
credentials: false,
};
app.use('*', cors(corsConfig));
// Add basic info endpoint if enabled
if (this.serviceConfig.addInfoEndpoint) {
const metadata = this.serviceConfig.serviceMetadata || {};
@ -154,10 +156,10 @@ export class ServiceApplication {
});
});
}
return app;
}
/**
* Register graceful shutdown handlers
*/
@ -177,7 +179,7 @@ export class ServiceApplication {
}
}, 'Queue System');
}
// Priority 1: HTTP Server (high priority)
this.shutdown.onShutdownHigh(async () => {
if (this.server) {
@ -190,7 +192,7 @@ export class ServiceApplication {
}
}
}, 'HTTP Server');
// Custom shutdown hook
if (this.hooks.onBeforeShutdown) {
this.shutdown.onShutdownHigh(async () => {
@ -201,7 +203,7 @@ export class ServiceApplication {
}
}, 'Custom Shutdown');
}
// Priority 2: Services and connections (medium priority)
this.shutdown.onShutdownMedium(async () => {
this.logger.info('Disposing services and connections...');
@ -212,24 +214,24 @@ export class ServiceApplication {
if (mongoClient?.disconnect) {
await mongoClient.disconnect();
}
const postgresClient = this.container.resolve('postgresClient');
if (postgresClient?.disconnect) {
await postgresClient.disconnect();
}
const questdbClient = this.container.resolve('questdbClient');
if (questdbClient?.disconnect) {
await questdbClient.disconnect();
}
this.logger.info('All services disposed successfully');
}
} catch (error) {
this.logger.error('Error disposing services', { error });
}
}, 'Services');
// Priority 3: Logger shutdown (lowest priority - runs last)
this.shutdown.onShutdownLow(async () => {
try {
@ -241,62 +243,62 @@ export class ServiceApplication {
}
}, 'Loggers');
}
/**
* Start the service with full initialization
*/
async start(
containerFactory: (config: UnifiedAppConfig) => Promise<ServiceContainer>,
containerFactory: (config: UnifiedAppConfig) => Promise<AwilixContainer<ServiceDefinitions>>,
routeFactory: (container: IServiceContainer) => Hono,
handlerInitializer?: (container: IServiceContainer) => Promise<void>
): Promise<void> {
this.logger.info(`Initializing ${this.serviceConfig.serviceName} service...`);
try {
// Create and initialize container
this.logger.debug('Creating DI container...');
// Config already has service name from constructor
this.container = await containerFactory(this.config);
this.serviceContainer = this.container.resolve('serviceContainer');
this.serviceContainer = this.container!.resolve('serviceContainer');
this.logger.info('DI container created and initialized');
// Call container ready hook
if (this.hooks.onContainerReady) {
await this.hooks.onContainerReady(this.serviceContainer);
}
// Create Hono application
this.app = this.createApp();
// Call app ready hook
if (this.hooks.onAppReady) {
await this.hooks.onAppReady(this.app, this.serviceContainer);
}
// Initialize handlers if enabled
if (this.serviceConfig.enableHandlers && handlerInitializer) {
this.logger.debug('Initializing handlers...');
await handlerInitializer(this.serviceContainer);
this.logger.info('Handlers initialized');
}
// Create and mount routes
const routes = routeFactory(this.serviceContainer);
this.app.route('/', routes);
// Initialize scheduled jobs if enabled
if (this.serviceConfig.enableScheduledJobs) {
await this.initializeScheduledJobs();
}
// Call before start hook
if (this.hooks.onBeforeStart) {
await this.hooks.onBeforeStart(this.app, this.serviceContainer);
}
// Register shutdown handlers
this.registerShutdownHandlers();
// Start HTTP server
const port = this.config.service.port;
this.server = Bun.serve({
@ -304,14 +306,13 @@ export class ServiceApplication {
fetch: this.app.fetch,
development: this.config.environment === 'development',
});
this.logger.info(`${this.serviceConfig.serviceName} service started on port ${port}`);
// Call started hook
if (this.hooks.onStarted) {
await this.hooks.onStarted(port);
}
} catch (error) {
this.logger.error('DETAILED ERROR:', error);
this.logger.error('Failed to start service', {
@ -322,7 +323,7 @@ export class ServiceApplication {
throw error;
}
}
/**
* Initialize scheduled jobs from handler registry
*/
@ -330,17 +331,17 @@ export class ServiceApplication {
if (!this.container) {
throw new Error('Container not initialized');
}
this.logger.debug('Creating scheduled jobs from registered handlers...');
const { handlerRegistry } = await import('@stock-bot/handlers');
const handlerRegistry = this.container.resolve<HandlerRegistry>('handlerRegistry');
const allHandlers = handlerRegistry.getAllHandlersWithSchedule();
let totalScheduledJobs = 0;
for (const [handlerName, config] of allHandlers) {
if (config.scheduledJobs && config.scheduledJobs.length > 0) {
// Check if this handler belongs to the current service
const ownerService = handlerRegistry.getHandlerService(handlerName);
if (ownerService !== this.config.service.serviceName) {
this.logger.trace('Skipping scheduled jobs for handler from different service', {
handler: handlerName,
@ -349,14 +350,14 @@ export class ServiceApplication {
});
continue;
}
const queueManager = this.container.resolve('queueManager');
if (!queueManager) {
this.logger.error('Queue manager is not initialized, cannot create scheduled jobs');
continue;
}
const queue = queueManager.getQueue(handlerName);
for (const scheduledJob of config.scheduledJobs) {
// Include handler and operation info in job data
const jobData = {
@ -364,7 +365,7 @@ export class ServiceApplication {
operation: scheduledJob.operation,
payload: scheduledJob.payload,
};
// Build job options from scheduled job config
const jobOptions = {
priority: scheduledJob.priority,
@ -373,7 +374,7 @@ export class ServiceApplication {
immediately: scheduledJob.immediately,
},
};
await queue.addScheduledJob(
scheduledJob.operation,
jobData,
@ -392,7 +393,7 @@ export class ServiceApplication {
}
}
this.logger.info('Scheduled jobs created', { totalJobs: totalScheduledJobs });
// Start queue workers
this.logger.debug('Starting queue workers...');
const queueManager = this.container.resolve('queueManager');
@ -401,7 +402,7 @@ export class ServiceApplication {
this.logger.info('Queue workers started');
}
}
/**
* Stop the service gracefully
*/
@ -409,18 +410,18 @@ export class ServiceApplication {
this.logger.info(`Stopping ${this.serviceConfig.serviceName} service...`);
await this.shutdown.shutdown();
}
/**
* Get the service container (for testing or advanced use cases)
*/
getServiceContainer(): IServiceContainer | null {
return this.serviceContainer;
}
/**
* Get the Hono app (for testing or advanced use cases)
*/
getApp(): Hono | null {
return this.app;
}
}
}

View file

@ -1,6 +1,6 @@
import type { AwilixContainer } from 'awilix';
import type { ServiceDefinitions } from '../container/types';
import { getLogger } from '@stock-bot/logger';
import type { ServiceDefinitions } from '../container/types';
interface ServiceWithLifecycle {
connect?: () => Promise<void>;
@ -29,13 +29,16 @@ export class ServiceLifecycleManager {
for (const { name, key } of this.services) {
const service = container.cradle[key] as ServiceWithLifecycle | null;
if (service) {
const initPromise = this.initializeService(name, service);
initPromises.push(
Promise.race([
initPromise,
this.createTimeoutPromise(timeout, `${name} initialization timed out after ${timeout}ms`),
this.createTimeoutPromise(
timeout,
`${name} initialization timed out after ${timeout}ms`
),
])
);
}
@ -51,7 +54,7 @@ export class ServiceLifecycleManager {
// Shutdown in reverse order
for (const { name, key } of [...this.services].reverse()) {
const service = container.cradle[key] as ServiceWithLifecycle | null;
if (service) {
shutdownPromises.push(this.shutdownService(name, service));
}

View file

@ -0,0 +1,27 @@
{
"name": "@stock-bot/handler-registry",
"version": "1.0.0",
"type": "module",
"main": "./dist/index.js",
"types": "./dist/index.d.ts",
"exports": {
".": {
"import": "./dist/index.js",
"types": "./dist/index.d.ts"
}
},
"scripts": {
"build": "bun run build:clean && bun run build:tsc",
"build:clean": "rm -rf dist",
"build:tsc": "tsc",
"test": "bun test",
"clean": "rm -rf dist node_modules .turbo"
},
"dependencies": {
"@stock-bot/types": "workspace:*"
},
"devDependencies": {
"@types/bun": "*",
"typescript": "*"
}
}

View file

@ -0,0 +1,14 @@
/**
* Handler Registry Package
* Provides centralized handler registration without circular dependencies
*/
export { HandlerRegistry } from './registry';
export type {
HandlerMetadata,
OperationMetadata,
ScheduleMetadata,
HandlerConfiguration,
RegistryStats,
HandlerDiscoveryResult,
} from './types';

View file

@ -0,0 +1,226 @@
/**
* Handler Registry Implementation
* Manages handler metadata and configuration without circular dependencies
*/
import type { JobHandler, ScheduledJob } from '@stock-bot/types';
import type {
HandlerConfiguration,
HandlerMetadata,
OperationMetadata,
RegistryStats,
ScheduleMetadata,
} from './types';
/**
 * Central registry for handler metadata and runtime configuration.
 *
 * Keeps three parallel maps keyed by handler name: static metadata,
 * executable configuration (operation implementations + scheduled jobs),
 * and service ownership. Designed to be injected rather than imported as a
 * module-level singleton, which avoids circular package dependencies.
 */
export class HandlerRegistry {
  private readonly handlers = new Map<string, HandlerMetadata>();
  private readonly configurations = new Map<string, HandlerConfiguration>();
  private readonly handlerServices = new Map<string, string>();

  /** Record static metadata for a handler; tracks service ownership too. */
  registerMetadata(metadata: HandlerMetadata): void {
    const { name, service } = metadata;
    this.handlers.set(name, metadata);
    if (service) {
      this.handlerServices.set(name, service);
    }
  }

  /** Record the executable configuration (operation implementations). */
  registerConfiguration(config: HandlerConfiguration): void {
    this.configurations.set(config.name, config);
  }

  /** Convenience: register metadata and configuration in one call. */
  register(metadata: HandlerMetadata, config: HandlerConfiguration): void {
    this.registerMetadata(metadata);
    this.registerConfiguration(config);
  }

  /** Look up static metadata for a handler. */
  getMetadata(handlerName: string): HandlerMetadata | undefined {
    return this.handlers.get(handlerName);
  }

  /** Look up executable configuration for a handler. */
  getConfiguration(handlerName: string): HandlerConfiguration | undefined {
    return this.configurations.get(handlerName);
  }

  /** Look up a single operation implementation on a handler. */
  getOperation(handlerName: string, operationName: string): JobHandler | undefined {
    return this.configurations.get(handlerName)?.operations[operationName];
  }

  /** Snapshot of all metadata (copied so callers cannot mutate the registry). */
  getAllMetadata(): Map<string, HandlerMetadata> {
    return new Map(this.handlers);
  }

  /** Names of every registered handler. */
  getHandlerNames(): string[] {
    return [...this.handlers.keys()];
  }

  /** Whether a handler has been registered. */
  hasHandler(handlerName: string): boolean {
    return this.handlers.has(handlerName);
  }

  /** All handler metadata owned by the given service. */
  getServiceHandlers(serviceName: string): HandlerMetadata[] {
    const owned: HandlerMetadata[] = [];
    for (const [handlerName, owner] of this.handlerServices) {
      if (owner !== serviceName) {
        continue;
      }
      const metadata = this.handlers.get(handlerName);
      if (metadata) {
        owned.push(metadata);
      }
    }
    return owned;
  }

  /** Assign (or reassign) service ownership, syncing metadata if present. */
  setHandlerService(handlerName: string, serviceName: string): void {
    this.handlerServices.set(handlerName, serviceName);
    const metadata = this.handlers.get(handlerName);
    if (metadata) {
      metadata.service = serviceName;
    }
  }

  /** Service that owns a handler, if any. */
  getHandlerService(handlerName: string): string | undefined {
    return this.handlerServices.get(handlerName);
  }

  /** Scheduled jobs configured for a handler (empty array when none). */
  getScheduledJobs(handlerName: string): ScheduledJob[] {
    return this.configurations.get(handlerName)?.scheduledJobs || [];
  }

  /** Every handler paired with its scheduled jobs (possibly empty). */
  getAllHandlersWithSchedule(): Map<
    string,
    { metadata: HandlerMetadata; scheduledJobs: ScheduledJob[] }
  > {
    const result = new Map<string, { metadata: HandlerMetadata; scheduledJobs: ScheduledJob[] }>();
    for (const [name, metadata] of this.handlers) {
      result.set(name, {
        metadata,
        scheduledJobs: this.configurations.get(name)?.scheduledJobs || [],
      });
    }
    return result;
  }

  /** Aggregate counts across all registered handlers. */
  getStats(): RegistryStats {
    const services = new Set<string>();
    let operations = 0;
    let scheduledJobs = 0;
    for (const metadata of this.handlers.values()) {
      operations += metadata.operations.length;
      scheduledJobs += metadata.schedules?.length || 0;
      if (metadata.service) {
        services.add(metadata.service);
      }
    }
    return {
      handlers: this.handlers.size,
      operations,
      scheduledJobs,
      services: services.size,
    };
  }

  /** Drop every registration (useful for testing). */
  clear(): void {
    this.handlers.clear();
    this.configurations.clear();
    this.handlerServices.clear();
  }

  /** Serialize registry contents for debugging or persistence. */
  export(): {
    handlers: Array<[string, HandlerMetadata]>;
    configurations: Array<[string, HandlerConfiguration]>;
    services: Array<[string, string]>;
  } {
    return {
      handlers: [...this.handlers.entries()],
      configurations: [...this.configurations.entries()],
      services: [...this.handlerServices.entries()],
    };
  }

  /** Replace registry contents with previously exported data. */
  import(data: {
    handlers: Array<[string, HandlerMetadata]>;
    configurations: Array<[string, HandlerConfiguration]>;
    services: Array<[string, string]>;
  }): void {
    this.clear();
    data.handlers.forEach(([name, metadata]) => this.handlers.set(name, metadata));
    data.configurations.forEach(([name, config]) => this.configurations.set(name, config));
    data.services.forEach(([handler, service]) => this.handlerServices.set(handler, service));
  }
}

View file

@ -0,0 +1,66 @@
/**
* Handler Registry Types
* Pure types for handler metadata and registration
*/
import type { JobHandler, ScheduledJob } from '@stock-bot/types';
/**
 * Metadata for a single operation within a handler
 */
export interface OperationMetadata {
  /** Public operation name (used as the registry/queue operation key). */
  name: string;
  /** Name of the class method that implements the operation. */
  method: string;
  description?: string;
}
/**
 * Metadata for a scheduled operation
 */
export interface ScheduleMetadata {
  /** Handler method name this schedule triggers. */
  operation: string;
  /** Cron expression describing when the job runs. */
  cronPattern: string;
  priority?: number;
  /** Whether to also run once immediately when scheduled — TODO confirm against queue impl. */
  immediately?: boolean;
  description?: string;
}
/**
 * Complete metadata for a handler
 */
export interface HandlerMetadata {
  /** Unique handler name; used as the registry key. */
  name: string;
  /** Owning service, when known. */
  service?: string;
  operations: OperationMetadata[];
  schedules?: ScheduleMetadata[];
  version?: string;
  description?: string;
}
/**
 * Handler configuration with operation implementations
 */
export interface HandlerConfiguration {
  /** Must match the handler's metadata name. */
  name: string;
  /** Operation name -> executable job handler. */
  operations: Record<string, JobHandler>;
  scheduledJobs?: ScheduledJob[];
}
/**
 * Registry statistics
 */
export interface RegistryStats {
  handlers: number;
  operations: number;
  scheduledJobs: number;
  services: number;
}
/**
 * Handler discovery result
 */
export interface HandlerDiscoveryResult {
  handler: HandlerMetadata;
  // NOTE(review): `any` loses type safety here and the name shadows
  // Object.prototype.constructor — consider `new (...args: any[]) => unknown`
  // under a different property name. Verify no callers depend on `any`.
  constructor: any;
  filePath?: string;
}

View file

@ -0,0 +1,9 @@
{
"extends": "../../../tsconfig.lib.json",
"compilerOptions": {
"outDir": "./dist",
"rootDir": "./src"
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist", "test/**/*", "**/*.test.ts", "**/*.spec.ts"]
}

View file

@ -10,10 +10,11 @@
"test": "bun test"
},
"dependencies": {
"@stock-bot/cache": "workspace:*",
"@stock-bot/config": "workspace:*",
"@stock-bot/handler-registry": "workspace:*",
"@stock-bot/logger": "workspace:*",
"@stock-bot/types": "workspace:*",
"@stock-bot/cache": "workspace:*",
"@stock-bot/utils": "workspace:*",
"mongodb": "^6.12.0"
},

View file

@ -1,14 +1,17 @@
import type { Collection } from 'mongodb';
import { createNamespacedCache } from '@stock-bot/cache';
import { getLogger } from '@stock-bot/logger';
import type {
HandlerConfigWithSchedule,
IServiceContainer,
import type {
ExecutionContext,
IHandler
HandlerConfigWithSchedule,
HandlerMetadata,
IHandler,
IServiceContainer,
JobHandler,
ServiceTypes,
} from '@stock-bot/types';
import { fetch } from '@stock-bot/utils';
import { createNamespacedCache } from '@stock-bot/cache';
import { handlerRegistry } from '../registry/handler-registry';
// Handler registry is now injected, not imported
import { createJobHandler } from '../utils/create-job-handler';
/**
@ -38,16 +41,16 @@ export interface JobScheduleOptions {
* Provides common functionality and structure for queue/event operations
*/
export abstract class BaseHandler implements IHandler {
// Direct service properties - flattened for cleaner access
readonly logger;
readonly cache;
readonly globalCache;
readonly queue;
readonly proxy;
readonly browser;
readonly mongodb;
readonly postgres;
readonly questdb;
// Direct service properties - flattened for cleaner access with proper types
readonly logger: ServiceTypes['logger'];
readonly cache: ServiceTypes['cache'];
readonly globalCache: ServiceTypes['globalCache'];
readonly queue: ServiceTypes['queue'];
readonly proxy: ServiceTypes['proxy'];
readonly browser: ServiceTypes['browser'];
readonly mongodb: ServiceTypes['mongodb'];
readonly postgres: ServiceTypes['postgres'];
readonly questdb: ServiceTypes['questdb'];
private handlerName: string;
@ -109,8 +112,8 @@ export abstract class BaseHandler implements IHandler {
}
async scheduleOperation(
operation: string,
payload: unknown,
operation: string,
payload: unknown,
options?: JobScheduleOptions
): Promise<void> {
if (!this.queue) {
@ -122,7 +125,7 @@ export abstract class BaseHandler implements IHandler {
operation,
payload,
};
await queue.add(operation, jobData, options || {});
}
@ -162,7 +165,7 @@ export abstract class BaseHandler implements IHandler {
* Example: handler 'webshare' creates namespace 'webshare:api' -> keys will be 'cache:data-ingestion:webshare:api:*'
*/
protected createNamespacedCache(subNamespace: string) {
return createNamespacedCache(this.cache, `${this.handlerName}:${subNamespace}`);
return createNamespacedCache(this.cache || null, `${this.handlerName}:${subNamespace}`);
}
/**
@ -197,36 +200,36 @@ export abstract class BaseHandler implements IHandler {
// Don't add 'cache:' prefix since the cache already has its own prefix
return this.cache.del(`${this.handlerName}:${key}`);
}
/**
* Set global cache with key
*/
protected async globalCacheSet(key: string, value: any, ttl?: number): Promise<void> {
if (!this.globalCache) {
return;
}
return this.globalCache.set(key, value, ttl);
/**
* Set global cache with key
*/
protected async globalCacheSet(key: string, value: any, ttl?: number): Promise<void> {
if (!this.globalCache) {
return;
}
/**
* Get global cache with key
*/
protected async globalCacheGet<T = any>(key: string): Promise<T | null> {
if (!this.globalCache) {
return null;
}
return this.globalCache.get(key);
return this.globalCache.set(key, value, ttl);
}
/**
* Get global cache with key
*/
protected async globalCacheGet<T = any>(key: string): Promise<T | null> {
if (!this.globalCache) {
return null;
}
/**
* Delete global cache with key
*/
protected async globalCacheDel(key: string): Promise<void> {
if (!this.globalCache) {
return;
}
return this.globalCache.del(key);
return this.globalCache.get(key);
}
/**
* Delete global cache with key
*/
protected async globalCacheDel(key: string): Promise<void> {
if (!this.globalCache) {
return;
}
return this.globalCache.del(key);
}
/**
* Schedule operation with delay in seconds
*/
@ -238,7 +241,7 @@ export abstract class BaseHandler implements IHandler {
): Promise<void> {
return this.scheduleOperation(operation, payload, {
delay: delaySeconds * 1000,
...additionalOptions
...additionalOptions,
});
}
@ -294,27 +297,45 @@ export abstract class BaseHandler implements IHandler {
// }
/**
* Register this handler using decorator metadata
* Automatically reads @Handler, @Operation, and @QueueSchedule decorators
* Create handler configuration with job handlers
* This is used by the scanner to create the actual handler configuration
*/
register(serviceName?: string): void {
const constructor = this.constructor as any;
const handlerName = constructor.__handlerName || this.handlerName;
const operations = constructor.__operations || [];
const schedules = constructor.__schedules || [];
createHandlerConfig(): HandlerConfigWithSchedule {
const metadata = (this.constructor as typeof BaseHandler).extractMetadata();
if (!metadata) {
throw new Error('Handler metadata not found');
}
// Create operation handlers from decorator metadata
const operationHandlers: Record<string, any> = {};
for (const op of operations) {
operationHandlers[op.name] = createJobHandler(async payload => {
const operationHandlers: Record<string, JobHandler> = {};
for (const opName of metadata.operations) {
operationHandlers[opName] = createJobHandler(async (payload: any) => {
const context: ExecutionContext = {
type: 'queue',
metadata: { source: 'queue', timestamp: Date.now() },
};
return await this.execute(op.name, payload, context);
return await this.execute(opName, payload, context);
});
}
return {
name: metadata.name,
operations: operationHandlers,
scheduledJobs: metadata.scheduledJobs,
};
}
/**
* Extract handler metadata from decorators
* This returns metadata only - actual handler instances are created by the scanner
*/
static extractMetadata(): HandlerMetadata | null {
const constructor = this as any;
const handlerName = constructor.__handlerName;
if (!handlerName) return null;
const operations = constructor.__operations || [];
const schedules = constructor.__schedules || [];
// Create scheduled jobs from decorator metadata
const scheduledJobs = schedules.map((schedule: any) => {
// Find the operation name from the method name
@ -326,27 +347,15 @@ export abstract class BaseHandler implements IHandler {
priority: schedule.priority || 5,
immediately: schedule.immediately || false,
description: schedule.description || `${handlerName} ${schedule.operation}`,
payload: this.getScheduledJobPayload?.(schedule.operation),
};
});
const config: HandlerConfigWithSchedule = {
return {
name: handlerName,
operations: operationHandlers,
operations: operations.map((op: any) => op.name),
scheduledJobs,
description: constructor.__description,
};
handlerRegistry.registerWithSchedule(config, serviceName);
this.logger.info('Handler registered using decorator metadata', {
handlerName,
service: serviceName,
operations: operations.map((op: any) => ({ name: op.name, method: op.method })),
scheduledJobs: scheduledJobs.map((job: any) => ({
operation: job.operation,
cronPattern: job.cronPattern,
immediately: job.immediately,
})),
});
}
/**

View file

@ -2,8 +2,7 @@
export { BaseHandler, ScheduledHandler } from './base/BaseHandler';
export type { JobScheduleOptions } from './base/BaseHandler';
// Handler registry
export { handlerRegistry } from './registry/handler-registry';
// Handler registry is now in a separate package
// Utilities
export { createJobHandler } from './utils/create-job-handler';

View file

@ -6,8 +6,8 @@
import { readdirSync, statSync } from 'fs';
import { join, relative } from 'path';
import { getLogger } from '@stock-bot/logger';
import { BaseHandler } from '../base/BaseHandler';
import type { IServiceContainer } from '@stock-bot/types';
import { BaseHandler } from '../base/BaseHandler';
const logger = getLogger('handler-auto-register');
@ -123,14 +123,13 @@ export async function autoRegisterHandlers(
} else {
logger.info(`Registering handler: ${handlerName} from ${relativePath}`);
// Create instance and register
// Create instance - handlers now auto-register via decorators
const handler = new HandlerClass(services);
handler.register(serviceName);
// No need to set service ownership separately - it's done in register()
registered.push(handlerName);
logger.info(`Successfully registered handler: ${handlerName}`, { service: serviceName });
logger.info(`Successfully registered handler: ${handlerName}`, {
service: serviceName,
});
}
}
} catch (error) {

View file

@ -1,16 +1,16 @@
/**
* Utility for creating typed job handlers
*/
import type { JobHandler, TypedJobHandler } from '@stock-bot/types';
/**
* Create a typed job handler with validation
*/
export function createJobHandler<TPayload = unknown, TResult = unknown>(
handler: TypedJobHandler<TPayload, TResult>
): JobHandler<unknown, TResult> {
return async (payload: unknown): Promise<TResult> => {
return handler(payload as TPayload);
};
}
/**
* Utility for creating typed job handlers
*/
import type { JobHandler, TypedJobHandler } from '@stock-bot/types';
/**
* Create a typed job handler with validation
*/
export function createJobHandler<TPayload = unknown, TResult = unknown>(
handler: TypedJobHandler<TPayload, TResult>
): JobHandler<unknown, TResult> {
return async (payload: unknown): Promise<TResult> => {
return handler(payload as TPayload);
};
}

View file

@ -14,9 +14,9 @@
"ioredis": "^5.3.0",
"rate-limiter-flexible": "^3.0.0",
"@stock-bot/cache": "*",
"@stock-bot/handler-registry": "*",
"@stock-bot/logger": "*",
"@stock-bot/types": "*",
"@stock-bot/handlers": "*"
"@stock-bot/types": "*"
},
"devDependencies": {
"typescript": "^5.3.0",

View file

@ -171,7 +171,11 @@ async function processBatched<T>(
/**
* Process a batch job - loads items and creates individual jobs
*/
export async function processBatchJob(jobData: BatchJobData, queueName: string, queueManager: QueueManager): Promise<unknown> {
export async function processBatchJob(
jobData: BatchJobData,
queueName: string,
queueManager: QueueManager
): Promise<unknown> {
const queue = queueManager.getQueue(queueName);
const logger = queue.createChildLogger('batch-job', {
queueName,
@ -304,7 +308,11 @@ async function loadPayload<T>(
} | null;
}
async function cleanupPayload(key: string, queueName: string, queueManager: QueueManager): Promise<void> {
async function cleanupPayload(
key: string,
queueName: string,
queueManager: QueueManager
): Promise<void> {
const cache = queueManager.getCache(queueName);
await cache.del(key);
}

View file

@ -4,15 +4,15 @@ export { QueueManager } from './queue-manager';
export { SmartQueueManager } from './smart-queue-manager';
export { ServiceCache, createServiceCache } from './service-cache';
// Service utilities
export {
export {
normalizeServiceName,
generateCachePrefix,
getFullQueueName,
parseQueueName
parseQueueName,
} from './service-utils';
// Re-export handler registry and utilities from handlers package
export { handlerRegistry, createJobHandler } from '@stock-bot/handlers';
// Re-export utilities from handlers package
export { createJobHandler } from '@stock-bot/handlers';
// Batch processing
export { processBatchJob, processItems } from './batch-processor';
@ -64,10 +64,8 @@ export type {
// Scheduled job types
ScheduledJob,
ScheduleConfig,
// Smart Queue types
SmartQueueConfig,
QueueRoute,
} from './types';

View file

@ -76,8 +76,9 @@ export class QueueManager {
// Prepare queue configuration
const workers = mergedOptions.workers ?? this.config.defaultQueueOptions?.workers ?? 1;
const concurrency = mergedOptions.concurrency ?? this.config.defaultQueueOptions?.concurrency ?? 1;
const concurrency =
mergedOptions.concurrency ?? this.config.defaultQueueOptions?.concurrency ?? 1;
const queueConfig: QueueWorkerConfig = {
workers,
concurrency,
@ -180,7 +181,6 @@ export class QueueManager {
return this.queues;
}
/**
* Get statistics for all queues
*/
@ -449,4 +449,4 @@ export class QueueManager {
getConfig(): Readonly<QueueManagerConfig> {
return { ...this.config };
}
}
}

View file

@ -1,6 +1,7 @@
import { Queue as BullQueue, QueueEvents, Worker, type Job } from 'bullmq';
import { handlerRegistry } from '@stock-bot/handlers';
import type { JobData, JobOptions, ExtendedJobOptions, QueueStats, RedisConfig } from './types';
// Handler registry will be injected
import type { HandlerRegistry } from '@stock-bot/handler-registry';
import type { ExtendedJobOptions, JobData, JobOptions, QueueStats, RedisConfig } from './types';
import { getRedisConnection } from './utils';
// Logger interface for type safety
@ -17,6 +18,7 @@ export interface QueueWorkerConfig {
workers?: number;
concurrency?: number;
startWorker?: boolean;
handlerRegistry?: HandlerRegistry;
}
/**
@ -30,6 +32,7 @@ export class Queue {
private queueName: string;
private redisConfig: RedisConfig;
private readonly logger: Logger;
private readonly handlerRegistry?: HandlerRegistry;
constructor(
queueName: string,
@ -41,6 +44,7 @@ export class Queue {
this.queueName = queueName;
this.redisConfig = redisConfig;
this.logger = logger || console;
this.handlerRegistry = config.handlerRegistry;
const connection = getRedisConnection(redisConfig);
@ -338,7 +342,10 @@ export class Queue {
try {
// Look up handler in registry
const jobHandler = handlerRegistry.getOperation(handler, operation);
if (!this.handlerRegistry) {
throw new Error('Handler registry not configured for worker processing');
}
const jobHandler = this.handlerRegistry.getOperation(handler, operation);
if (!jobHandler) {
throw new Error(`No handler found for ${handler}:${operation}`);
@ -390,5 +397,4 @@ export class Queue {
getWorkerCount(): number {
return this.workers.length;
}
}

View file

@ -271,7 +271,12 @@ export class QueueRateLimiter {
limit,
};
} catch (error) {
this.logger.error('Failed to get rate limit status', { queueName, handler, operation, error });
this.logger.error('Failed to get rate limit status', {
queueName,
handler,
operation,
error,
});
return {
queueName,
handler,

View file

@ -1,6 +1,6 @@
import { createCache, type CacheProvider, type CacheStats } from '@stock-bot/cache';
import type { RedisConfig } from './types';
import { generateCachePrefix } from './service-utils';
import type { RedisConfig } from './types';
/**
* Service-aware cache that uses the service's Redis DB
@ -132,7 +132,11 @@ export class ServiceCache implements CacheProvider {
return this.cache.set(key, value, ttl);
}
async updateField<T = any>(key: string, updater: (current: T | null) => T, ttl?: number): Promise<T | null> {
async updateField<T = any>(
key: string,
updater: (current: T | null) => T,
ttl?: number
): Promise<T | null> {
if (this.cache.updateField) {
return this.cache.updateField(key, updater, ttl);
}
@ -162,7 +166,6 @@ export class ServiceCache implements CacheProvider {
}
}
/**
* Factory function to create service cache
*/
@ -172,4 +175,4 @@ export function createServiceCache(
options: { global?: boolean; logger?: any } = {}
): ServiceCache {
return new ServiceCache(serviceName, redisConfig, options.global, options.logger);
}
}

View file

@ -1,53 +1,51 @@
/**
* Service utilities for name normalization and auto-discovery
*/
/**
* Normalize service name to kebab-case format
* Examples:
* - webApi -> web-api
* - dataIngestion -> data-ingestion
* - data-pipeline -> data-pipeline (unchanged)
*/
export function normalizeServiceName(serviceName: string): string {
// Handle camelCase to kebab-case conversion
const kebabCase = serviceName
.replace(/([a-z])([A-Z])/g, '$1-$2')
.toLowerCase();
return kebabCase;
}
/**
* Generate cache prefix for a service
*/
export function generateCachePrefix(serviceName: string): string {
const normalized = normalizeServiceName(serviceName);
return `cache:${normalized}`;
}
/**
* Generate full queue name with service namespace
*/
export function getFullQueueName(serviceName: string, handlerName: string): string {
const normalized = normalizeServiceName(serviceName);
// Use {service_handler} format for Dragonfly optimization and BullMQ compatibility
return `{${normalized}_${handlerName}}`;
}
/**
* Parse a full queue name into service and handler
*/
export function parseQueueName(fullQueueName: string): { service: string; handler: string } | null {
// Match pattern {service_handler}
const match = fullQueueName.match(/^\{([^_]+)_([^}]+)\}$/);
if (!match || !match[1] || !match[2]) {
return null;
}
return {
service: match[1],
handler: match[2],
};
}
/**
* Service utilities for name normalization and auto-discovery
*/
/**
 * Normalize a service name to kebab-case.
 * camelCase boundaries become hyphens and the whole name is lowercased;
 * names that are already kebab-case pass through unchanged.
 * Examples: webApi -> web-api, dataIngestion -> data-ingestion.
 */
export function normalizeServiceName(serviceName: string): string {
  return serviceName
    .replace(/([a-z])([A-Z])/g, (_match, lower: string, upper: string) => `${lower}-${upper}`)
    .toLowerCase();
}
/**
 * Build the cache key prefix for a service: `cache:<normalized-name>`.
 */
export function generateCachePrefix(serviceName: string): string {
  return ['cache', normalizeServiceName(serviceName)].join(':');
}
/**
 * Build the fully-qualified queue name for a service/handler pair.
 * The `{service_handler}` braces form is used as a hash tag for Dragonfly
 * optimization while remaining BullMQ-compatible.
 */
export function getFullQueueName(serviceName: string, handlerName: string): string {
  const service = normalizeServiceName(serviceName);
  return `{${service}_${handlerName}}`;
}
/**
 * Parse a `{service_handler}` queue name back into its parts.
 * Returns null when the input does not match the expected shape.
 * NOTE(review): the service segment may not contain underscores (the first
 * `_` is taken as the separator, and the handler segment may contain more) —
 * confirm normalized service names are always underscore-free.
 */
export function parseQueueName(fullQueueName: string): { service: string; handler: string } | null {
  const result = /^\{([^_]+)_([^}]+)\}$/.exec(fullQueueName);
  if (!result) {
    return null;
  }
  const [, service, handler] = result;
  if (!service || !handler) {
    return null;
  }
  return { service, handler };
}

View file

@ -1,16 +1,10 @@
import { Queue as BullQueue, type Job } from 'bullmq';
import { handlerRegistry } from '@stock-bot/handlers';
import type { HandlerRegistry } from '@stock-bot/handler-registry';
import { getLogger, type Logger } from '@stock-bot/logger';
import { QueueManager } from './queue-manager';
import { Queue } from './queue';
import type {
SmartQueueConfig,
QueueRoute,
JobData,
JobOptions,
RedisConfig
} from './types';
import { QueueManager } from './queue-manager';
import { getFullQueueName, parseQueueName } from './service-utils';
import type { JobData, JobOptions, QueueRoute, RedisConfig, SmartQueueConfig } from './types';
import { getRedisConnection } from './utils';
/**
@ -23,30 +17,33 @@ export class SmartQueueManager extends QueueManager {
private connections = new Map<number, any>(); // Redis connections by DB
private producerQueues = new Map<string, BullQueue>(); // For cross-service sending
private _logger: Logger;
private handlerRegistry?: HandlerRegistry;
constructor(config: SmartQueueConfig, logger?: Logger) {
constructor(config: SmartQueueConfig, handlerRegistry?: HandlerRegistry, logger?: Logger) {
// Always use DB 0 for queues (unified queue database)
const modifiedConfig = {
...config,
redis: {
...config.redis,
db: 0, // All queues in DB 0
db: 0, // All queues in DB 0
},
};
super(modifiedConfig, logger);
this.serviceName = config.serviceName;
this.handlerRegistry = handlerRegistry;
this._logger = logger || getLogger('SmartQueueManager');
// Auto-discover routes if enabled
if (config.autoDiscoverHandlers !== false) {
// Auto-discover routes if enabled and registry provided
if (config.autoDiscoverHandlers !== false && handlerRegistry) {
this.discoverQueueRoutes();
}
this._logger.info('SmartQueueManager initialized', {
service: this.serviceName,
discoveredRoutes: this.queueRoutes.size,
hasRegistry: !!handlerRegistry,
});
}
@ -54,26 +51,31 @@ export class SmartQueueManager extends QueueManager {
* Discover all available queue routes from handler registry
*/
private discoverQueueRoutes(): void {
if (!this.handlerRegistry) {
this._logger.warn('No handler registry provided, skipping route discovery');
return;
}
try {
const handlers = handlerRegistry.getAllHandlers();
for (const [handlerName, handlerConfig] of handlers) {
const handlers = this.handlerRegistry.getAllMetadata();
for (const [handlerName, metadata] of handlers) {
// Get the service that registered this handler
const ownerService = handlerRegistry.getHandlerService(handlerName);
const ownerService = metadata.service;
if (ownerService) {
const fullName = getFullQueueName(ownerService, handlerName);
this.queueRoutes.set(handlerName, {
fullName,
service: ownerService,
handler: handlerName,
db: 0, // All queues in DB 0
operations: Object.keys(handlerConfig.operations || {}),
db: 0, // All queues in DB 0
operations: metadata.operations.map((op: any) => op.name),
});
this._logger.trace('Discovered queue route', {
handler: handlerName,
service: ownerService,
operations: Object.keys(handlerConfig.operations || {}).length,
operations: metadata.operations.length,
});
} else {
this._logger.warn('Handler has no service ownership', { handlerName });
@ -81,24 +83,25 @@ export class SmartQueueManager extends QueueManager {
}
// Also discover handlers registered by the current service
const myHandlers = handlerRegistry.getServiceHandlers(this.serviceName);
for (const handlerName of myHandlers) {
const myHandlers = this.handlerRegistry.getServiceHandlers(this.serviceName);
for (const metadata of myHandlers) {
const handlerName = metadata.name;
if (!this.queueRoutes.has(handlerName)) {
const fullName = getFullQueueName(this.serviceName, handlerName);
this.queueRoutes.set(handlerName, {
fullName,
service: this.serviceName,
handler: handlerName,
db: 0, // All queues in DB 0
db: 0, // All queues in DB 0
});
}
}
this._logger.info('Queue routes discovered', {
totalRoutes: this.queueRoutes.size,
routes: Array.from(this.queueRoutes.values()).map(r => ({
handler: r.handler,
service: r.service
routes: Array.from(this.queueRoutes.values()).map(r => ({
handler: r.handler,
service: r.service,
})),
});
} catch (error) {
@ -129,10 +132,10 @@ export class SmartQueueManager extends QueueManager {
override getQueue(queueName: string, options = {}): Queue {
// Check if this is already a full queue name (service:handler format)
const parsed = parseQueueName(queueName);
let fullQueueName: string;
let isOwnQueue: boolean;
if (parsed) {
// Already in service:handler format
fullQueueName = queueName;
@ -142,20 +145,19 @@ export class SmartQueueManager extends QueueManager {
fullQueueName = getFullQueueName(this.serviceName, queueName);
isOwnQueue = true;
}
// For cross-service queues, create without workers (producer-only)
if (!isOwnQueue) {
return super.getQueue(fullQueueName, {
...options,
workers: 0, // No workers for other services' queues
workers: 0, // No workers for other services' queues
});
}
// For own service queues, use configured workers
return super.getQueue(fullQueueName, options);
}
/**
* Send a job to any queue (local or remote)
* This is the main method for cross-service communication
@ -236,7 +238,7 @@ export class SmartQueueManager extends QueueManager {
fullName: queueName,
service: parsed.service,
handler: parsed.handler,
db: 0, // All queues in DB 0
db: 0, // All queues in DB 0
};
}
@ -247,13 +249,13 @@ export class SmartQueueManager extends QueueManager {
}
// Try to find in handler registry
const ownerService = handlerRegistry.getHandlerService(queueName);
const ownerService = this.handlerRegistry?.getHandlerService(queueName);
if (ownerService) {
return {
fullName: getFullQueueName(ownerService, queueName),
service: ownerService,
handler: queueName,
db: 0, // All queues in DB 0
db: 0, // All queues in DB 0
};
}
@ -281,7 +283,7 @@ export class SmartQueueManager extends QueueManager {
*/
getAllQueues(): Record<string, BullQueue> {
const allQueues: Record<string, BullQueue> = {};
// Get all worker queues using public API
const workerQueueNames = this.getQueueNames();
for (const name of workerQueueNames) {
@ -296,7 +298,7 @@ export class SmartQueueManager extends QueueManager {
}
}
}
// Add producer queues
for (const [name, queue] of this.producerQueues) {
// Use the simple handler name without service prefix for display
@ -306,7 +308,7 @@ export class SmartQueueManager extends QueueManager {
allQueues[simpleName] = queue;
}
}
// If no queues found, create from discovered routes
if (Object.keys(allQueues).length === 0) {
for (const [handlerName, route] of this.queueRoutes) {
@ -317,7 +319,7 @@ export class SmartQueueManager extends QueueManager {
});
}
}
return allQueues;
}
@ -350,11 +352,11 @@ export class SmartQueueManager extends QueueManager {
let workersStarted = 0;
const queues = this.getQueues();
for (const [queueName, queue] of queues) {
// Parse queue name to check if it belongs to this service
const parsed = parseQueueName(queueName);
// Skip if not our service's queue
if (parsed && parsed.service !== this.serviceName) {
this._logger.trace('Skipping workers for cross-service queue', {
@ -364,7 +366,7 @@ export class SmartQueueManager extends QueueManager {
});
continue;
}
const workerCount = this.getConfig().defaultQueueOptions?.workers || 1;
const concurrency = this.getConfig().defaultQueueOptions?.concurrency || 1;
@ -399,7 +401,8 @@ export class SmartQueueManager extends QueueManager {
// Close additional connections
for (const [db, connection] of this.connections) {
if (db !== 0) { // Don't close our main connection (DB 0 for queues)
if (db !== 0) {
// Don't close our main connection (DB 0 for queues)
connection.disconnect();
this._logger.debug('Closed Redis connection', { db });
}
@ -408,4 +411,4 @@ export class SmartQueueManager extends QueueManager {
// Call parent shutdown
await super.shutdown();
}
}
}

View file

@ -4,14 +4,14 @@ import type { JobOptions, QueueStats } from '@stock-bot/types';
// Re-export handler and queue types from shared types package
export type {
HandlerConfig,
HandlerConfigWithSchedule,
JobHandler,
ScheduledJob,
HandlerConfigWithSchedule,
JobHandler,
ScheduledJob,
TypedJobHandler,
JobData,
JobOptions,
QueueWorkerConfig,
QueueStats
QueueStats,
} from '@stock-bot/types';
export interface ProcessOptions {
@ -92,7 +92,6 @@ export interface QueueConfig extends QueueManagerConfig {
enableMetrics?: boolean;
}
// Extended batch job data for queue implementation
export interface BatchJobData {
payloadKey: string;

View file

@ -8,6 +8,7 @@
"include": ["src/**/*"],
"references": [
{ "path": "../cache" },
{ "path": "../handler-registry" },
{ "path": "../handlers" },
{ "path": "../logger" },
{ "path": "../types" }

View file

@ -8,13 +8,13 @@
* - Platform-specific signal support (Windows/Unix)
*/
import { getLogger } from '@stock-bot/logger';
import type {
PrioritizedShutdownCallback,
ShutdownCallback,
ShutdownOptions,
ShutdownResult,
} from './types';
import { getLogger } from '@stock-bot/logger';
// Global flag that works across all processes/workers
declare global {

View file

@ -1,41 +1,41 @@
/**
* Decorator Type Definitions
* Type definitions for handler decorators
*/
/**
* Schedule configuration for operations
*/
export interface ScheduleConfig {
cronPattern: string;
priority?: number;
immediately?: boolean;
description?: string;
}
/**
* Decorator metadata stored on classes
*/
export interface DecoratorMetadata {
handlerName?: string;
operations?: Array<{
name: string;
methodName: string;
schedules?: ScheduleConfig[];
}>;
disabled?: boolean;
}
/**
* Type for decorator factories
*/
export type DecoratorFactory<T = any> = (target: T, context?: any) => T | void;
/**
* Type for method decorators
*/
export type MethodDecoratorFactory = (
target: any,
propertyKey: string,
descriptor?: PropertyDescriptor
) => any;
/**
* Decorator Type Definitions
* Type definitions for handler decorators
*/
/**
 * Cron-based schedule configuration for a handler operation
 * (the options accepted by the schedule decorator).
 */
export interface ScheduleConfig {
  // Cron expression controlling when the operation runs.
  cronPattern: string;
  // Job priority — presumably forwarded to the queue's priority option; TODO confirm ordering convention.
  priority?: number;
  // When true, also run once immediately in addition to the cron schedule.
  immediately?: boolean;
  description?: string;
}
/**
* Decorator metadata stored on classes
*/
export interface DecoratorMetadata {
handlerName?: string;
operations?: Array<{
name: string;
methodName: string;
schedules?: ScheduleConfig[];
}>;
disabled?: boolean;
}
/**
* Type for decorator factories
*/
export type DecoratorFactory<T = any> = (target: T, context?: any) => T | void;
/**
* Type for method decorators
*/
export type MethodDecoratorFactory = (
target: any,
propertyKey: string,
descriptor?: PropertyDescriptor
) => any;

View file

@ -70,4 +70,3 @@ export interface OperationMetadata {
description?: string;
validation?: (input: unknown) => boolean;
}

View file

@ -64,6 +64,24 @@ export type {
// Export service container interface
export type { IServiceContainer } from './service-container';
// Export service types
export type {
ServiceTypes,
Logger,
CacheProvider,
QueueManager,
Queue,
MongoDBClient,
PostgresClient,
QuestDBClient,
Browser,
BrowserContext,
Page,
ProxyManager,
ProxyInfo,
ProxyStats,
} from './services';
// Export decorator types
export type {
ScheduleConfig,
@ -73,10 +91,4 @@ export type {
} from './decorators';
// Export queue types
export type {
JobData,
JobOptions,
QueueStats,
BatchJobData,
QueueWorkerConfig,
} from './queue';
export type { JobData, JobOptions, QueueStats, BatchJobData, QueueWorkerConfig } from './queue';

View file

@ -1,64 +1,64 @@
/**
* Queue Type Definitions
* Types specific to queue operations
*/
/**
* Job data structure for queue operations
*/
export interface JobData<T = unknown> {
handler: string;
operation: string;
payload: T;
priority?: number;
}
/**
* Queue job options
*/
export interface JobOptions {
priority?: number;
delay?: number;
attempts?: number;
backoff?: {
type: 'exponential' | 'fixed';
delay: number;
};
removeOnComplete?: boolean | number;
removeOnFail?: boolean | number;
timeout?: number;
}
/**
* Queue statistics
*/
export interface QueueStats {
waiting: number;
active: number;
completed: number;
failed: number;
delayed: number;
paused: boolean;
workers?: number;
}
/**
* Batch job configuration
*/
export interface BatchJobData {
payloadKey: string;
batchIndex: number;
totalBatches: number;
items: unknown[];
}
/**
* Queue worker configuration
*/
export interface QueueWorkerConfig {
concurrency?: number;
maxStalledCount?: number;
stalledInterval?: number;
lockDuration?: number;
lockRenewTime?: number;
}
/**
* Queue Type Definitions
* Types specific to queue operations
*/
/**
* Job data structure for queue operations
*/
export interface JobData<T = unknown> {
handler: string;
operation: string;
payload: T;
priority?: number;
}
/**
* Queue job options
*/
export interface JobOptions {
priority?: number;
delay?: number;
attempts?: number;
backoff?: {
type: 'exponential' | 'fixed';
delay: number;
};
removeOnComplete?: boolean | number;
removeOnFail?: boolean | number;
timeout?: number;
}
/**
* Queue statistics
*/
export interface QueueStats {
waiting: number;
active: number;
completed: number;
failed: number;
delayed: number;
paused: boolean;
workers?: number;
}
/**
* Batch job configuration
*/
export interface BatchJobData {
payloadKey: string;
batchIndex: number;
totalBatches: number;
items: unknown[];
}
/**
* Queue worker configuration
*/
export interface QueueWorkerConfig {
concurrency?: number;
maxStalledCount?: number;
stalledInterval?: number;
lockDuration?: number;
lockRenewTime?: number;
}

View file

@ -1,28 +1,17 @@
/**
 * Service Container Interface
 * Pure interface definition with no dependencies
 * Used by both DI and Handlers packages
 */
/**
 * Universal service container interface
 * Provides access to all common services from a single object
 * Designed to work across different service contexts
 *
 * NOTE(review): every member here is typed `any`, so this interface offers
 * no compile-time checking — consumers must know the concrete service
 * shapes themselves. Only `logger` is mandatory; all other services are
 * optional so containers can enable them selectively.
 */
export interface IServiceContainer {
  // Core infrastructure
  readonly logger: any; // Logger instance (the only mandatory service)
  readonly cache?: any; // Cache provider (Redis/Dragonfly) - optional
  readonly globalCache?: any; // Global cache provider (shared across services) - optional
  readonly queue?: any; // Queue manager (BullMQ) - optional
  readonly proxy?: any; // Proxy manager service - optional (depends on cache)
  readonly browser?: any; // Browser automation (Playwright)
  // Database clients - all optional to support selective enabling
  readonly mongodb?: any; // MongoDB client
  readonly postgres?: any; // PostgreSQL client
  readonly questdb?: any; // QuestDB client (time-series)
  // Optional extensions for future use
  readonly custom?: Record<string, any>;
}
/**
 * Service Container Interface
 * Pure interface definition with full type safety
 * Used by both DI and Handlers packages
 */
import type { ServiceTypes } from './services';
/**
 * Universal service container interface
 * Provides access to all common services in a type-safe manner
 * Designed to work across different service contexts
 *
 * All concrete service members (logger, caches, queue, databases, browser,
 * proxy) are inherited from ServiceTypes; this interface only adds the
 * `custom` extension bag on top.
 */
export interface IServiceContainer extends ServiceTypes {
  // Optional extensions for future use: escape hatch for services not yet
  // modeled in ServiceTypes (values are deliberately untyped).
  readonly custom?: Record<string, any>;
}

View file

@ -0,0 +1,311 @@
/**
 * Service Type Definitions
 * Provides full type safety for all injected services
 */
// Service contracts are declared structurally in this file (nothing is
// imported at runtime), so this package carries no runtime dependencies.
/**
 * Structural logging contract.
 *
 * `meta` is structured context attached to the log entry. `trace` and
 * `child` are optional because not every logger implementation provides
 * them — callers must check before use.
 */
export interface Logger {
  info(message: string, meta?: Record<string, unknown>): void;
  error(message: string, meta?: Record<string, unknown>): void;
  warn(message: string, meta?: Record<string, unknown>): void;
  debug(message: string, meta?: Record<string, unknown>): void;
  /** Optional verbose-level logging. */
  trace?(message: string, meta?: Record<string, unknown>): void;
  /** Creates a named child logger — presumably inheriting this logger's context; confirm with the implementation. */
  child?(name: string, context?: Record<string, unknown>): Logger;
}
// Cache types - matches the actual CacheProvider interface from @stock-bot/cache
/**
 * Asynchronous key/value cache contract with TTL support.
 */
export interface CacheProvider {
  /** Resolves with the cached value, or null on a miss. */
  get<T>(key: string): Promise<T | null>;
  /**
   * Stores `value` under `key`.
   *
   * `options` is either a bare TTL number or an options object; the option
   * names suggest Redis SET semantics (NX / XX / KEEPTTL / GET) —
   * NOTE(review): confirm against the implementation. Presumably resolves
   * with the previous value only when `getOldValue` is set, otherwise null.
   */
  set<T>(
    key: string,
    value: T,
    options?:
      | number
      | {
          ttl?: number;
          preserveTTL?: boolean;
          onlyIfExists?: boolean;
          onlyIfNotExists?: boolean;
          getOldValue?: boolean;
        }
  ): Promise<T | null>;
  /** Deletes the key. */
  del(key: string): Promise<void>;
  /** True if the key exists. */
  exists(key: string): Promise<boolean>;
  /** Removes all entries visible to this provider. */
  clear(): Promise<void>;
  /** Returns keys matching `pattern` — glob-style matching assumed; confirm. */
  keys(pattern: string): Promise<string[]>;
  /** Synchronous snapshot of hit/miss counters. */
  getStats(): CacheStats;
  /** Resolves true when the backing store is reachable/healthy. */
  health(): Promise<boolean>;
  /** Resolves once the provider is ready — presumably rejects on timeout; confirm. */
  waitForReady(timeout?: number): Promise<void>;
  /** True once the provider is connected and ready. */
  isReady(): boolean;
}
/** Hit/miss counters reported by CacheProvider.getStats(). */
export interface CacheStats {
  /** Number of cache hits. */
  hits: number;
  /** Number of cache misses. */
  misses: number;
  /** Number of errors encountered. */
  errors: number;
  /** Hit ratio — range/formula not evident here (presumably hits/total); confirm. */
  hitRate: number;
  /** Total number of lookups — presumably hits + misses; confirm. */
  total: number;
  /** Provider uptime — unit (ms vs s) not evident here; confirm. */
  uptime: number;
}
// Queue Manager types
/**
 * Manages named queues (create, look up, delete) and exposes cross-queue
 * facilities: per-queue batch cache and rate limiting.
 */
export interface QueueManager {
  /** Returns the named queue — behavior when it does not exist is not evident here; contrast getOrCreateQueue. */
  getQueue(queueName: string, options?: any): Queue;
  /** Creates a new named queue. */
  createQueue(queueName: string, options?: any): Queue;
  /** Returns the named queue, creating it if needed. */
  getOrCreateQueue(queueName: string, options?: any): Queue;
  /** True if a queue with this name exists. */
  queueExists(queueName: string): boolean;
  /** Deletes the named queue. */
  deleteQueue(queueName: string): Promise<void>;
  /** Stats for one queue — presumably for all queues when the name is omitted; confirm. */
  getQueueStats(queueName?: string): Promise<any>;
  /** Cache used for the given queue's batch payloads, or null when unavailable. */
  getBatchCache(queueName: string): CacheProvider | null;
  /** Underlying rate limiter instance (left untyped). */
  getRateLimiter(): any;
  /** Checks the rate limit for `key` under an optional named rule; when denied, retryAfter hints when to retry. */
  checkRateLimit(key: string, rule?: string): Promise<{ allowed: boolean; retryAfter?: number }>;
  /** Gracefully closes all queues and associated resources. */
  shutdown(): Promise<void>;
}
/**
 * Minimal queue contract (Bull-style API: add jobs, register processors,
 * subscribe to events, manage lifecycle).
 */
export interface Queue {
  /** Enqueues a single named job. */
  add(name: string, data: any, opts?: any): Promise<any>;
  /** Enqueues many jobs at once. */
  addBulk(jobs: Array<{ name: string; data: any; opts?: any }>): Promise<any[]>;
  // process() overloads: optional job name and/or concurrency, Bull-style.
  process(concurrency: number, processor: any): Promise<void>;
  process(name: string, concurrency: number, processor: any): Promise<void>;
  process(processor: any): Promise<void>;
  /** Subscribes to queue events (event names are implementation-defined). */
  on(event: string, listener: (...args: any[]) => void): void;
  /** Pauses job processing. */
  pause(): Promise<void>;
  /** Resumes job processing. */
  resume(): Promise<void>;
  /** Closes the queue and its connections. */
  close(): Promise<void>;
  /** Removes old jobs older than `grace`; presumably returns removed job ids — confirm. */
  clean(grace: number, limit?: number, type?: string): Promise<string[]>;
  /** Fetches jobs in the given states, optionally paged and sorted. */
  getJobs(types: string[], start?: number, end?: number, asc?: boolean): Promise<any[]>;
  /** Job counts keyed by state. */
  getJobCounts(...types: string[]): Promise<Record<string, number>>;
  /** Queue name. */
  name: string;
}
// MongoDB types
/**
 * Thin facade over a MongoDB driver.
 *
 * Most methods take the collection name as a string plus an optional
 * `dbName` to target a non-default database. Driver-level types (Db,
 * Collection, result objects, filters) are left as `any` to avoid a
 * compile-time dependency on the mongodb package.
 */
export interface MongoDBClient {
  // --- Connection lifecycle ---
  connect(): Promise<void>;
  disconnect(): Promise<void>;
  isConnected(): boolean;
  // --- Database / collection access and management ---
  getDb(dbName?: string): any; // MongoDB Db type
  collection<T = any>(name: string, dbName?: string): any; // MongoDB Collection<T>
  createCollection(name: string, options?: any, dbName?: string): Promise<void>;
  dropCollection(name: string, dbName?: string): Promise<boolean>;
  listCollections(dbName?: string): Promise<Array<{ name: string; type: string }>>;
  // --- CRUD operations ---
  find<T = any>(collection: string, filter?: any, options?: any, dbName?: string): Promise<T[]>;
  findOne<T = any>(
    collection: string,
    filter: any,
    options?: any,
    dbName?: string
  ): Promise<T | null>;
  insertOne<T = any>(collection: string, document: T, options?: any, dbName?: string): Promise<any>;
  insertMany<T = any>(
    collection: string,
    documents: T[],
    options?: any,
    dbName?: string
  ): Promise<any>;
  updateOne(
    collection: string,
    filter: any,
    update: any,
    options?: any,
    dbName?: string
  ): Promise<any>;
  updateMany(
    collection: string,
    filter: any,
    update: any,
    options?: any,
    dbName?: string
  ): Promise<any>;
  deleteOne(collection: string, filter: any, options?: any, dbName?: string): Promise<any>;
  deleteMany(collection: string, filter: any, options?: any, dbName?: string): Promise<any>;
  countDocuments(collection: string, filter?: any, options?: any, dbName?: string): Promise<number>;
  aggregate<T = any>(
    collection: string,
    pipeline: any[],
    options?: any,
    dbName?: string
  ): Promise<T[]>;
  // --- Index management ---
  createIndex(collection: string, indexSpec: any, options?: any, dbName?: string): Promise<string>;
  dropIndex(collection: string, indexName: string, options?: any, dbName?: string): Promise<void>;
  listIndexes(collection: string, dbName?: string): Promise<any[]>;
  // --- Bulk upsert helpers ---
  // `uniqueKeys` names the field(s) used to match existing documents.
  batchUpsert<T = any>(
    collection: string,
    documents: T[],
    uniqueKeys: string | string[],
    options?: any,
    dbName?: string
  ): Promise<any>;
  // The *Stock/*Trading/*Analytics variants omit `dbName` — presumably each
  // targets a fixed, domain-specific database; confirm with implementation.
  batchUpsertStock<T = any>(
    collection: string,
    documents: T[],
    uniqueKeys: string | string[],
    options?: any
  ): Promise<any>;
  batchUpsertTrading<T = any>(
    collection: string,
    documents: T[],
    uniqueKeys: string | string[],
    options?: any
  ): Promise<any>;
  batchUpsertAnalytics<T = any>(
    collection: string,
    documents: T[],
    uniqueKeys: string | string[],
    options?: any
  ): Promise<any>;
}
// PostgreSQL types
/**
 * Thin facade over a PostgreSQL connection pool: raw parameterized queries,
 * transactions, and statement-builder conveniences for common operations.
 */
export interface PostgresClient {
  // --- Raw SQL ---
  /** Runs a parameterized query and returns all rows plus the row count. */
  query<T = any>(text: string, params?: any[]): Promise<{ rows: T[]; rowCount: number }>;
  /** Runs a query and returns the first row, or null when empty. */
  queryOne<T = any>(text: string, params?: any[]): Promise<T | null>;
  /** Runs a statement where only the affected-row count matters. */
  execute(text: string, params?: any[]): Promise<{ rowCount: number }>;
  /** Runs `callback` inside a transaction — commit/rollback presumably handled automatically; confirm. */
  transaction<T>(callback: (client: any) => Promise<T>): Promise<T>;
  // --- Statement builders (`returning` selects columns to echo back) ---
  insert(table: string, data: Record<string, any>, returning?: string | string[]): Promise<any>;
  update(
    table: string,
    data: Record<string, any>,
    where: Record<string, any>,
    returning?: string | string[]
  ): Promise<any>;
  delete(table: string, where: Record<string, any>, returning?: string | string[]): Promise<any>;
  // Upsert keyed on `conflictColumns` — presumably INSERT ... ON CONFLICT; confirm.
  upsert(
    table: string,
    data: Record<string, any>,
    conflictColumns: string | string[],
    returning?: string | string[]
  ): Promise<any>;
  batchInsert(
    table: string,
    data: Record<string, any>[],
    returning?: string | string[]
  ): Promise<any[]>;
  batchUpsert(
    table: string,
    data: Record<string, any>[],
    conflictColumns: string | string[],
    returning?: string | string[]
  ): Promise<any[]>;
  // --- Convenience predicates ---
  exists(table: string, where: Record<string, any>): Promise<boolean>;
  count(table: string, where?: Record<string, any>): Promise<number>;
  // --- Pool access / lifecycle ---
  /** Checks out a raw pool client; pair with releaseClient to avoid leaks. */
  getClient(): Promise<any>;
  releaseClient(client: any): void;
  /** Shuts down the pool. */
  end(): Promise<void>;
  isConnected(): boolean;
}
// QuestDB types
/**
 * QuestDB (time-series database) client: SQL reads plus ingestion-style
 * writes, with an explicit flush and timestamped insert helpers.
 */
export interface QuestDBClient {
  // --- Connection lifecycle ---
  connect(): Promise<void>;
  disconnect(): Promise<void>;
  isConnected(): boolean;
  /** Runs a SQL query and returns the rows. */
  query<T = any>(sql: string): Promise<T[]>;
  /** Inserts one row or many rows into `table`. */
  insert(table: string, data: Record<string, any> | Record<string, any>[]): Promise<void>;
  insertBatch(table: string, data: Record<string, any>[]): Promise<void>;
  // --- Schema management ---
  createTable(tableDef: any): Promise<void>;
  dropTable(tableName: string): Promise<void>;
  tableExists(tableName: string): Promise<boolean>;
  getTableSchema(tableName: string): Promise<any>;
  /** Flushes pending writes — write buffering is implied by this method's existence; confirm semantics. */
  flush(): Promise<void>;
  // --- Timestamped inserts (behavior when timestamp omitted is not evident here — confirm whether it defaults to "now") ---
  insertWithTimestamp(
    table: string,
    data: Record<string, any>,
    timestamp?: Date | number
  ): Promise<void>;
  batchInsertWithTimestamp(
    table: string,
    data: Array<Record<string, any> & { timestamp?: Date | number }>
  ): Promise<void>;
}
// Browser types (Playwright)
/** Structural subset of Playwright's Browser API used by this codebase. */
export interface Browser {
  /** Opens a new page. */
  newPage(): Promise<Page>;
  /** Creates an isolated browser context (separate cookies/storage). */
  newContext(options?: any): Promise<BrowserContext>;
  close(): Promise<void>;
  isConnected(): boolean;
  /** Browser version string. */
  version(): string;
}
/** Structural subset of Playwright's BrowserContext API used by this codebase. */
export interface BrowserContext {
  /** Opens a new page inside this context. */
  newPage(): Promise<Page>;
  close(): Promise<void>;
  /** Pages currently open in this context. */
  pages(): Page[];
  /** Default timeout for operations in this context. */
  setDefaultTimeout(timeout: number): void;
  /** Default timeout for navigations in this context. */
  setDefaultNavigationTimeout(timeout: number): void;
  addCookies(cookies: any[]): Promise<void>;
  clearCookies(): Promise<void>;
}
/** Structural subset of Playwright's Page API used by this codebase. */
export interface Page {
  /** Navigates to `url`. */
  goto(url: string, options?: any): Promise<any>;
  /** Waits for an element matching `selector` to appear. */
  waitForSelector(selector: string, options?: any): Promise<any>;
  click(selector: string, options?: any): Promise<void>;
  /** Fills an input matching `selector` with `value`. */
  fill(selector: string, value: string, options?: any): Promise<void>;
  /** Runs `pageFunction` in the page's JS context and returns its result. */
  evaluate<T = any>(pageFunction: any, arg?: any): Promise<T>;
  /** Like evaluate, but returns a handle to the in-page object. */
  evaluateHandle<T = any>(pageFunction: any, arg?: any): Promise<T>;
  screenshot(options?: any): Promise<Buffer>;
  pdf(options?: any): Promise<Buffer>;
  /** Full HTML content of the page. */
  content(): Promise<string>;
  title(): Promise<string>;
  url(): string;
  close(): Promise<void>;
  /** Unconditional wait — ms assumed; prefer event-based waits where possible. */
  waitForTimeout(timeout: number): Promise<void>;
  waitForLoadState(state?: string, options?: any): Promise<void>;
  /** Playwright Locator for the selector (left untyped here). */
  locator(selector: string): any;
  /** All element handles matching the selector. */
  $$(selector: string): Promise<any[]>;
  /** Text content of the first element matching the selector, or null. */
  textContent(selector: string): Promise<string | null>;
}
// Proxy Manager types
/**
 * Proxy pool manager: leases proxies, tracks success/failure, and maintains
 * a blacklist. Methods accepting `ProxyInfo | string` presumably also
 * accept a proxy id — confirm with the implementation.
 */
export interface ProxyManager {
  /** Leases one proxy, or null if none available; `key` looks like a selection/affinity hint — confirm. */
  getProxy(key?: string): Promise<ProxyInfo | null>;
  /** Leases up to `count` proxies. */
  getProxies(count: number, key?: string): Promise<ProxyInfo[]>;
  /** Returns a leased proxy to the pool. */
  releaseProxy(proxy: ProxyInfo | string): Promise<void>;
  /** Records a failure for the proxy, with an optional reason. */
  markProxyFailed(proxy: ProxyInfo | string, reason?: string): Promise<void>;
  /** Aggregate pool statistics. */
  getStats(): Promise<ProxyStats>;
  /** Resets the proxy's tracked state — exact scope (counters? blacklist?) not evident here; confirm. */
  resetProxy(proxy: ProxyInfo | string): Promise<void>;
  /** Blacklists the proxy, optionally only for `duration` (unit assumed ms; confirm). */
  blacklistProxy(proxy: ProxyInfo | string, duration?: number): Promise<void>;
  isBlacklisted(proxy: ProxyInfo | string): Promise<boolean>;
  /** Refreshes the pool from its upstream source. */
  refreshProxies(): Promise<void>;
}
/** A single proxy endpoint plus its usage/health bookkeeping. */
export interface ProxyInfo {
  /** Stable identifier — presumably what ProxyManager methods accept as the string form; confirm. */
  id: string;
  host: string;
  port: number;
  // Optional credentials for authenticated proxies.
  username?: string;
  password?: string;
  /** Proxy protocol — exact values (e.g. 'http', 'socks5') not evident here; confirm. */
  protocol?: string;
  country?: string;
  // Usage/health tracking, presumably maintained by the proxy manager.
  lastUsed?: Date;
  failureCount?: number;
  successCount?: number;
  /** Average response time — unit not evident here; confirm. */
  averageResponseTime?: number;
}
/** Aggregate counts for the proxy pool (see ProxyManager.getStats). */
export interface ProxyStats {
  /** Total proxies known to the pool. */
  total: number;
  /** Proxies currently available for lease. */
  available: number;
  /** Proxies currently leased out. */
  inUse: number;
  /** Proxies marked failed. */
  failed: number;
  /** Proxies currently blacklisted. */
  blacklisted: number;
}
/**
 * Complete service types for dependency injection
 *
 * Only `logger` is mandatory; every other service is optional so containers
 * can enable services selectively.
 */
export interface ServiceTypes {
  logger: Logger;
  /** Service-scoped cache. */
  cache?: CacheProvider;
  /** Cache shared across services. */
  globalCache?: CacheProvider;
  queue?: QueueManager;
  proxy?: ProxyManager;
  browser?: Browser;
  mongodb?: MongoDBClient;
  postgres?: PostgresClient;
  questdb?: QuestDBClient;
}