This commit is contained in:
Boki 2025-06-22 17:55:51 -04:00
parent d858222af7
commit 7d9044ab29
202 changed files with 10755 additions and 10972 deletions

View file

@@ -1,294 +1,301 @@
/**
* Awilix DI Container Setup
* Creates a decoupled, reusable dependency injection container
*/
import { Browser } from '@stock-bot/browser';
import { createCache, type CacheProvider } from '@stock-bot/cache';
import type { IServiceContainer } from '@stock-bot/handlers';
import { getLogger, type Logger } from '@stock-bot/logger';
import { MongoDBClient } from '@stock-bot/mongodb';
import { PostgreSQLClient } from '@stock-bot/postgres';
import { ProxyManager } from '@stock-bot/proxy';
import { QuestDBClient } from '@stock-bot/questdb';
import { type QueueManager } from '@stock-bot/queue';
import { asFunction, asValue, createContainer, InjectionMode, type AwilixContainer } from 'awilix';
import { z } from 'zod';
// Configuration schema with validation.
// Sections that carry an `enabled` flag default to enabled when the flag is
// omitted (registration code checks `enabled !== false`).
const appConfigSchema = z.object({
// Redis/Dragonfly connection, consumed by the cache provider and queue manager.
redis: z.object({
enabled: z.boolean().optional(),
host: z.string(),
port: z.number(),
password: z.string().optional(),
username: z.string().optional(),
db: z.number().optional(),
}),
// MongoDB connection settings.
mongodb: z.object({
enabled: z.boolean().optional(),
uri: z.string(),
database: z.string(),
}),
// PostgreSQL connection settings.
postgres: z.object({
enabled: z.boolean().optional(),
host: z.string(),
port: z.number(),
database: z.string(),
user: z.string(),
password: z.string(),
}),
// QuestDB section is optional; localhost defaults are substituted when absent.
questdb: z.object({
enabled: z.boolean().optional(),
host: z.string(),
httpPort: z.number().optional(),
pgPort: z.number().optional(),
influxPort: z.number().optional(),
database: z.string().optional(),
}).optional(),
// Proxy manager tuning; optional.
proxy: z.object({
cachePrefix: z.string().optional(),
ttl: z.number().optional(),
}).optional(),
// Browser automation options; optional.
browser: z.object({
headless: z.boolean().optional(),
timeout: z.number().optional(),
}).optional(),
});
// Static type inferred from the runtime schema above.
export type AppConfig = z.infer<typeof appConfigSchema>;
/**
 * Service type definitions for type-safe resolution.
 *
 * Keys here are the registration names used with the Awilix container.
 * Entries typed `X | null` resolve to `null` when the backing service is
 * disabled in configuration.
 */
export interface ServiceDefinitions {
// Configuration
config: AppConfig;
logger: Logger;
// Core services
cache: CacheProvider | null;
proxyManager: ProxyManager | null; // null when the cache is disabled
browser: Browser;
queueManager: QueueManager | null;
// Database clients
mongoClient: MongoDBClient | null;
postgresClient: PostgreSQLClient | null;
questdbClient: QuestDBClient | null;
// Aggregate service container
serviceContainer: IServiceContainer; // handler-facing view of the services above
}
/**
 * Create and configure the DI container with type safety.
 *
 * @param rawConfig - Unvalidated configuration; parsed against
 *   `appConfigSchema` (throws a zod error on invalid input).
 * @returns An Awilix container with every service registered as a singleton.
 *   Services disabled via config (`enabled: false`) are registered as `null`.
 */
export function createServiceContainer(rawConfig: unknown): AwilixContainer<ServiceDefinitions> {
  // Fail fast on malformed configuration.
  const config = appConfigSchema.parse(rawConfig);
  const container = createContainer<ServiceDefinitions>({
    injectionMode: InjectionMode.PROXY,
  });
  // NOTE: typed `any` because the map also carries helper keys
  // (redisConfig, mongoConfig, ...) that are not part of ServiceDefinitions.
  const registrations: any = {
    // Configuration
    config: asValue(config),
    redisConfig: asValue(config.redis),
    mongoConfig: asValue(config.mongodb),
    postgresConfig: asValue(config.postgres),
    // QuestDB falls back to a local instance when no section is provided.
    questdbConfig: asValue(config.questdb || { host: 'localhost', httpPort: 9000, pgPort: 8812, influxPort: 9009 }),
    // Core services with dependency injection
    logger: asFunction(() => getLogger('app')).singleton(),
  };
  // Cache/Dragonfly — enabled unless explicitly turned off.
  if (config.redis?.enabled !== false) {
    registrations.cache = asFunction(({ redisConfig, logger }) =>
      createCache({
        redisConfig,
        logger,
        keyPrefix: 'cache:',
        ttl: 3600,
        enableMetrics: true,
      })
    ).singleton();
  } else {
    registrations.cache = asValue(null);
  }
  // Proxy manager depends on cache; resolves to null when cache is disabled.
  registrations.proxyManager = asFunction(({ cache, config, logger }) => {
    if (!cache) {
      logger.warn('Cache is disabled, ProxyManager will have limited functionality');
      return null;
    }
    return new ProxyManager(cache, config.proxy || {}, logger);
  }).singleton();
  // MongoDB client — enabled unless explicitly turned off.
  if (config.mongodb?.enabled !== false) {
    registrations.mongoClient = asFunction(({ mongoConfig, logger }) => {
      return new MongoDBClient(mongoConfig, logger);
    }).singleton();
  } else {
    registrations.mongoClient = asValue(null);
  }
  // PostgreSQL client — enabled unless explicitly turned off.
  if (config.postgres?.enabled !== false) {
    registrations.postgresClient = asFunction(({ postgresConfig, logger }) => {
      return new PostgreSQLClient(
        {
          host: postgresConfig.host,
          port: postgresConfig.port,
          database: postgresConfig.database,
          username: postgresConfig.user,
          password: postgresConfig.password,
        },
        logger
      );
    }).singleton();
  } else {
    registrations.postgresClient = asValue(null);
  }
  // QuestDB client — enabled unless explicitly turned off.
  if (config.questdb?.enabled !== false) {
    registrations.questdbClient = asFunction(({ questdbConfig, logger }) => {
      // Use the injected logger (not console.log) so the message flows
      // through the application's structured logging.
      logger.info('Creating QuestDB client', { questdbConfig });
      return new QuestDBClient(
        {
          host: questdbConfig.host,
          httpPort: questdbConfig.httpPort,
          pgPort: questdbConfig.pgPort,
          influxPort: questdbConfig.influxPort,
          database: questdbConfig.database,
          // QuestDB appears to require default credentials
          user: 'admin',
          password: 'quest',
        },
        logger
      );
    }).singleton();
  } else {
    registrations.questdbClient = asValue(null);
  }
  // Queue manager - placeholder until decoupled from its singleton pattern.
  registrations.queueManager = asFunction(({ redisConfig, cache, logger }) => {
    // Import dynamically to avoid circular dependency
    const { QueueManager } = require('@stock-bot/queue');
    // Reuse an existing instance if the singleton was already initialized.
    if (QueueManager.isInitialized()) {
      return QueueManager.getInstance();
    }
    // Initialize if not already done
    return QueueManager.initialize({
      redis: { host: redisConfig.host, port: redisConfig.port, db: redisConfig.db },
      enableScheduledJobs: true,
      delayWorkerStart: true // We'll start workers manually
    });
  }).singleton();
  // Browser automation
  registrations.browser = asFunction(({ config, logger }) => {
    return new Browser(logger, config.browser);
  }).singleton();
  // Aggregate IServiceContainer consumed by the handlers package.
  registrations.serviceContainer = asFunction((cradle) => ({
    logger: cradle.logger,
    cache: cradle.cache,
    proxy: cradle.proxyManager,
    browser: cradle.browser,
    mongodb: cradle.mongoClient,
    postgres: cradle.postgresClient,
    questdb: cradle.questdbClient,
    queue: cradle.queueManager,
  } as IServiceContainer)).singleton();
  container.register(registrations);
  return container;
}
/**
 * Initialize async services after container creation.
 *
 * Order matters: the cache is awaited first because the proxy manager
 * requires it; database clients and the browser follow. Any failure is
 * logged with context and rethrown to the caller.
 */
export async function initializeServices(container: AwilixContainer): Promise<void> {
  const logger = container.resolve('logger');
  const config = container.resolve('config');
  // True when the resolved service exists and exposes the named method.
  const canCall = (service: unknown, method: string): boolean =>
    Boolean(service) && typeof (service as Record<string, unknown>)[method] === 'function';
  try {
    // Cache first — downstream services depend on it.
    const cache = container.resolve('cache');
    if (canCall(cache, 'waitForReady')) {
      await cache.waitForReady(10000);
      logger.info('Cache is ready');
    } else if (config.redis?.enabled === false) {
      logger.info('Cache is disabled');
    }
    // Proxy manager (needs the cache above).
    const proxyManager = container.resolve('proxyManager');
    if (canCall(proxyManager, 'initialize')) {
      await proxyManager.initialize();
      logger.info('Proxy manager initialized');
    } else {
      logger.info('Proxy manager is disabled (requires cache)');
    }
    // MongoDB client, when enabled.
    const mongoClient = container.resolve('mongoClient');
    if (canCall(mongoClient, 'connect')) {
      await mongoClient.connect();
      logger.info('MongoDB connected');
    } else if (config.mongodb?.enabled === false) {
      logger.info('MongoDB is disabled');
    }
    // PostgreSQL client, when enabled.
    const postgresClient = container.resolve('postgresClient');
    if (canCall(postgresClient, 'connect')) {
      await postgresClient.connect();
      logger.info('PostgreSQL connected');
    } else if (config.postgres?.enabled === false) {
      logger.info('PostgreSQL is disabled');
    }
    // QuestDB client, when enabled.
    const questdbClient = container.resolve('questdbClient');
    if (canCall(questdbClient, 'connect')) {
      await questdbClient.connect();
      logger.info('QuestDB connected');
    } else if (config.questdb?.enabled === false) {
      logger.info('QuestDB is disabled');
    }
    // Browser automation, when the implementation exposes initialize().
    const browser = container.resolve('browser');
    if (canCall(browser, 'initialize')) {
      await browser.initialize();
      logger.info('Browser initialized');
    }
    logger.info('All services initialized successfully');
  } catch (error) {
    logger.error('Failed to initialize services', { error });
    throw error;
  }
}
// Export typed container
/** Fully-typed Awilix container for this application. */
export type ServiceContainer = AwilixContainer<ServiceDefinitions>;
/** Cradle shape visible inside asFunction factories. */
export type ServiceCradle = ServiceDefinitions;
/**
* Awilix DI Container Setup
* Creates a decoupled, reusable dependency injection container
*/
import { asFunction, asValue, createContainer, InjectionMode, type AwilixContainer } from 'awilix';
import { z } from 'zod';
import { Browser } from '@stock-bot/browser';
import { createCache, type CacheProvider } from '@stock-bot/cache';
import type { IServiceContainer } from '@stock-bot/handlers';
import { getLogger, type Logger } from '@stock-bot/logger';
import { MongoDBClient } from '@stock-bot/mongodb';
import { PostgreSQLClient } from '@stock-bot/postgres';
import { ProxyManager } from '@stock-bot/proxy';
import { QuestDBClient } from '@stock-bot/questdb';
import { type QueueManager } from '@stock-bot/queue';
// Configuration schema with validation.
// Sections that carry an `enabled` flag default to enabled when the flag is
// omitted (registration code checks `enabled !== false`).
const appConfigSchema = z.object({
// Redis/Dragonfly connection, consumed by the cache provider and queue manager.
redis: z.object({
enabled: z.boolean().optional(),
host: z.string(),
port: z.number(),
password: z.string().optional(),
username: z.string().optional(),
db: z.number().optional(),
}),
// MongoDB connection settings.
mongodb: z.object({
enabled: z.boolean().optional(),
uri: z.string(),
database: z.string(),
}),
// PostgreSQL connection settings.
postgres: z.object({
enabled: z.boolean().optional(),
host: z.string(),
port: z.number(),
database: z.string(),
user: z.string(),
password: z.string(),
}),
// QuestDB section is optional; localhost defaults are substituted when absent.
questdb: z
.object({
enabled: z.boolean().optional(),
host: z.string(),
httpPort: z.number().optional(),
pgPort: z.number().optional(),
influxPort: z.number().optional(),
database: z.string().optional(),
})
.optional(),
// Proxy manager tuning; optional.
proxy: z
.object({
cachePrefix: z.string().optional(),
ttl: z.number().optional(),
})
.optional(),
// Browser automation options; optional.
browser: z
.object({
headless: z.boolean().optional(),
timeout: z.number().optional(),
})
.optional(),
});
// Static type inferred from the runtime schema above.
export type AppConfig = z.infer<typeof appConfigSchema>;
/**
 * Service type definitions for type-safe resolution.
 *
 * Keys here are the registration names used with the Awilix container.
 * Entries typed `X | null` resolve to `null` when the backing service is
 * disabled in configuration.
 */
export interface ServiceDefinitions {
// Configuration
config: AppConfig;
logger: Logger;
// Core services
cache: CacheProvider | null;
proxyManager: ProxyManager | null; // null when the cache is disabled
browser: Browser;
queueManager: QueueManager | null;
// Database clients
mongoClient: MongoDBClient | null;
postgresClient: PostgreSQLClient | null;
questdbClient: QuestDBClient | null;
// Aggregate service container
serviceContainer: IServiceContainer; // handler-facing view of the services above
}
/**
 * Create and configure the DI container with type safety.
 *
 * @param rawConfig - Unvalidated configuration; parsed against
 *   `appConfigSchema` (throws a zod error on invalid input).
 * @returns An Awilix container with every service registered as a singleton.
 *   Services disabled via config (`enabled: false`) are registered as `null`.
 */
export function createServiceContainer(rawConfig: unknown): AwilixContainer<ServiceDefinitions> {
  // Fail fast on malformed configuration.
  const config = appConfigSchema.parse(rawConfig);
  const container = createContainer<ServiceDefinitions>({
    injectionMode: InjectionMode.PROXY,
  });
  // NOTE: typed `any` because the map also carries helper keys
  // (redisConfig, mongoConfig, ...) that are not part of ServiceDefinitions.
  const registrations: any = {
    // Configuration
    config: asValue(config),
    redisConfig: asValue(config.redis),
    mongoConfig: asValue(config.mongodb),
    postgresConfig: asValue(config.postgres),
    // QuestDB falls back to a local instance when no section is provided.
    questdbConfig: asValue(
      config.questdb || { host: 'localhost', httpPort: 9000, pgPort: 8812, influxPort: 9009 }
    ),
    // Core services with dependency injection
    logger: asFunction(() => getLogger('app')).singleton(),
  };
  // Cache/Dragonfly — enabled unless explicitly turned off.
  if (config.redis?.enabled !== false) {
    registrations.cache = asFunction(({ redisConfig, logger }) =>
      createCache({
        redisConfig,
        logger,
        keyPrefix: 'cache:',
        ttl: 3600,
        enableMetrics: true,
      })
    ).singleton();
  } else {
    registrations.cache = asValue(null);
  }
  // Proxy manager depends on cache; resolves to null when cache is disabled.
  registrations.proxyManager = asFunction(({ cache, config, logger }) => {
    if (!cache) {
      logger.warn('Cache is disabled, ProxyManager will have limited functionality');
      return null;
    }
    return new ProxyManager(cache, config.proxy || {}, logger);
  }).singleton();
  // MongoDB client — enabled unless explicitly turned off.
  if (config.mongodb?.enabled !== false) {
    registrations.mongoClient = asFunction(({ mongoConfig, logger }) => {
      return new MongoDBClient(mongoConfig, logger);
    }).singleton();
  } else {
    registrations.mongoClient = asValue(null);
  }
  // PostgreSQL client — enabled unless explicitly turned off.
  if (config.postgres?.enabled !== false) {
    registrations.postgresClient = asFunction(({ postgresConfig, logger }) => {
      return new PostgreSQLClient(
        {
          host: postgresConfig.host,
          port: postgresConfig.port,
          database: postgresConfig.database,
          username: postgresConfig.user,
          password: postgresConfig.password,
        },
        logger
      );
    }).singleton();
  } else {
    registrations.postgresClient = asValue(null);
  }
  // QuestDB client — enabled unless explicitly turned off.
  if (config.questdb?.enabled !== false) {
    registrations.questdbClient = asFunction(({ questdbConfig, logger }) => {
      // Use the injected logger (not console.log) so the message flows
      // through the application's structured logging.
      logger.info('Creating QuestDB client', { questdbConfig });
      return new QuestDBClient(
        {
          host: questdbConfig.host,
          httpPort: questdbConfig.httpPort,
          pgPort: questdbConfig.pgPort,
          influxPort: questdbConfig.influxPort,
          database: questdbConfig.database,
          // QuestDB appears to require default credentials
          user: 'admin',
          password: 'quest',
        },
        logger
      );
    }).singleton();
  } else {
    registrations.questdbClient = asValue(null);
  }
  // Queue manager - placeholder until decoupled from its singleton pattern.
  registrations.queueManager = asFunction(({ redisConfig, cache, logger }) => {
    // Import dynamically to avoid circular dependency
    const { QueueManager } = require('@stock-bot/queue');
    // Reuse an existing instance if the singleton was already initialized.
    if (QueueManager.isInitialized()) {
      return QueueManager.getInstance();
    }
    // Initialize if not already done
    return QueueManager.initialize({
      redis: { host: redisConfig.host, port: redisConfig.port, db: redisConfig.db },
      enableScheduledJobs: true,
      delayWorkerStart: true, // We'll start workers manually
    });
  }).singleton();
  // Browser automation
  registrations.browser = asFunction(({ config, logger }) => {
    return new Browser(logger, config.browser);
  }).singleton();
  // Aggregate IServiceContainer consumed by the handlers package.
  registrations.serviceContainer = asFunction(
    cradle =>
      ({
        logger: cradle.logger,
        cache: cradle.cache,
        proxy: cradle.proxyManager,
        browser: cradle.browser,
        mongodb: cradle.mongoClient,
        postgres: cradle.postgresClient,
        questdb: cradle.questdbClient,
        queue: cradle.queueManager,
      }) as IServiceContainer
  ).singleton();
  container.register(registrations);
  return container;
}
/**
 * Initialize async services after container creation.
 *
 * Order matters: the cache is awaited first because the proxy manager
 * requires it; database clients and the browser follow. Any failure is
 * logged with context and rethrown to the caller.
 */
export async function initializeServices(container: AwilixContainer): Promise<void> {
  const logger = container.resolve('logger');
  const config = container.resolve('config');
  // True when the resolved service exists and exposes the named method.
  const canCall = (service: unknown, method: string): boolean =>
    Boolean(service) && typeof (service as Record<string, unknown>)[method] === 'function';
  try {
    // Cache first — downstream services depend on it.
    const cache = container.resolve('cache');
    if (canCall(cache, 'waitForReady')) {
      await cache.waitForReady(10000);
      logger.info('Cache is ready');
    } else if (config.redis?.enabled === false) {
      logger.info('Cache is disabled');
    }
    // Proxy manager (needs the cache above).
    const proxyManager = container.resolve('proxyManager');
    if (canCall(proxyManager, 'initialize')) {
      await proxyManager.initialize();
      logger.info('Proxy manager initialized');
    } else {
      logger.info('Proxy manager is disabled (requires cache)');
    }
    // MongoDB client, when enabled.
    const mongoClient = container.resolve('mongoClient');
    if (canCall(mongoClient, 'connect')) {
      await mongoClient.connect();
      logger.info('MongoDB connected');
    } else if (config.mongodb?.enabled === false) {
      logger.info('MongoDB is disabled');
    }
    // PostgreSQL client, when enabled.
    const postgresClient = container.resolve('postgresClient');
    if (canCall(postgresClient, 'connect')) {
      await postgresClient.connect();
      logger.info('PostgreSQL connected');
    } else if (config.postgres?.enabled === false) {
      logger.info('PostgreSQL is disabled');
    }
    // QuestDB client, when enabled.
    const questdbClient = container.resolve('questdbClient');
    if (canCall(questdbClient, 'connect')) {
      await questdbClient.connect();
      logger.info('QuestDB connected');
    } else if (config.questdb?.enabled === false) {
      logger.info('QuestDB is disabled');
    }
    // Browser automation, when the implementation exposes initialize().
    const browser = container.resolve('browser');
    if (canCall(browser, 'initialize')) {
      await browser.initialize();
      logger.info('Browser initialized');
    }
    logger.info('All services initialized successfully');
  } catch (error) {
    logger.error('Failed to initialize services', { error });
    throw error;
  }
}
// Export typed container
/** Fully-typed Awilix container for this application. */
export type ServiceContainer = AwilixContainer<ServiceDefinitions>;
/** Cradle shape visible inside asFunction factories. */
export type ServiceCradle = ServiceDefinitions;

View file

@@ -1,13 +1,13 @@
// Export all dependency injection components
export * from './operation-context';
export * from './pool-size-calculator';
export * from './types';
// Awilix container exports
export {
createServiceContainer,
initializeServices,
type AppConfig,
type ServiceCradle,
type ServiceContainer
} from './awilix-container';
// Export all dependency injection components
export * from './operation-context';
export * from './pool-size-calculator';
export * from './types';
// Awilix container exports
export {
createServiceContainer,
initializeServices,
type AppConfig,
type ServiceCradle,
type ServiceContainer,
} from './awilix-container';

View file

@@ -3,6 +3,7 @@
*/
import { getLogger, type Logger } from '@stock-bot/logger';
interface ServiceResolver {
resolve<T>(serviceName: string): T;
resolveAsync<T>(serviceName: string): Promise<T>;
@@ -23,17 +24,19 @@ export class OperationContext {
public readonly metadata: Record<string, any>;
private readonly container?: ServiceResolver;
private readonly startTime: Date;
constructor(options: OperationContextOptions) {
this.container = options.container;
this.metadata = options.metadata || {};
this.traceId = options.traceId || this.generateTraceId();
this.startTime = new Date();
this.logger = options.parentLogger || getLogger(`${options.handlerName}:${options.operationName}`, {
traceId: this.traceId,
metadata: this.metadata,
});
this.logger =
options.parentLogger ||
getLogger(`${options.handlerName}:${options.operationName}`, {
traceId: this.traceId,
metadata: this.metadata,
});
}
/**
@@ -42,8 +45,8 @@ export class OperationContext {
static create(
handlerName: string,
operationName: string,
options: {
container?: ServiceResolver;
options: {
container?: ServiceResolver;
parentLogger?: Logger;
metadata?: Record<string, any>;
traceId?: string;
@@ -95,7 +98,7 @@ export class OperationContext {
*/
logCompletion(success: boolean, error?: Error): void {
const executionTime = this.getExecutionTime();
if (success) {
this.logger.info('Operation completed successfully', {
executionTime,
@@ -138,4 +141,4 @@ export class OperationContext {
private generateTraceId(): string {
return `${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
}
}
}

View file

@@ -1,80 +1,82 @@
import type { ConnectionPoolConfig } from './types';
/** Recommended connection-pool bounds: minimum, maximum, and idle targets. */
export interface PoolSizeRecommendation {
min: number;
max: number;
idle: number;
}
export class PoolSizeCalculator {
  /** Built-in recommendations, keyed by service or handler name. */
  private static readonly DEFAULT_SIZES: Record<string, PoolSizeRecommendation> = {
    // Service-level defaults
    'data-ingestion': { min: 5, max: 50, idle: 10 },
    'data-pipeline': { min: 3, max: 30, idle: 5 },
    'processing-service': { min: 2, max: 20, idle: 3 },
    'web-api': { min: 2, max: 10, idle: 2 },
    'portfolio-service': { min: 2, max: 15, idle: 3 },
    'strategy-service': { min: 3, max: 25, idle: 5 },
    'execution-service': { min: 2, max: 10, idle: 2 },
    // Handler-level defaults
    'batch-import': { min: 10, max: 100, idle: 20 },
    'real-time': { min: 2, max: 10, idle: 3 },
    'analytics': { min: 5, max: 30, idle: 10 },
    'reporting': { min: 3, max: 20, idle: 5 },
  };

  /**
   * Resolve a pool-size recommendation.
   *
   * Precedence: explicit customConfig bounds > handler-specific default >
   * service-level default > generic fallback (2/10/3).
   *
   * @param serviceName - Name of the owning service.
   * @param handlerName - Optional handler name; checked before serviceName.
   * @param customConfig - Explicit bounds, applied when BOTH minConnections
   *   and maxConnections are provided (0 is a valid value).
   */
  static calculate(
    serviceName: string,
    handlerName?: string,
    customConfig?: Partial<ConnectionPoolConfig>
  ): PoolSizeRecommendation {
    // Explicit configuration wins. Use a null check (not truthiness) so a
    // legitimate explicit value of 0 connections is not silently ignored.
    if (customConfig?.minConnections != null && customConfig?.maxConnections != null) {
      return {
        min: customConfig.minConnections,
        max: customConfig.maxConnections,
        idle: Math.floor((customConfig.minConnections + customConfig.maxConnections) / 4),
      };
    }
    // Try handler-specific sizes first, then service-level
    const key = handlerName || serviceName;
    const recommendation = this.DEFAULT_SIZES[key] || this.DEFAULT_SIZES[serviceName];
    if (recommendation) {
      // Copy so callers cannot mutate the shared defaults.
      return { ...recommendation };
    }
    // Fall back to generic defaults
    return {
      min: 2,
      max: 10,
      idle: 3,
    };
  }

  /**
   * Estimate an optimal pool size from expected load using Little's Law
   * (L = λ·W), with a 20% burst buffer and a latency-based floor.
   *
   * @param expectedConcurrency - Expected requests per second.
   * @param averageQueryTimeMs - Mean query duration in milliseconds.
   * @param targetLatencyMs - Desired latency target in milliseconds.
   * @returns Recommended connection count (never fewer than 2).
   */
  static getOptimalPoolSize(
    expectedConcurrency: number,
    averageQueryTimeMs: number,
    targetLatencyMs: number
  ): number {
    // Little's Law: L = λ * W
    // L = number of connections needed
    // λ = arrival rate (requests per second)
    // W = average time in system (seconds)
    const requestsPerSecond = expectedConcurrency;
    const averageTimeInSystem = averageQueryTimeMs / 1000;
    const minConnections = Math.ceil(requestsPerSecond * averageTimeInSystem);
    // Add buffer for burst traffic (20% overhead)
    const recommendedSize = Math.ceil(minConnections * 1.2);
    // Ensure we meet target latency
    const latencyBasedSize = Math.ceil(expectedConcurrency * (averageQueryTimeMs / targetLatencyMs));
    return Math.max(recommendedSize, latencyBasedSize, 2); // Minimum 2 connections
  }
}
import type { ConnectionPoolConfig } from './types';
/** Recommended connection-pool bounds: minimum, maximum, and idle targets. */
export interface PoolSizeRecommendation {
min: number;
max: number;
idle: number;
}
export class PoolSizeCalculator {
  /** Built-in recommendations, keyed by service or handler name. */
  private static readonly DEFAULT_SIZES: Record<string, PoolSizeRecommendation> = {
    // Service-level defaults
    'data-ingestion': { min: 5, max: 50, idle: 10 },
    'data-pipeline': { min: 3, max: 30, idle: 5 },
    'processing-service': { min: 2, max: 20, idle: 3 },
    'web-api': { min: 2, max: 10, idle: 2 },
    'portfolio-service': { min: 2, max: 15, idle: 3 },
    'strategy-service': { min: 3, max: 25, idle: 5 },
    'execution-service': { min: 2, max: 10, idle: 2 },
    // Handler-level defaults
    'batch-import': { min: 10, max: 100, idle: 20 },
    'real-time': { min: 2, max: 10, idle: 3 },
    analytics: { min: 5, max: 30, idle: 10 },
    reporting: { min: 3, max: 20, idle: 5 },
  };

  /**
   * Resolve a pool-size recommendation.
   *
   * Precedence: explicit customConfig bounds > handler-specific default >
   * service-level default > generic fallback (2/10/3).
   *
   * @param serviceName - Name of the owning service.
   * @param handlerName - Optional handler name; checked before serviceName.
   * @param customConfig - Explicit bounds, applied when BOTH minConnections
   *   and maxConnections are provided (0 is a valid value).
   */
  static calculate(
    serviceName: string,
    handlerName?: string,
    customConfig?: Partial<ConnectionPoolConfig>
  ): PoolSizeRecommendation {
    // Explicit configuration wins. Use a null check (not truthiness) so a
    // legitimate explicit value of 0 connections is not silently ignored.
    if (customConfig?.minConnections != null && customConfig?.maxConnections != null) {
      return {
        min: customConfig.minConnections,
        max: customConfig.maxConnections,
        idle: Math.floor((customConfig.minConnections + customConfig.maxConnections) / 4),
      };
    }
    // Try handler-specific sizes first, then service-level
    const key = handlerName || serviceName;
    const recommendation = this.DEFAULT_SIZES[key] || this.DEFAULT_SIZES[serviceName];
    if (recommendation) {
      // Copy so callers cannot mutate the shared defaults.
      return { ...recommendation };
    }
    // Fall back to generic defaults
    return {
      min: 2,
      max: 10,
      idle: 3,
    };
  }

  /**
   * Estimate an optimal pool size from expected load using Little's Law
   * (L = λ·W), with a 20% burst buffer and a latency-based floor.
   *
   * @param expectedConcurrency - Expected requests per second.
   * @param averageQueryTimeMs - Mean query duration in milliseconds.
   * @param targetLatencyMs - Desired latency target in milliseconds.
   * @returns Recommended connection count (never fewer than 2).
   */
  static getOptimalPoolSize(
    expectedConcurrency: number,
    averageQueryTimeMs: number,
    targetLatencyMs: number
  ): number {
    // Little's Law: L = λ * W
    // L = number of connections needed
    // λ = arrival rate (requests per second)
    // W = average time in system (seconds)
    const requestsPerSecond = expectedConcurrency;
    const averageTimeInSystem = averageQueryTimeMs / 1000;
    const minConnections = Math.ceil(requestsPerSecond * averageTimeInSystem);
    // Add buffer for burst traffic (20% overhead)
    const recommendedSize = Math.ceil(minConnections * 1.2);
    // Ensure we meet target latency
    const latencyBasedSize = Math.ceil(
      expectedConcurrency * (averageQueryTimeMs / targetLatencyMs)
    );
    return Math.max(recommendedSize, latencyBasedSize, 2); // Minimum 2 connections
  }
}

View file

@@ -1,68 +1,71 @@
// Generic types to avoid circular dependencies
/** Opaque, driver-specific client configuration (shape owned by each client package). */
export interface GenericClientConfig {
[key: string]: any;
}
/** Common tuning options shared by every connection pool. */
export interface ConnectionPoolConfig {
name: string;
poolSize?: number;
minConnections?: number;
maxConnections?: number;
idleTimeoutMillis?: number;
connectionTimeoutMillis?: number;
enableMetrics?: boolean;
}
/** Pool options plus the MongoDB client configuration. */
export interface MongoDBPoolConfig extends ConnectionPoolConfig {
config: GenericClientConfig;
}
/** Pool options plus the PostgreSQL client configuration. */
export interface PostgreSQLPoolConfig extends ConnectionPoolConfig {
config: GenericClientConfig;
}
/** Pool options plus the cache client configuration. */
export interface CachePoolConfig extends ConnectionPoolConfig {
config: GenericClientConfig;
}
/** Pool options plus the queue client configuration. */
export interface QueuePoolConfig extends ConnectionPoolConfig {
config: GenericClientConfig;
}
/** Top-level factory configuration: one optional override per pool type. */
export interface ConnectionFactoryConfig {
service: string;
environment: 'development' | 'production' | 'test';
pools?: {
mongodb?: Partial<MongoDBPoolConfig>;
postgres?: Partial<PostgreSQLPoolConfig>;
cache?: Partial<CachePoolConfig>;
queue?: Partial<QueuePoolConfig>;
};
}
/** A live pool wrapping a client of type T, with health and disposal hooks. */
export interface ConnectionPool<T> {
name: string;
client: T;
metrics: PoolMetrics;
health(): Promise<boolean>;
dispose(): Promise<void>;
}
/** Point-in-time statistics for a pool. */
export interface PoolMetrics {
created: Date;
totalConnections: number;
activeConnections: number;
idleConnections: number;
waitingRequests: number;
errors: number;
}
/** Creates, looks up, and disposes connection pools of every supported type. */
export interface ConnectionFactory {
createMongoDB(config: MongoDBPoolConfig): Promise<ConnectionPool<any>>;
createPostgreSQL(config: PostgreSQLPoolConfig): Promise<ConnectionPool<any>>;
createCache(config: CachePoolConfig): Promise<ConnectionPool<any>>;
createQueue(config: QueuePoolConfig): Promise<ConnectionPool<any>>;
getPool(type: 'mongodb' | 'postgres' | 'cache' | 'queue', name: string): ConnectionPool<any> | undefined;
listPools(): Array<{ type: string; name: string; metrics: PoolMetrics }>;
disposeAll(): Promise<void>;
}
// Generic types to avoid circular dependencies
/** Opaque, driver-specific client configuration (shape owned by each client package). */
export interface GenericClientConfig {
[key: string]: any;
}
/** Common tuning options shared by every connection pool. */
export interface ConnectionPoolConfig {
name: string;
poolSize?: number;
minConnections?: number;
maxConnections?: number;
idleTimeoutMillis?: number;
connectionTimeoutMillis?: number;
enableMetrics?: boolean;
}
/** Pool options plus the MongoDB client configuration. */
export interface MongoDBPoolConfig extends ConnectionPoolConfig {
config: GenericClientConfig;
}
/** Pool options plus the PostgreSQL client configuration. */
export interface PostgreSQLPoolConfig extends ConnectionPoolConfig {
config: GenericClientConfig;
}
/** Pool options plus the cache client configuration. */
export interface CachePoolConfig extends ConnectionPoolConfig {
config: GenericClientConfig;
}
/** Pool options plus the queue client configuration. */
export interface QueuePoolConfig extends ConnectionPoolConfig {
config: GenericClientConfig;
}
/** Top-level factory configuration: one optional override per pool type. */
export interface ConnectionFactoryConfig {
service: string;
environment: 'development' | 'production' | 'test';
pools?: {
mongodb?: Partial<MongoDBPoolConfig>;
postgres?: Partial<PostgreSQLPoolConfig>;
cache?: Partial<CachePoolConfig>;
queue?: Partial<QueuePoolConfig>;
};
}
/** A live pool wrapping a client of type T, with health and disposal hooks. */
export interface ConnectionPool<T> {
name: string;
client: T;
metrics: PoolMetrics;
health(): Promise<boolean>;
dispose(): Promise<void>;
}
/** Point-in-time statistics for a pool. */
export interface PoolMetrics {
created: Date;
totalConnections: number;
activeConnections: number;
idleConnections: number;
waitingRequests: number;
errors: number;
}
/** Creates, looks up, and disposes connection pools of every supported type. */
export interface ConnectionFactory {
createMongoDB(config: MongoDBPoolConfig): Promise<ConnectionPool<any>>;
createPostgreSQL(config: PostgreSQLPoolConfig): Promise<ConnectionPool<any>>;
createCache(config: CachePoolConfig): Promise<ConnectionPool<any>>;
createQueue(config: QueuePoolConfig): Promise<ConnectionPool<any>>;
getPool(
type: 'mongodb' | 'postgres' | 'cache' | 'queue',
name: string
): ConnectionPool<any> | undefined;
listPools(): Array<{ type: string; name: string; metrics: PoolMetrics }>;
disposeAll(): Promise<void>;
}