moved folders around
This commit is contained in:
parent
4f89affc2b
commit
36cb84b343
202 changed files with 1160 additions and 660 deletions
280
libs/core/di/connection-factory.ts
Normal file
280
libs/core/di/connection-factory.ts
Normal file
|
|
@ -0,0 +1,280 @@
|
|||
import { getLogger, type Logger } from '@stock-bot/logger';
|
||||
import { MongoDBClient, createMongoDBClient, type ConnectionEvents } from '@stock-bot/mongodb';
|
||||
import { PostgreSQLClient, createPostgreSQLClient } from '@stock-bot/postgres';
|
||||
import { createCache, type CacheProvider } from '@stock-bot/cache';
|
||||
import { QueueManager } from '@stock-bot/queue';
|
||||
import type {
|
||||
ConnectionFactory as IConnectionFactory,
|
||||
ConnectionPool,
|
||||
ConnectionFactoryConfig,
|
||||
MongoDBPoolConfig,
|
||||
PostgreSQLPoolConfig,
|
||||
CachePoolConfig,
|
||||
QueuePoolConfig,
|
||||
PoolMetrics,
|
||||
} from './types';
|
||||
|
||||
/**
 * Factory that creates, caches and tears down typed connection pools
 * (MongoDB, PostgreSQL, cache, queue) for a single service.
 *
 * Pools are registered under the key "<type>:<name>" (e.g. "mongodb:default");
 * requesting an already-created pool returns the cached entry instead of
 * opening new connections. Implements the ConnectionFactory contract from
 * './types'.
 */
export class ConnectionFactory implements IConnectionFactory {
  private readonly logger: Logger;
  // Live pools keyed "<type>:<name>". `any` because the four pool types carry
  // different client classes; the public create* methods return typed views.
  private readonly pools: Map<string, ConnectionPool<any>> = new Map();
  private readonly config: ConnectionFactoryConfig;

  constructor(config: ConnectionFactoryConfig) {
    this.config = config;
    this.logger = getLogger(`connection-factory:${config.service}`);
  }

  /**
   * Create (or reuse) a MongoDB pool.
   *
   * Connects the client eagerly and, when `minConnections` is set, pre-warms
   * the driver pool. Sizing: `maxConnections` wins over `poolSize`, default 10;
   * minimum defaults to 2.
   */
  async createMongoDB(poolConfig: MongoDBPoolConfig): Promise<ConnectionPool<MongoDBClient>> {
    const key = `mongodb:${poolConfig.name}`;

    if (this.pools.has(key)) {
      this.logger.debug('Reusing existing MongoDB pool', { name: poolConfig.name });
      return this.pools.get(key)!;
    }

    this.logger.info('Creating MongoDB connection pool', {
      name: poolConfig.name,
      poolSize: poolConfig.poolSize,
    });

    try {
      // Lifecycle hooks only log; errors are surfaced via the logger, not rethrown.
      const events: ConnectionEvents = {
        onConnect: () => {
          this.logger.debug('MongoDB connected', { pool: poolConfig.name });
        },
        onDisconnect: () => {
          this.logger.debug('MongoDB disconnected', { pool: poolConfig.name });
        },
        onError: (error) => {
          this.logger.error('MongoDB error', { pool: poolConfig.name, error });
        },
      };

      const client = createMongoDBClient({
        ...poolConfig.config,
        poolSettings: {
          maxPoolSize: poolConfig.maxConnections || poolConfig.poolSize || 10,
          minPoolSize: poolConfig.minConnections || 2,
          maxIdleTime: 30000,
        }
      }, events);

      await client.connect();

      // Warm up the pool
      if (poolConfig.minConnections) {
        await client.warmupPool();
      }

      const pool: ConnectionPool<MongoDBClient> = {
        name: poolConfig.name,
        client,
        // NOTE(review): this captures a metrics snapshot at creation time; if
        // listPools() is expected to report live numbers, confirm whether
        // getPoolMetrics() returns a live view or a frozen copy.
        metrics: client.getPoolMetrics(),
        health: async () => {
          try {
            // Round-trip ping against the admin database.
            await client.getDatabase().admin().ping();
            return true;
          } catch {
            return false;
          }
        },
        dispose: async () => {
          await client.disconnect();
          this.pools.delete(key);
        },
      };

      this.pools.set(key, pool);
      return pool;
    } catch (error) {
      this.logger.error('Failed to create MongoDB pool', { name: poolConfig.name, error });
      throw error;
    }
  }

  /**
   * Create (or reuse) a PostgreSQL pool.
   *
   * Mirrors createMongoDB(): eager connect, optional warm-up, same sizing
   * defaults (`maxConnections` over `poolSize`, default 10 / min 2), with a
   * configurable idle timeout (default 30s).
   */
  async createPostgreSQL(poolConfig: PostgreSQLPoolConfig): Promise<ConnectionPool<PostgreSQLClient>> {
    const key = `postgres:${poolConfig.name}`;

    if (this.pools.has(key)) {
      this.logger.debug('Reusing existing PostgreSQL pool', { name: poolConfig.name });
      return this.pools.get(key)!;
    }

    this.logger.info('Creating PostgreSQL connection pool', {
      name: poolConfig.name,
      poolSize: poolConfig.poolSize,
    });

    try {
      const events: ConnectionEvents = {
        onConnect: () => {
          this.logger.debug('PostgreSQL connected', { pool: poolConfig.name });
        },
        onDisconnect: () => {
          this.logger.debug('PostgreSQL disconnected', { pool: poolConfig.name });
        },
        onError: (error) => {
          this.logger.error('PostgreSQL error', { pool: poolConfig.name, error });
        },
      };

      const client = createPostgreSQLClient({
        ...poolConfig.config,
        poolSettings: {
          max: poolConfig.maxConnections || poolConfig.poolSize || 10,
          min: poolConfig.minConnections || 2,
          idleTimeoutMillis: poolConfig.idleTimeoutMillis || 30000,
        },
      }, undefined, events);

      await client.connect();

      // Warm up the pool
      if (poolConfig.minConnections) {
        await client.warmupPool();
      }

      const pool: ConnectionPool<PostgreSQLClient> = {
        name: poolConfig.name,
        client,
        // NOTE(review): snapshot at creation time — see createMongoDB().
        metrics: client.getPoolMetrics(),
        // Unlike the MongoDB health check, this only inspects a connected
        // flag rather than doing a round trip.
        health: async () => client.connected,
        dispose: async () => {
          await client.disconnect();
          this.pools.delete(key);
        },
      };

      this.pools.set(key, pool);
      return pool;
    } catch (error) {
      this.logger.error('Failed to create PostgreSQL pool', { name: poolConfig.name, error });
      throw error;
    }
  }

  /**
   * Create (or reuse) a cache pool. Synchronous: the cache client connects on
   * its own. Keys written through the returned client are prefixed
   * "<service>:<poolName>:".
   */
  createCache(poolConfig: CachePoolConfig): ConnectionPool<CacheProvider> {
    const key = `cache:${poolConfig.name}`;

    if (this.pools.has(key)) {
      this.logger.debug('Reusing existing cache pool', { name: poolConfig.name });
      return this.pools.get(key)!;
    }

    this.logger.info('Creating cache connection pool', {
      name: poolConfig.name,
    });

    try {
      const cache = createCache({
        ...poolConfig.config,
        keyPrefix: `${this.config.service}:${poolConfig.name}:`,
        shared: false, // Each pool gets its own connection
      });

      const pool: ConnectionPool<CacheProvider> = {
        name: poolConfig.name,
        client: cache,
        // The cache client does not expose pool metrics; start from zeros.
        metrics: this.createInitialMetrics(),
        health: async () => cache.health(),
        dispose: async () => {
          // Cache disposal handled internally
          // NOTE(review): no explicit close is invoked here — confirm the
          // cache client really tears down its connection on its own.
          this.pools.delete(key);
        },
      };

      this.pools.set(key, pool);
      return pool;
    } catch (error) {
      this.logger.error('Failed to create cache pool', { name: poolConfig.name, error });
      throw error;
    }
  }

  /**
   * Create (or reuse) a queue "pool". QueueManager is a process-wide
   * singleton; this wraps it in the ConnectionPool shape for uniform
   * lifecycle handling.
   */
  createQueue(poolConfig: QueuePoolConfig): ConnectionPool<QueueManager> {
    const key = `queue:${poolConfig.name}`;

    if (this.pools.has(key)) {
      this.logger.debug('Reusing existing queue manager', { name: poolConfig.name });
      return this.pools.get(key)!;
    }

    this.logger.info('Creating queue manager', {
      name: poolConfig.name,
    });

    try {
      // Initialize or get existing QueueManager instance
      const queueManager = QueueManager.getOrInitialize(poolConfig.config);

      const pool: ConnectionPool<QueueManager> = {
        name: poolConfig.name,
        client: queueManager,
        metrics: this.createInitialMetrics(),
        health: async () => {
          try {
            // Check if QueueManager is initialized
            queueManager.getQueueNames();
            return true;
          } catch {
            return false;
          }
        },
        dispose: async () => {
          // QueueManager handles its own shutdown
          await queueManager.shutdown();
          this.pools.delete(key);
        },
      };

      this.pools.set(key, pool);
      return pool;
    } catch (error) {
      this.logger.error('Failed to create queue manager', { name: poolConfig.name, error });
      throw error;
    }
  }

  /** Look up an existing pool by type and name; undefined when not created yet. */
  getPool(type: 'mongodb' | 'postgres' | 'cache' | 'queue', name: string): ConnectionPool<any> | undefined {
    const key = `${type}:${name}`;
    return this.pools.get(key);
  }

  /**
   * Enumerate all live pools with their metrics.
   * Pool names may themselves contain ':', so everything after the first
   * separator is rejoined as the name.
   */
  listPools(): Array<{ type: string; name: string; metrics: PoolMetrics }> {
    const result: Array<{ type: string; name: string; metrics: PoolMetrics }> = [];

    for (const [key, pool] of this.pools.entries()) {
      const [type, ...nameParts] = key.split(':');
      result.push({
        type: type || 'unknown',
        name: nameParts.join(':'),
        metrics: pool.metrics,
      });
    }

    return result;
  }

  /**
   * Dispose every pool concurrently, then empty the registry.
   * Each pool's dispose() also removes itself from the map; the final clear()
   * is a safety net.
   */
  async disposeAll(): Promise<void> {
    this.logger.info('Disposing all connection pools', { count: this.pools.size });

    const disposePromises: Promise<void>[] = [];
    for (const pool of this.pools.values()) {
      disposePromises.push(pool.dispose());
    }

    await Promise.all(disposePromises);
    this.pools.clear();
  }

  /** Zeroed metrics for pool types that expose no live counters. */
  private createInitialMetrics(): PoolMetrics {
    return {
      created: new Date(),
      totalConnections: 0,
      activeConnections: 0,
      idleConnections: 0,
      waitingRequests: 0,
      errors: 0,
    };
  }
}
|
||||
6
libs/core/di/index.ts
Normal file
6
libs/core/di/index.ts
Normal file
|
|
@ -0,0 +1,6 @@
|
|||
// Export all dependency injection components
|
||||
export * from './service-container';
|
||||
export * from './connection-factory';
|
||||
export * from './operation-context';
|
||||
export * from './pool-size-calculator';
|
||||
export * from './types';
|
||||
307
libs/core/di/operation-context.ts
Normal file
307
libs/core/di/operation-context.ts
Normal file
|
|
@ -0,0 +1,307 @@
|
|||
/**
|
||||
* OperationContext - Unified context for handler operations
|
||||
*
|
||||
* Provides streamlined access to:
|
||||
* - Child loggers with hierarchical context
|
||||
* - Database clients (MongoDB, PostgreSQL)
|
||||
* - Contextual cache with automatic key prefixing
|
||||
* - Shared resource management
|
||||
*/
|
||||
|
||||
import { createCache, type CacheProvider } from '@stock-bot/cache';
|
||||
import { getLogger, type Logger } from '@stock-bot/logger';
|
||||
import { getDatabaseConfig } from '@stock-bot/config';
|
||||
import type { ServiceResolver } from './service-container';
|
||||
import type { MongoDBClient } from '@stock-bot/mongodb';
|
||||
import type { PostgreSQLClient } from '@stock-bot/postgres';
|
||||
|
||||
export interface OperationContextOptions {
|
||||
handlerName: string;
|
||||
operationName: string;
|
||||
parentLogger?: Logger;
|
||||
container?: ServiceResolver;
|
||||
}
|
||||
|
||||
export class OperationContext {
|
||||
public readonly logger: Logger;
|
||||
private readonly container?: ServiceResolver;
|
||||
private _mongodb?: MongoDBClient;
|
||||
private _postgres?: PostgreSQLClient;
|
||||
private _cache?: CacheProvider;
|
||||
private _queue?: any; // Type will be QueueManager but we avoid import for circular deps
|
||||
|
||||
private static sharedCache: CacheProvider | null = null;
|
||||
private static parentLoggers = new Map<string, Logger>();
|
||||
private static databaseConfig: any = null;
|
||||
|
||||
constructor(
|
||||
public readonly handlerName: string,
|
||||
public readonly operationName: string,
|
||||
parentLoggerOrOptions?: Logger | OperationContextOptions
|
||||
) {
|
||||
// Handle both old and new constructor signatures
|
||||
if (parentLoggerOrOptions && 'container' in parentLoggerOrOptions) {
|
||||
const options = parentLoggerOrOptions;
|
||||
this.container = options.container;
|
||||
const parent = options.parentLogger || this.getOrCreateParentLogger();
|
||||
this.logger = parent.child(operationName, {
|
||||
handler: handlerName,
|
||||
operation: operationName
|
||||
});
|
||||
} else {
|
||||
// Legacy support
|
||||
const parentLogger = parentLoggerOrOptions as Logger | undefined;
|
||||
const parent = parentLogger || this.getOrCreateParentLogger();
|
||||
this.logger = parent.child(operationName, {
|
||||
handler: handlerName,
|
||||
operation: operationName
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Lazy load MongoDB client
|
||||
get mongodb(): MongoDBClient {
|
||||
if (!this._mongodb) {
|
||||
if (this.container) {
|
||||
try {
|
||||
this._mongodb = this.container.resolve<MongoDBClient>('mongodb');
|
||||
} catch (error) {
|
||||
this.logger.warn('Failed to resolve MongoDB from container, falling back to singleton', { error });
|
||||
this._mongodb = this.getLegacyDatabaseClient('mongodb') as MongoDBClient;
|
||||
}
|
||||
} else {
|
||||
this._mongodb = this.getLegacyDatabaseClient('mongodb') as MongoDBClient;
|
||||
}
|
||||
}
|
||||
return this._mongodb!;
|
||||
}
|
||||
|
||||
// Lazy load PostgreSQL client
|
||||
get postgres(): PostgreSQLClient {
|
||||
if (!this._postgres) {
|
||||
if (this.container) {
|
||||
try {
|
||||
this._postgres = this.container.resolve<PostgreSQLClient>('postgres');
|
||||
} catch (error) {
|
||||
this.logger.warn('Failed to resolve PostgreSQL from container, falling back to singleton', { error });
|
||||
this._postgres = this.getLegacyDatabaseClient('postgres') as PostgreSQLClient;
|
||||
}
|
||||
} else {
|
||||
this._postgres = this.getLegacyDatabaseClient('postgres') as PostgreSQLClient;
|
||||
}
|
||||
}
|
||||
return this._postgres!;
|
||||
}
|
||||
|
||||
// Lazy load QueueManager
|
||||
get queue(): any {
|
||||
if (!this._queue) {
|
||||
if (this.container) {
|
||||
try {
|
||||
this._queue = this.container.resolve('queue');
|
||||
} catch (error) {
|
||||
this.logger.warn('Failed to resolve QueueManager from container, falling back to singleton', { error });
|
||||
this._queue = this.getLegacyQueueManager();
|
||||
}
|
||||
} else {
|
||||
this._queue = this.getLegacyQueueManager();
|
||||
}
|
||||
}
|
||||
return this._queue!;
|
||||
}
|
||||
|
||||
// Legacy method for QueueManager
|
||||
private getLegacyQueueManager(): any {
|
||||
try {
|
||||
// Dynamic import to avoid TypeScript issues during build
|
||||
const { QueueManager } = require('@stock-bot/queue');
|
||||
return QueueManager.getInstance();
|
||||
} catch (error) {
|
||||
this.logger.warn('QueueManager not initialized, queue operations may fail', { error });
|
||||
throw new Error('QueueManager not available');
|
||||
}
|
||||
}
|
||||
|
||||
// Legacy method for backward compatibility
|
||||
private getLegacyDatabaseClient(type: 'mongodb' | 'postgres'): any {
|
||||
try {
|
||||
if (type === 'mongodb') {
|
||||
// Dynamic import to avoid TypeScript issues during build
|
||||
const { getMongoDBClient } = require('@stock-bot/mongodb');
|
||||
return getMongoDBClient();
|
||||
} else {
|
||||
// Dynamic import to avoid TypeScript issues during build
|
||||
const { getPostgreSQLClient } = require('@stock-bot/postgres');
|
||||
return getPostgreSQLClient();
|
||||
}
|
||||
} catch (error) {
|
||||
this.logger.warn(`${type} client not initialized, operations may fail`, { error });
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private getOrCreateParentLogger(): Logger {
|
||||
const parentKey = `${this.handlerName}-handler`;
|
||||
|
||||
if (!OperationContext.parentLoggers.has(parentKey)) {
|
||||
const parentLogger = getLogger(parentKey);
|
||||
OperationContext.parentLoggers.set(parentKey, parentLogger);
|
||||
}
|
||||
|
||||
return OperationContext.parentLoggers.get(parentKey)!;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get contextual cache with automatic key prefixing
|
||||
* Keys are automatically prefixed as: "operations:handlerName:operationName:key"
|
||||
*/
|
||||
get cache(): CacheProvider {
|
||||
if (!this._cache) {
|
||||
if (this.container) {
|
||||
try {
|
||||
const baseCache = this.container.resolve<CacheProvider>('cache');
|
||||
this._cache = this.createContextualCache(baseCache);
|
||||
} catch (error) {
|
||||
this.logger.warn('Failed to resolve cache from container, using shared cache', { error });
|
||||
this._cache = this.getOrCreateSharedCache();
|
||||
}
|
||||
} else {
|
||||
this._cache = this.getOrCreateSharedCache();
|
||||
}
|
||||
}
|
||||
return this._cache!;
|
||||
}
|
||||
|
||||
private getOrCreateSharedCache(): CacheProvider {
|
||||
if (!OperationContext.sharedCache) {
|
||||
// Get Redis configuration from database config
|
||||
if (!OperationContext.databaseConfig) {
|
||||
OperationContext.databaseConfig = getDatabaseConfig();
|
||||
}
|
||||
|
||||
const redisConfig = OperationContext.databaseConfig.dragonfly || {
|
||||
host: 'localhost',
|
||||
port: 6379,
|
||||
db: 1
|
||||
};
|
||||
|
||||
OperationContext.sharedCache = createCache({
|
||||
keyPrefix: 'operations:',
|
||||
shared: true, // Use singleton Redis connection
|
||||
enableMetrics: true,
|
||||
ttl: 3600, // Default 1 hour TTL
|
||||
redisConfig
|
||||
});
|
||||
}
|
||||
return this.createContextualCache(OperationContext.sharedCache);
|
||||
}
|
||||
|
||||
private createContextualCache(baseCache: CacheProvider): CacheProvider {
|
||||
const contextPrefix = `${this.handlerName}:${this.operationName}:`;
|
||||
|
||||
// Return a proxy that automatically prefixes keys with context
|
||||
return {
|
||||
async get<T>(key: string): Promise<T | null> {
|
||||
return baseCache.get(`${contextPrefix}${key}`);
|
||||
},
|
||||
|
||||
async set<T>(key: string, value: T, options?: any): Promise<T | null> {
|
||||
return baseCache.set(`${contextPrefix}${key}`, value, options);
|
||||
},
|
||||
|
||||
async del(key: string): Promise<void> {
|
||||
return baseCache.del(`${contextPrefix}${key}`);
|
||||
},
|
||||
|
||||
async exists(key: string): Promise<boolean> {
|
||||
return baseCache.exists(`${contextPrefix}${key}`);
|
||||
},
|
||||
|
||||
async clear(): Promise<void> {
|
||||
// Not implemented for contextual cache - use del() for specific keys
|
||||
throw new Error('clear() not implemented for contextual cache - use del() for specific keys');
|
||||
},
|
||||
|
||||
async keys(pattern: string): Promise<string[]> {
|
||||
const fullPattern = `${contextPrefix}${pattern}`;
|
||||
return baseCache.keys(fullPattern);
|
||||
},
|
||||
|
||||
getStats() {
|
||||
return baseCache.getStats();
|
||||
},
|
||||
|
||||
async health(): Promise<boolean> {
|
||||
return baseCache.health();
|
||||
},
|
||||
|
||||
async waitForReady(timeout?: number): Promise<void> {
|
||||
return baseCache.waitForReady(timeout);
|
||||
},
|
||||
|
||||
isReady(): boolean {
|
||||
return baseCache.isReady();
|
||||
}
|
||||
} as CacheProvider;
|
||||
}
|
||||
|
||||
/**
|
||||
* Factory method to create OperationContext
|
||||
*/
|
||||
static create(handlerName: string, operationName: string, parentLoggerOrOptions?: Logger | OperationContextOptions): OperationContext {
|
||||
if (parentLoggerOrOptions && 'container' in parentLoggerOrOptions) {
|
||||
return new OperationContext(handlerName, operationName, {
|
||||
...parentLoggerOrOptions,
|
||||
handlerName,
|
||||
operationName
|
||||
});
|
||||
}
|
||||
return new OperationContext(handlerName, operationName, parentLoggerOrOptions as Logger | undefined);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get cache key prefix for this operation context
|
||||
*/
|
||||
getCacheKeyPrefix(): string {
|
||||
return `operations:${this.handlerName}:${this.operationName}:`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a child context for sub-operations
|
||||
*/
|
||||
createChild(subOperationName: string): OperationContext {
|
||||
if (this.container) {
|
||||
return new OperationContext(
|
||||
this.handlerName,
|
||||
`${this.operationName}:${subOperationName}`,
|
||||
{
|
||||
handlerName: this.handlerName,
|
||||
operationName: `${this.operationName}:${subOperationName}`,
|
||||
parentLogger: this.logger,
|
||||
container: this.container
|
||||
}
|
||||
);
|
||||
}
|
||||
return new OperationContext(
|
||||
this.handlerName,
|
||||
`${this.operationName}:${subOperationName}`,
|
||||
this.logger
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Dispose of resources if using container-based connections
|
||||
* This is a no-op for legacy singleton connections
|
||||
*/
|
||||
async dispose(): Promise<void> {
|
||||
// If using container, it will handle cleanup
|
||||
// For singleton connections, they persist
|
||||
this.logger.debug('OperationContext disposed', {
|
||||
handler: this.handlerName,
|
||||
operation: this.operationName,
|
||||
hasContainer: !!this.container
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
export default OperationContext;
|
||||
23
libs/core/di/package.json
Normal file
23
libs/core/di/package.json
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
{
|
||||
"name": "@stock-bot/di",
|
||||
"version": "1.0.0",
|
||||
"main": "./src/index.ts",
|
||||
"types": "./src/index.ts",
|
||||
"scripts": {
|
||||
"build": "tsc",
|
||||
"clean": "rm -rf dist"
|
||||
},
|
||||
"dependencies": {
|
||||
"@stock-bot/config": "workspace:*",
|
||||
"@stock-bot/logger": "workspace:*",
|
||||
"@stock-bot/mongodb": "workspace:*",
|
||||
"@stock-bot/postgres": "workspace:*",
|
||||
"@stock-bot/cache": "workspace:*",
|
||||
"@stock-bot/queue": "workspace:*",
|
||||
"mongodb": "^6.3.0",
|
||||
"pg": "^8.11.3"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/pg": "^8.10.7"
|
||||
}
|
||||
}
|
||||
80
libs/core/di/pool-size-calculator.ts
Normal file
80
libs/core/di/pool-size-calculator.ts
Normal file
|
|
@ -0,0 +1,80 @@
|
|||
import type { ConnectionPoolConfig } from './types';
|
||||
|
||||
export interface PoolSizeRecommendation {
|
||||
min: number;
|
||||
max: number;
|
||||
idle: number;
|
||||
}
|
||||
|
||||
export class PoolSizeCalculator {
|
||||
private static readonly DEFAULT_SIZES: Record<string, PoolSizeRecommendation> = {
|
||||
// Service-level defaults
|
||||
'data-ingestion': { min: 5, max: 50, idle: 10 },
|
||||
'data-pipeline': { min: 3, max: 30, idle: 5 },
|
||||
'processing-service': { min: 2, max: 20, idle: 3 },
|
||||
'web-api': { min: 2, max: 10, idle: 2 },
|
||||
'portfolio-service': { min: 2, max: 15, idle: 3 },
|
||||
'strategy-service': { min: 3, max: 25, idle: 5 },
|
||||
'execution-service': { min: 2, max: 10, idle: 2 },
|
||||
|
||||
// Handler-level defaults
|
||||
'batch-import': { min: 10, max: 100, idle: 20 },
|
||||
'real-time': { min: 2, max: 10, idle: 3 },
|
||||
'analytics': { min: 5, max: 30, idle: 10 },
|
||||
'reporting': { min: 3, max: 20, idle: 5 },
|
||||
};
|
||||
|
||||
static calculate(
|
||||
serviceName: string,
|
||||
handlerName?: string,
|
||||
customConfig?: Partial<ConnectionPoolConfig>
|
||||
): PoolSizeRecommendation {
|
||||
// Check for custom configuration first
|
||||
if (customConfig?.minConnections && customConfig?.maxConnections) {
|
||||
return {
|
||||
min: customConfig.minConnections,
|
||||
max: customConfig.maxConnections,
|
||||
idle: Math.floor((customConfig.minConnections + customConfig.maxConnections) / 4),
|
||||
};
|
||||
}
|
||||
|
||||
// Try handler-specific sizes first, then service-level
|
||||
const key = handlerName || serviceName;
|
||||
const recommendation = this.DEFAULT_SIZES[key] || this.DEFAULT_SIZES[serviceName];
|
||||
|
||||
if (recommendation) {
|
||||
return { ...recommendation };
|
||||
}
|
||||
|
||||
// Fall back to generic defaults
|
||||
return {
|
||||
min: 2,
|
||||
max: 10,
|
||||
idle: 3,
|
||||
};
|
||||
}
|
||||
|
||||
static getOptimalPoolSize(
|
||||
expectedConcurrency: number,
|
||||
averageQueryTimeMs: number,
|
||||
targetLatencyMs: number
|
||||
): number {
|
||||
// Little's Law: L = λ * W
|
||||
// L = number of connections needed
|
||||
// λ = arrival rate (requests per second)
|
||||
// W = average time in system (seconds)
|
||||
|
||||
const requestsPerSecond = expectedConcurrency;
|
||||
const averageTimeInSystem = averageQueryTimeMs / 1000;
|
||||
|
||||
const minConnections = Math.ceil(requestsPerSecond * averageTimeInSystem);
|
||||
|
||||
// Add buffer for burst traffic (20% overhead)
|
||||
const recommendedSize = Math.ceil(minConnections * 1.2);
|
||||
|
||||
// Ensure we meet target latency
|
||||
const latencyBasedSize = Math.ceil(expectedConcurrency * (averageQueryTimeMs / targetLatencyMs));
|
||||
|
||||
return Math.max(recommendedSize, latencyBasedSize, 2); // Minimum 2 connections
|
||||
}
|
||||
}
|
||||
215
libs/core/di/service-container.ts
Normal file
215
libs/core/di/service-container.ts
Normal file
|
|
@ -0,0 +1,215 @@
|
|||
import { getLogger, type Logger } from '@stock-bot/logger';
|
||||
import type { ConnectionFactory } from './connection-factory';
|
||||
|
||||
export interface ServiceRegistration<T = any> {
|
||||
name: string;
|
||||
factory: () => T | Promise<T>;
|
||||
singleton?: boolean;
|
||||
dispose?: (instance: T) => Promise<void>;
|
||||
}
|
||||
|
||||
export interface ServiceResolver {
|
||||
resolve<T>(name: string, options?: any): T;
|
||||
resolveAsync<T>(name: string, options?: any): Promise<T>;
|
||||
}
|
||||
|
||||
export class ServiceContainer implements ServiceResolver {
|
||||
private readonly logger: Logger;
|
||||
private readonly registrations = new Map<string, ServiceRegistration>();
|
||||
private readonly instances = new Map<string, any>();
|
||||
private readonly scopedInstances = new Map<string, any>();
|
||||
private readonly parent?: ServiceContainer;
|
||||
|
||||
constructor(name: string, parent?: ServiceContainer) {
|
||||
this.logger = getLogger(`service-container:${name}`);
|
||||
this.parent = parent;
|
||||
}
|
||||
|
||||
register<T>(registration: ServiceRegistration<T>): void {
|
||||
this.registrations.set(registration.name, registration);
|
||||
this.logger.debug('Service registered', { name: registration.name, singleton: registration.singleton });
|
||||
}
|
||||
|
||||
resolve<T>(name: string, options?: any): T {
|
||||
const instance = this.resolveAsync<T>(name, options);
|
||||
if (instance instanceof Promise) {
|
||||
throw new Error(`Service ${name} is async. Use resolveAsync() instead.`);
|
||||
}
|
||||
return instance as T;
|
||||
}
|
||||
|
||||
async resolveAsync<T>(name: string, _options?: any): Promise<T> {
|
||||
// Check scoped instances first
|
||||
if (this.scopedInstances.has(name)) {
|
||||
return this.scopedInstances.get(name);
|
||||
}
|
||||
|
||||
// Check singleton instances
|
||||
if (this.instances.has(name)) {
|
||||
return this.instances.get(name);
|
||||
}
|
||||
|
||||
// Get registration from this container or parent
|
||||
const registration = this.getRegistration(name);
|
||||
if (!registration) {
|
||||
throw new Error(`Service ${name} not registered`);
|
||||
}
|
||||
|
||||
// Create instance
|
||||
const instance = await Promise.resolve(registration.factory());
|
||||
|
||||
// Store based on singleton flag
|
||||
if (registration.singleton) {
|
||||
this.instances.set(name, instance);
|
||||
} else {
|
||||
this.scopedInstances.set(name, instance);
|
||||
}
|
||||
|
||||
return instance as T;
|
||||
}
|
||||
|
||||
createScope(): ServiceContainer {
|
||||
return new ServiceContainer('scoped', this);
|
||||
}
|
||||
|
||||
async dispose(): Promise<void> {
|
||||
// Dispose scoped instances
|
||||
for (const [name, instance] of this.scopedInstances.entries()) {
|
||||
const registration = this.getRegistration(name);
|
||||
if (registration?.dispose) {
|
||||
await registration.dispose(instance);
|
||||
}
|
||||
}
|
||||
this.scopedInstances.clear();
|
||||
|
||||
// Only dispose singletons if this is the root container
|
||||
if (!this.parent) {
|
||||
for (const [name, instance] of this.instances.entries()) {
|
||||
const registration = this.registrations.get(name);
|
||||
if (registration?.dispose) {
|
||||
await registration.dispose(instance);
|
||||
}
|
||||
}
|
||||
this.instances.clear();
|
||||
}
|
||||
}
|
||||
|
||||
private getRegistration(name: string): ServiceRegistration | undefined {
|
||||
return this.registrations.get(name) || this.parent?.getRegistration(name);
|
||||
}
|
||||
}
|
||||
|
||||
// Enhanced service container factory with infrastructure services
|
||||
export function createServiceContainer(
|
||||
serviceName: string,
|
||||
connectionFactory: ConnectionFactory,
|
||||
config?: any
|
||||
): ServiceContainer {
|
||||
const container = new ServiceContainer(serviceName);
|
||||
|
||||
// Register configuration if provided
|
||||
if (config) {
|
||||
container.register({
|
||||
name: 'config',
|
||||
factory: () => config,
|
||||
singleton: true,
|
||||
});
|
||||
}
|
||||
|
||||
// Register connection factories
|
||||
container.register({
|
||||
name: 'mongodb',
|
||||
factory: async () => {
|
||||
const pool = await connectionFactory.createMongoDB({
|
||||
name: 'default',
|
||||
config: {} as any, // Config injected by factory
|
||||
});
|
||||
return pool.client;
|
||||
},
|
||||
singleton: true,
|
||||
});
|
||||
|
||||
container.register({
|
||||
name: 'postgres',
|
||||
factory: async () => {
|
||||
const pool = await connectionFactory.createPostgreSQL({
|
||||
name: 'default',
|
||||
config: {} as any, // Config injected by factory
|
||||
});
|
||||
return pool.client;
|
||||
},
|
||||
singleton: true,
|
||||
});
|
||||
|
||||
container.register({
|
||||
name: 'cache',
|
||||
factory: () => {
|
||||
const pool = connectionFactory.createCache({
|
||||
name: 'default',
|
||||
config: {} as any, // Config injected by factory
|
||||
});
|
||||
return pool.client;
|
||||
},
|
||||
singleton: true,
|
||||
});
|
||||
|
||||
container.register({
|
||||
name: 'queue',
|
||||
factory: () => {
|
||||
const pool = connectionFactory.createQueue({
|
||||
name: 'default',
|
||||
config: {} as any, // Config injected by factory
|
||||
});
|
||||
return pool.client;
|
||||
},
|
||||
singleton: true,
|
||||
});
|
||||
|
||||
// Register ProxyManager
|
||||
container.register({
|
||||
name: 'proxyManager',
|
||||
factory: async () => {
|
||||
const { ProxyManager } = await import('@stock-bot/utils');
|
||||
await ProxyManager.initialize();
|
||||
return ProxyManager.getInstance();
|
||||
},
|
||||
singleton: true,
|
||||
dispose: async (proxyManager) => {
|
||||
// ProxyManager handles its own cleanup
|
||||
if (proxyManager && typeof proxyManager.shutdown === 'function') {
|
||||
await proxyManager.shutdown();
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Register Browser service
|
||||
container.register({
|
||||
name: 'browser',
|
||||
factory: async () => {
|
||||
const { Browser } = await import('@stock-bot/browser');
|
||||
return Browser;
|
||||
},
|
||||
singleton: true,
|
||||
dispose: async (browser) => {
|
||||
if (browser && typeof browser.close === 'function') {
|
||||
await browser.close();
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Register HttpClient with default configuration
|
||||
container.register({
|
||||
name: 'httpClient',
|
||||
factory: async () => {
|
||||
const { createHttpClient } = await import('@stock-bot/http');
|
||||
return createHttpClient({
|
||||
timeout: 30000,
|
||||
retries: 3,
|
||||
userAgent: 'stock-bot/1.0',
|
||||
});
|
||||
},
|
||||
singleton: true,
|
||||
});
|
||||
|
||||
return container;
|
||||
}
|
||||
21
libs/core/di/tsconfig.json
Normal file
21
libs/core/di/tsconfig.json
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
{
|
||||
"extends": "../../../tsconfig.json",
|
||||
"compilerOptions": {
|
||||
"rootDir": "./",
|
||||
"outDir": "./dist",
|
||||
"composite": true,
|
||||
"declaration": true,
|
||||
"declarationMap": true,
|
||||
"types": ["node", "bun-types"]
|
||||
},
|
||||
"include": ["./**/*.ts"],
|
||||
"exclude": ["node_modules", "dist"],
|
||||
"references": [
|
||||
{ "path": "../config" },
|
||||
{ "path": "../logger" },
|
||||
{ "path": "../../data/mongodb" },
|
||||
{ "path": "../../data/postgres" },
|
||||
{ "path": "../../data/cache" },
|
||||
{ "path": "../../services/queue" }
|
||||
]
|
||||
}
|
||||
68
libs/core/di/types.ts
Normal file
68
libs/core/di/types.ts
Normal file
|
|
@ -0,0 +1,68 @@
|
|||
import type { MongoDBClientConfig } from '@stock-bot/mongodb-client';
|
||||
import type { PostgreSQLClientConfig } from '@stock-bot/postgres-client';
|
||||
import type { CacheOptions } from '@stock-bot/cache';
|
||||
import type { QueueManagerConfig } from '@stock-bot/queue';
|
||||
|
||||
export interface ConnectionPoolConfig {
|
||||
name: string;
|
||||
poolSize?: number;
|
||||
minConnections?: number;
|
||||
maxConnections?: number;
|
||||
idleTimeoutMillis?: number;
|
||||
connectionTimeoutMillis?: number;
|
||||
enableMetrics?: boolean;
|
||||
}
|
||||
|
||||
export interface MongoDBPoolConfig extends ConnectionPoolConfig {
|
||||
config: MongoDBClientConfig;
|
||||
}
|
||||
|
||||
export interface PostgreSQLPoolConfig extends ConnectionPoolConfig {
|
||||
config: PostgreSQLClientConfig;
|
||||
}
|
||||
|
||||
export interface CachePoolConfig extends ConnectionPoolConfig {
|
||||
config: CacheOptions;
|
||||
}
|
||||
|
||||
export interface QueuePoolConfig extends ConnectionPoolConfig {
|
||||
config: QueueManagerConfig;
|
||||
}
|
||||
|
||||
export interface ConnectionFactoryConfig {
|
||||
service: string;
|
||||
environment: 'development' | 'production' | 'test';
|
||||
pools?: {
|
||||
mongodb?: Partial<MongoDBPoolConfig>;
|
||||
postgres?: Partial<PostgreSQLPoolConfig>;
|
||||
cache?: Partial<CachePoolConfig>;
|
||||
queue?: Partial<QueuePoolConfig>;
|
||||
};
|
||||
}
|
||||
|
||||
export interface ConnectionPool<T> {
|
||||
name: string;
|
||||
client: T;
|
||||
metrics: PoolMetrics;
|
||||
health(): Promise<boolean>;
|
||||
dispose(): Promise<void>;
|
||||
}
|
||||
|
||||
export interface PoolMetrics {
|
||||
created: Date;
|
||||
totalConnections: number;
|
||||
activeConnections: number;
|
||||
idleConnections: number;
|
||||
waitingRequests: number;
|
||||
errors: number;
|
||||
}
|
||||
|
||||
export interface ConnectionFactory {
|
||||
createMongoDB(config: MongoDBPoolConfig): Promise<ConnectionPool<any>>;
|
||||
createPostgreSQL(config: PostgreSQLPoolConfig): Promise<ConnectionPool<any>>;
|
||||
createCache(config: CachePoolConfig): ConnectionPool<any>;
|
||||
createQueue(config: QueuePoolConfig): ConnectionPool<any>;
|
||||
getPool(type: 'mongodb' | 'postgres' | 'cache' | 'queue', name: string): ConnectionPool<any> | undefined;
|
||||
listPools(): Array<{ type: string; name: string; metrics: PoolMetrics }>;
|
||||
disposeAll(): Promise<void>;
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue