updated di

This commit is contained in:
Boki 2025-06-21 20:07:43 -04:00
parent 3227388d25
commit c5a114d544
9 changed files with 545 additions and 306 deletions

View file

@ -3,4 +3,6 @@ export * from './service-container';
export { ConnectionFactory } from './connection-factory';
export * from './operation-context';
export * from './pool-size-calculator';
export * from './types';
export * from './types';
export * from './service-interfaces';
export * from './service-factory';

View file

@ -0,0 +1,225 @@
/**
* Service Factory for creating and managing all application dependencies
*/
import { getLogger } from '@stock-bot/logger';
import { ConnectionFactory } from './connection-factory';
import { PoolSizeCalculator } from './pool-size-calculator';
import type {
IDataIngestionServices,
IServiceFactory,
IConnectionFactory,
IMongoDBClient,
IPostgreSQLClient
} from './service-interfaces';
import type { CacheProvider } from '@stock-bot/cache';
import type { QueueManager } from '@stock-bot/queue';
export class DataIngestionServiceFactory implements IServiceFactory {
  /**
   * Create all services with proper dependency injection.
   *
   * All four connections (MongoDB, PostgreSQL, cache, queue) are opened in
   * parallel. The results are awaited with `Promise.allSettled` rather than
   * `Promise.all`: an early rejection from `Promise.all` would trigger
   * `disposeAll()` while sibling connection attempts were still in flight,
   * leaking whatever finished connecting afterwards. With `allSettled`,
   * cleanup only runs once every attempt has finished.
   *
   * @param config application configuration; assumed shape includes
   *   `environment` and `database.{mongodb,postgres,dragonfly}` —
   *   TODO: replace `any` with a typed config interface
   * @returns the fully wired service bundle
   * @throws the first connection error encountered (after cleanup)
   */
  async create(config: any): Promise<IDataIngestionServices> {
    const logger = getLogger('data-ingestion-factory');
    logger.info('Creating data ingestion services...');
    // Create connection factory
    const connectionFactory = new ConnectionFactory({
      service: 'data-ingestion',
      environment: config.environment || 'development',
      pools: {
        mongodb: { poolSize: 50 },
        postgres: { poolSize: 30 },
        cache: { poolSize: 20 },
        queue: { poolSize: 1 }
      }
    }) as IConnectionFactory;
    // Open all database connections in parallel; by the time allSettled
    // resolves, no connection attempt is still in flight, so the cleanup
    // path below cannot race against a late-completing connection.
    const [mongoResult, postgresResult, cacheResult, queueResult] =
      await Promise.allSettled([
        this.createMongoDBConnection(connectionFactory, config),
        this.createPostgreSQLConnection(connectionFactory, config),
        this.createCacheConnection(connectionFactory, config),
        this.createQueueConnection(connectionFactory, config)
      ]);
    try {
      const services: IDataIngestionServices = {
        mongodb: this.unwrap(mongoResult).client,
        postgres: this.unwrap(postgresResult).client,
        cache: this.unwrap(cacheResult).client,
        queue: this.unwrap(queueResult).client,
        logger,
        connectionFactory
      };
      logger.info('All data ingestion services created successfully');
      return services;
    } catch (error) {
      logger.error('Failed to create services', { error });
      // Cleanup any partial connections
      await connectionFactory.disposeAll().catch(cleanupError => {
        logger.error('Error during cleanup', { error: cleanupError });
      });
      throw error;
    }
  }
  /**
   * Return the fulfilled value of a settled promise, or rethrow its
   * rejection reason. Used to surface the first connection failure only
   * after every parallel attempt has finished.
   */
  private unwrap<T>(result: PromiseSettledResult<T>): T {
    if (result.status === 'rejected') {
      throw result.reason;
    }
    return result.value;
  }
  /**
   * Dispose all services and connections.
   *
   * Delegates to the connection factory, which owns every pool created in
   * {@link create}; errors are logged and rethrown for the caller.
   */
  async dispose(services: IDataIngestionServices): Promise<void> {
    const logger = services.logger;
    logger.info('Disposing data ingestion services...');
    try {
      // Dispose connection factory (this will close all pools)
      await services.connectionFactory.disposeAll();
      logger.info('All services disposed successfully');
    } catch (error) {
      logger.error('Error disposing services', { error });
      throw error;
    }
  }
  /**
   * Create MongoDB connection with optimized settings.
   *
   * Pool bounds come from PoolSizeCalculator for the 'batch-import'
   * workload profile. Config shape is assumed to be
   * `config.database.mongodb.{uri,database,host,port,user,password,authSource}`
   * — TODO confirm against the config loader.
   */
  private async createMongoDBConnection(
    connectionFactory: IConnectionFactory,
    config: any
  ): Promise<{ client: IMongoDBClient }> {
    const poolSize = PoolSizeCalculator.calculate('data-ingestion', 'batch-import');
    return connectionFactory.createMongoDB({
      name: 'data-ingestion',
      config: {
        uri: config.database.mongodb.uri,
        database: config.database.mongodb.database,
        host: config.database.mongodb.host,
        port: config.database.mongodb.port,
        username: config.database.mongodb.user,
        password: config.database.mongodb.password,
        authSource: config.database.mongodb.authSource,
        poolSettings: {
          maxPoolSize: poolSize.max,
          minPoolSize: poolSize.min,
          maxIdleTime: 30000, // ms before an idle connection is reaped
        }
      },
      maxConnections: poolSize.max,
      minConnections: poolSize.min,
    });
  }
  /**
   * Create PostgreSQL connection with optimized settings.
   *
   * Uses the default (non-workload-specific) pool sizing for this service.
   */
  private async createPostgreSQLConnection(
    connectionFactory: IConnectionFactory,
    config: any
  ): Promise<{ client: IPostgreSQLClient }> {
    const poolSize = PoolSizeCalculator.calculate('data-ingestion');
    return connectionFactory.createPostgreSQL({
      name: 'data-ingestion',
      config: {
        host: config.database.postgres.host,
        port: config.database.postgres.port,
        database: config.database.postgres.database,
        username: config.database.postgres.user,
        password: config.database.postgres.password,
        poolSettings: {
          max: poolSize.max,
          min: poolSize.min,
          idleTimeoutMillis: 30000,
        }
      },
      maxConnections: poolSize.max,
      minConnections: poolSize.min,
    });
  }
  /**
   * Create cache connection (Dragonfly/Redis-compatible, per config keys).
   */
  private async createCacheConnection(
    connectionFactory: IConnectionFactory,
    config: any
  ): Promise<{ client: CacheProvider }> {
    return connectionFactory.createCache({
      name: 'data-ingestion',
      config: {
        host: config.database.dragonfly.host,
        port: config.database.dragonfly.port,
        db: config.database.dragonfly.db,
      }
    });
  }
  /**
   * Create queue connection on the same Dragonfly host as the cache.
   */
  private async createQueueConnection(
    connectionFactory: IConnectionFactory,
    config: any
  ): Promise<{ client: QueueManager }> {
    return connectionFactory.createQueue({
      name: 'data-ingestion',
      config: {
        host: config.database.dragonfly.host,
        port: config.database.dragonfly.port,
        // `??` (not `||`): a configured db index of 0 is valid and must
        // not be silently replaced by the default of 1.
        db: config.database.dragonfly.db ?? 1,
      }
    });
  }
  /**
   * Enable dynamic pool sizing for production workloads.
   *
   * Applies only to clients that expose the optional
   * `setDynamicPoolConfig` hook (feature-detected at runtime); failures
   * are logged as warnings and never propagated — sizing is best-effort.
   */
  async enableDynamicPoolSizing(services: IDataIngestionServices): Promise<void> {
    const dynamicConfig = {
      enabled: true,
      minSize: 5,
      maxSize: 100,
      scaleUpThreshold: 70,   // % utilization that triggers growth
      scaleDownThreshold: 30, // % utilization that triggers shrink
      scaleUpIncrement: 10,
      scaleDownIncrement: 5,
      evaluationInterval: 30000, // ms between evaluations
    };
    try {
      // Set dynamic config for MongoDB
      if (services.mongodb && typeof services.mongodb.setDynamicPoolConfig === 'function') {
        services.mongodb.setDynamicPoolConfig(dynamicConfig);
        services.logger.info('Dynamic pool sizing enabled for MongoDB');
      }
      // Set dynamic config for PostgreSQL
      if (services.postgres && typeof services.postgres.setDynamicPoolConfig === 'function') {
        services.postgres.setDynamicPoolConfig(dynamicConfig);
        services.logger.info('Dynamic pool sizing enabled for PostgreSQL');
      }
    } catch (error) {
      services.logger.warn('Failed to enable dynamic pool sizing', { error });
    }
  }
}
/**
 * Convenience wrapper: build the full service bundle via a throwaway
 * {@link DataIngestionServiceFactory}.
 */
export async function createDataIngestionServices(config: any): Promise<IDataIngestionServices> {
  return new DataIngestionServiceFactory().create(config);
}
/**
 * Convenience wrapper: tear down a service bundle via a throwaway
 * {@link DataIngestionServiceFactory}.
 */
export async function disposeDataIngestionServices(services: IDataIngestionServices): Promise<void> {
  await new DataIngestionServiceFactory().dispose(services);
}

View file

@ -0,0 +1,79 @@
/**
* Service interfaces for type-safe dependency injection
*/
import type { Logger } from '@stock-bot/logger';
import type { CacheProvider } from '@stock-bot/cache';
import type { QueueManager } from '@stock-bot/queue';
// Core database client interfaces
/**
 * Minimal MongoDB client surface the ingestion code depends on.
 * Optional members are feature-detected by callers (see
 * `enableDynamicPoolSizing` in the service factory) before use.
 */
export interface IMongoDBClient {
  // Returns a collection handle; untyped (`any`) — TODO: tighten once the
  // driver wrapper's types are importable here.
  collection(name: string): any;
  getDatabase(): any;
  connect(): Promise<void>;
  disconnect(): Promise<void>;
  // Snapshot of pool statistics; shape defined by the driver wrapper.
  getPoolMetrics(): any;
  // Optional pre-warm of the connection pool.
  warmupPool?(): Promise<void>;
  // Optional hook for runtime pool resizing.
  setDynamicPoolConfig?(config: any): void;
}
/**
 * Minimal PostgreSQL client surface. Mirrors IMongoDBClient, plus a
 * synchronous `connected` flag.
 */
export interface IPostgreSQLClient {
  query(sql: string, params?: any[]): Promise<any>;
  connect(): Promise<void>;
  disconnect(): Promise<void>;
  getPoolMetrics(): any;
  warmupPool?(): Promise<void>;
  setDynamicPoolConfig?(config: any): void;
  connected: boolean;
}
/**
 * Factory that owns every connection pool. Each create* method resolves to
 * an object exposing at least `client`; extra driver-specific keys are
 * allowed via the index signature. `disposeAll` closes every pool created.
 */
export interface IConnectionFactory {
  createMongoDB(config: any): Promise<{ client: IMongoDBClient; [key: string]: any }>;
  createPostgreSQL(config: any): Promise<{ client: IPostgreSQLClient; [key: string]: any }>;
  createCache(config: any): Promise<{ client: CacheProvider; [key: string]: any }>;
  createQueue(config: any): Promise<{ client: QueueManager; [key: string]: any }>;
  disposeAll(): Promise<void>;
  getPool(type: string, name: string): any;
  listPools(): any[];
}
// Main service interface for data ingestion
/**
 * Main service bundle for data ingestion: every dependency a handler
 * needs, wired by the service factory. All members are readonly — the
 * bundle is immutable after construction.
 */
export interface IDataIngestionServices {
  readonly mongodb: IMongoDBClient;
  readonly postgres: IPostgreSQLClient;
  readonly cache: CacheProvider;
  readonly queue: QueueManager;
  readonly logger: Logger;
  // Owns the pools behind the clients above; used for teardown.
  readonly connectionFactory: IConnectionFactory;
}
/**
 * Operation context interface (simplified): per-operation state carrying a
 * trace id and the shared service bundle.
 */
export interface IOperationContext {
  readonly logger: Logger;
  readonly traceId: string;
  readonly metadata: Record<string, any>;
  readonly services: IDataIngestionServices;
}
/**
 * Handler execution context: identifies how an operation was triggered
 * and provides the service bundle. `traceId` is optional because not all
 * entry points generate one.
 */
export interface IExecutionContext {
  readonly type: 'http' | 'queue' | 'scheduled';
  readonly services: IDataIngestionServices;
  readonly metadata: Record<string, any>;
  readonly traceId?: string;
}
/**
 * Service factory contract: symmetric create/dispose of the bundle.
 * `config` is untyped — TODO: introduce a typed config interface.
 */
export interface IServiceFactory {
  create(config: any): Promise<IDataIngestionServices>;
  dispose(services: IDataIngestionServices): Promise<void>;
}
/**
 * For backwards compatibility during migration: shape of the legacy DI
 * container so old call sites can keep compiling until fully migrated.
 */
export interface LegacyServiceContainer {
  resolve<T>(name: string): T;
  resolveAsync<T>(name: string): Promise<T>;
  register(registration: any): void;
  createScope(): any;
  dispose(): Promise<void>;
}

View file

@ -1,18 +1,24 @@
import { getLogger } from '@stock-bot/logger';
import type { ServiceContainer } from '@stock-bot/di';
import type { IDataIngestionServices, IExecutionContext } from '@stock-bot/di';
import type { IHandler, ExecutionContext } from '../types/types';
/**
* Abstract base class for all handlers
* Abstract base class for all handlers with improved DI
* Provides common functionality and structure for queue/event operations
*/
export abstract class BaseHandler implements IHandler {
protected readonly logger;
constructor(protected readonly container: ServiceContainer) {
constructor(protected readonly services: IDataIngestionServices) {
this.logger = getLogger(this.constructor.name);
}
// Convenience getters for common services
protected get mongodb() { return this.services.mongodb; }
protected get postgres() { return this.services.postgres; }
protected get cache() { return this.services.cache; }
protected get queue() { return this.services.queue; }
/**
* Main execution method - must be implemented by subclasses
* Works with queue (events commented for future)
@ -20,18 +26,28 @@ export abstract class BaseHandler implements IHandler {
abstract execute(operation: string, input: unknown, context: ExecutionContext): Promise<unknown>;
/**
* Queue helper methods
* Queue helper methods - now type-safe and direct
*/
protected async scheduleOperation(operation: string, payload: unknown, delay?: number): Promise<void> {
const queue = await this.container.resolveAsync('queue') as any;
await queue.add(operation, payload, { delay });
const queue = this.services.queue.getQueue(this.constructor.name.toLowerCase());
const jobData = {
handler: this.constructor.name.toLowerCase(),
operation,
payload
};
await queue.add(operation, jobData, { delay });
}
/**
* Get a service from the container
* Create execution context for operations
*/
protected async getService<T>(serviceName: string): Promise<T> {
return await this.container.resolveAsync(serviceName);
protected createExecutionContext(type: 'http' | 'queue' | 'scheduled', metadata: Record<string, any> = {}): IExecutionContext {
return {
type,
services: this.services,
metadata,
traceId: `${this.constructor.name}-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`
};
}
/**