Added new DI with connection pool and rebuilt the database/cache services

This commit is contained in:
Boki 2025-06-21 14:54:51 -04:00
parent be6afef832
commit 09d907a10c
26 changed files with 4844 additions and 205 deletions

View file

@@ -0,0 +1,137 @@
import { getDatabaseConfig } from '@stock-bot/config';
import { getLogger } from '@stock-bot/logger';
import {
ConnectionFactory,
ServiceContainer,
createServiceContainer,
PoolSizeCalculator
} from '@stock-bot/connection-factory';
import type { ConnectionFactoryConfig } from '@stock-bot/connection-factory';
// Module-scoped logger shared by the setup functions in this file.
const logger = getLogger('database-setup');
/**
 * Creates a connection factory configured for the data-ingestion service.
 *
 * Pool sizes here are static per-database defaults; note that
 * setupServiceContainer() also derives sizes via PoolSizeCalculator at
 * registration time — which of the two wins is decided inside
 * ConnectionFactory (TODO confirm).
 *
 * @returns a ConnectionFactory scoped to the 'data-ingestion' service
 */
export function createConnectionFactory(): ConnectionFactory {
  // Narrow NODE_ENV with a runtime check instead of a type assertion.
  // The previous `process.env.NODE_ENV as 'development' | ... || 'development'`
  // let any truthy-but-invalid value (e.g. 'staging') through while falsely
  // claiming the union type; now such values fall back to 'development'.
  const env = process.env.NODE_ENV;
  const environment: 'development' | 'production' | 'test' =
    env === 'production' || env === 'test' || env === 'development'
      ? env
      : 'development';

  // NOTE(review): a previous unused `getDatabaseConfig()` call was removed
  // here — it had no visible side effects in this file; confirm the getter
  // is pure before relying on this.
  const factoryConfig: ConnectionFactoryConfig = {
    service: 'data-ingestion',
    environment,
    pools: {
      mongodb: {
        poolSize: 50, // Higher for batch imports
      },
      postgres: {
        poolSize: 30,
      },
      cache: {
        poolSize: 20,
      },
    },
  };

  return new ConnectionFactory(factoryConfig);
}
/**
 * Sets up the service container with all dependencies for the
 * data-ingestion service: MongoDB, PostgreSQL, a cache client, and the
 * connection factory itself.
 *
 * All registrations are singletons. MongoDB and PostgreSQL register
 * explicit dispose hooks; the cache client does not — presumably it is
 * torn down via connectionFactory.disposeAll() instead (confirm in
 * ConnectionFactory).
 *
 * @returns the populated ServiceContainer (factories appear to run
 *          lazily on first resolution — TODO confirm the container's
 *          resolution semantics)
 */
export async function setupServiceContainer(): Promise<ServiceContainer> {
logger.info('Setting up service container for data-ingestion');
const connectionFactory = createConnectionFactory();
const dbConfig = getDatabaseConfig();
// Create base container, namespaced to this service
const container = new ServiceContainer('data-ingestion');
// Register MongoDB with dynamic pool sizing.
// NOTE(review): only this registration passes the 'batch-import'
// workload hint to PoolSizeCalculator; the postgres one below does not.
container.register({
name: 'mongodb',
factory: async () => {
const poolSize = PoolSizeCalculator.calculate('data-ingestion', 'batch-import');
const pool = await connectionFactory.createMongoDB({
name: 'default',
config: {
connectionString: dbConfig.mongodb.uri,
database: dbConfig.mongodb.database,
// Pool bounds come from the calculator here, not the static sizes in
// createConnectionFactory's factoryConfig — which takes precedence is
// decided inside ConnectionFactory; verify there.
maxPoolSize: poolSize.max,
minPoolSize: poolSize.min,
},
maxConnections: poolSize.max,
minConnections: poolSize.min,
});
// Only the raw client is exposed to consumers; the pool wrapper stays
// internal to the connection factory.
return pool.client;
},
singleton: true,
// Dispose closes the client; assumes client.disconnect() also releases
// the underlying pool — TODO confirm against ConnectionFactory.
dispose: async (client) => {
await client.disconnect();
}
});
// Register PostgreSQL (no workload hint — default sizing)
container.register({
name: 'postgres',
factory: async () => {
const poolSize = PoolSizeCalculator.calculate('data-ingestion');
const pool = await connectionFactory.createPostgreSQL({
name: 'default',
config: {
host: dbConfig.postgresql.host,
port: dbConfig.postgresql.port,
database: dbConfig.postgresql.database,
user: dbConfig.postgresql.user,
password: dbConfig.postgresql.password,
pool: {
max: poolSize.max,
min: poolSize.min,
}
},
maxConnections: poolSize.max,
minConnections: poolSize.min,
});
return pool.client;
},
singleton: true,
dispose: async (client) => {
await client.disconnect();
}
});
// Register Cache. Unlike the two above, this factory is synchronous —
// createCache apparently returns the pool directly rather than a promise.
container.register({
name: 'cache',
factory: () => {
const pool = connectionFactory.createCache({
name: 'default',
config: {
redisConfig: {
host: dbConfig.dragonfly.host,
port: dbConfig.dragonfly.port,
db: dbConfig.dragonfly.db,
},
// All cache keys from this service are namespaced under this prefix.
keyPrefix: 'data-ingestion:',
ttl: 3600,
enableMetrics: true,
}
});
return pool.client;
},
singleton: true,
// NOTE(review): no dispose hook registered for the cache client —
// presumably covered by connectionFactory.disposeAll() below; confirm.
});
// Register the connection factory itself for pool management, so
// consumers can reach pool-level operations (metrics, resize, teardown).
container.register({
name: 'connectionFactory',
factory: () => connectionFactory,
singleton: true,
// disposeAll() presumably closes every pool the factory created,
// including the cache pool registered above — TODO confirm.
dispose: async (factory) => {
await factory.disposeAll();
}
});
logger.info('Service container setup complete');
return container;
}