added new DI container with connection pooling and rebuilt the database/cache services

Boki 2025-06-21 14:54:51 -04:00
parent be6afef832
commit 09d907a10c
26 changed files with 4844 additions and 205 deletions


@@ -0,0 +1,100 @@
import { OperationContext } from '@stock-bot/utils';
import type { ServiceContainer } from '@stock-bot/connection-factory';

/**
 * Example handler showing how to use the new connection pooling pattern
 */
export class ExampleHandler {
  constructor(private readonly container: ServiceContainer) {}

  /**
   * Example operation using the enhanced OperationContext
   */
  async performOperation(data: any): Promise<void> {
    // Create operation context with container
    const context = OperationContext.create('example', 'perform-operation', {
      container: this.container
    });

    try {
      // Log operation start
      context.logger.info('Starting operation', { data });

      // Use MongoDB through context (no more singleton!)
      const result = await context.mongodb.collection('test').insertOne(data);
      context.logger.debug('MongoDB insert complete', { insertedId: result.insertedId });

      // Use PostgreSQL through context
      await context.postgres.query(
        'INSERT INTO operations (id, status) VALUES ($1, $2)',
        [result.insertedId, 'completed']
      );

      // Use cache through context
      await context.cache.set(`operation:${result.insertedId}`, {
        status: 'completed',
        timestamp: new Date()
      });

      context.logger.info('Operation completed successfully');
    } catch (error) {
      context.logger.error('Operation failed', { error });
      throw error;
    } finally {
      // Clean up resources
      await context.dispose();
    }
  }

  /**
   * Example of batch operation with isolated connection pool
   */
  async performBatchOperation(items: any[]): Promise<void> {
    // Create a scoped container for this batch operation
    const scopedContainer = this.container.createScope();
    const context = OperationContext.create('example', 'batch-operation', {
      container: scopedContainer
    });

    try {
      context.logger.info('Starting batch operation', { itemCount: items.length });

      // Process items in parallel with isolated connections
      const promises = items.map(async (item, index) => {
        // Each sub-operation gets its own context
        const subContext = context.createChild(`item-${index}`);
        try {
          await subContext.mongodb.collection('batch').insertOne(item);
          await subContext.cache.set(`batch:${item.id}`, item);
        } finally {
          await subContext.dispose();
        }
      });

      await Promise.all(promises);
      context.logger.info('Batch operation completed');
    } finally {
      // Clean up scoped resources
      await context.dispose();
      await scopedContainer.dispose();
    }
  }
}

/**
 * Example of how to use in a job handler
 */
export async function createExampleJobHandler(container: ServiceContainer) {
  return async (job: any) => {
    const handler = new ExampleHandler(container);

    if (job.data.type === 'batch') {
      await handler.performBatchOperation(job.data.items);
    } else {
      await handler.performOperation(job.data);
    }
  };
}
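As a rough wiring sketch (not part of this commit): the handler factory above can be combined with the `setupServiceContainer` helper from the next file. The import paths and the inline job list are hypothetical stand-ins for the service's real queue.

// Hypothetical bootstrap wiring (paths are illustrative, not from this commit).
import { setupServiceContainer } from './setup';
import { createExampleJobHandler } from './example-handler';

async function main(): Promise<void> {
  const container = await setupServiceContainer();
  const handleJob = await createExampleJobHandler(container);

  // Stand-in jobs; a real worker would receive these from its queue.
  const jobs = [
    { data: { type: 'single', symbol: 'AAPL' } },
    { data: { type: 'batch', items: [{ id: 1 }, { id: 2 }] } },
  ];

  for (const job of jobs) {
    await handleJob(job);
  }

  // Assumed: dispose() releases the pooled connections registered above.
  await container.dispose();
}

main().catch((err) => {
  console.error('Worker failed', err);
  process.exit(1);
});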


@@ -0,0 +1,137 @@
import { getDatabaseConfig } from '@stock-bot/config';
import { getLogger } from '@stock-bot/logger';
import {
  ConnectionFactory,
  ServiceContainer,
  createServiceContainer,
  PoolSizeCalculator
} from '@stock-bot/connection-factory';
import type { ConnectionFactoryConfig } from '@stock-bot/connection-factory';

const logger = getLogger('database-setup');

/**
 * Creates a connection factory configured for the data-ingestion service
 */
export function createConnectionFactory(): ConnectionFactory {
  const dbConfig = getDatabaseConfig();

  const factoryConfig: ConnectionFactoryConfig = {
    service: 'data-ingestion',
    environment: (process.env.NODE_ENV as 'development' | 'production' | 'test') || 'development',
    pools: {
      mongodb: {
        poolSize: 50, // Higher for batch imports
      },
      postgres: {
        poolSize: 30,
      },
      cache: {
        poolSize: 20,
      }
    }
  };

  return new ConnectionFactory(factoryConfig);
}

/**
 * Sets up the service container with all dependencies
 */
export async function setupServiceContainer(): Promise<ServiceContainer> {
  logger.info('Setting up service container for data-ingestion');

  const connectionFactory = createConnectionFactory();
  const dbConfig = getDatabaseConfig();

  // Create base container
  const container = new ServiceContainer('data-ingestion');

  // Register MongoDB with dynamic pool sizing
  container.register({
    name: 'mongodb',
    factory: async () => {
      const poolSize = PoolSizeCalculator.calculate('data-ingestion', 'batch-import');
      const pool = await connectionFactory.createMongoDB({
        name: 'default',
        config: {
          connectionString: dbConfig.mongodb.uri,
          database: dbConfig.mongodb.database,
          maxPoolSize: poolSize.max,
          minPoolSize: poolSize.min,
        },
        maxConnections: poolSize.max,
        minConnections: poolSize.min,
      });
      return pool.client;
    },
    singleton: true,
    dispose: async (client) => {
      await client.disconnect();
    }
  });

  // Register PostgreSQL
  container.register({
    name: 'postgres',
    factory: async () => {
      const poolSize = PoolSizeCalculator.calculate('data-ingestion');
      const pool = await connectionFactory.createPostgreSQL({
        name: 'default',
        config: {
          host: dbConfig.postgresql.host,
          port: dbConfig.postgresql.port,
          database: dbConfig.postgresql.database,
          user: dbConfig.postgresql.user,
          password: dbConfig.postgresql.password,
          pool: {
            max: poolSize.max,
            min: poolSize.min,
          }
        },
        maxConnections: poolSize.max,
        minConnections: poolSize.min,
      });
      return pool.client;
    },
    singleton: true,
    dispose: async (client) => {
      await client.disconnect();
    }
  });

  // Register Cache
  container.register({
    name: 'cache',
    factory: () => {
      const pool = connectionFactory.createCache({
        name: 'default',
        config: {
          redisConfig: {
            host: dbConfig.dragonfly.host,
            port: dbConfig.dragonfly.port,
            db: dbConfig.dragonfly.db,
          },
          keyPrefix: 'data-ingestion:',
          ttl: 3600,
          enableMetrics: true,
        }
      });
      return pool.client;
    },
    singleton: true,
  });

  // Register the connection factory itself for pool management
  container.register({
    name: 'connectionFactory',
    factory: () => connectionFactory,
    singleton: true,
    dispose: async (factory) => {
      await factory.disposeAll();
    }
  });

  logger.info('Service container setup complete');
  return container;
}
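A minimal sketch of tying the container to process lifecycle (not part of this commit; it assumes `container.dispose()` runs each registration's `dispose` callback, which the API above suggests but this diff does not show):

import { setupServiceContainer } from './setup'; // hypothetical path

async function bootstrap(): Promise<void> {
  const container = await setupServiceContainer();

  // Assumed semantics: dispose() invokes every registration's dispose
  // callback, closing the MongoDB/PostgreSQL clients and the factory pools.
  const shutdown = async (signal: string): Promise<void> => {
    console.log(`Received ${signal}, draining connection pools`);
    await container.dispose();
    process.exit(0);
  };

  process.once('SIGINT', () => void shutdown('SIGINT'));
  process.once('SIGTERM', () => void shutdown('SIGTERM'));
}

bootstrap().catch((err) => {
  console.error('Startup failed', err);
  process.exit(1);
});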


@@ -0,0 +1,116 @@
import { getDatabaseConfig } from '@stock-bot/config';
import { getLogger } from '@stock-bot/logger';
import { createMongoDBClient, createPostgreSQLClient } from '@stock-bot/connection-factory';
import type { DynamicPoolConfig } from '@stock-bot/mongodb-client';

const logger = getLogger('dynamic-pool-example');

/**
 * Example of setting up dynamic pool sizing for high-load scenarios
 */
export async function setupDynamicPools() {
  const dbConfig = getDatabaseConfig();

  // Dynamic pool configuration for batch processing
  const dynamicConfig: DynamicPoolConfig = {
    enabled: true,
    minSize: 5,
    maxSize: 100,
    scaleUpThreshold: 70,     // Scale up when 70% of connections are in use
    scaleDownThreshold: 30,   // Scale down when only 30% are in use
    scaleUpIncrement: 10,     // Add 10 connections at a time
    scaleDownIncrement: 5,    // Remove 5 connections at a time
    evaluationInterval: 10000 // Check every 10 seconds
  };

  // Create MongoDB client with dynamic pooling
  const mongoClient = createMongoDBClient({
    uri: dbConfig.mongodb.uri,
    database: dbConfig.mongodb.database,
    poolSettings: {
      minPoolSize: dynamicConfig.minSize,
      maxPoolSize: dynamicConfig.maxSize,
    }
  }, {
    onConnect: () => logger.info('MongoDB connected with dynamic pooling'),
    onError: (error) => logger.error('MongoDB pool error', { error }),
  });

  await mongoClient.connect();
  mongoClient.setDynamicPoolConfig(dynamicConfig);

  // Create PostgreSQL client with dynamic pooling
  const pgClient = createPostgreSQLClient({
    host: dbConfig.postgresql.host,
    port: dbConfig.postgresql.port,
    database: dbConfig.postgresql.database,
    username: dbConfig.postgresql.user,
    password: dbConfig.postgresql.password,
    poolSettings: {
      min: dynamicConfig.minSize,
      max: dynamicConfig.maxSize,
    }
  }, undefined, {
    onConnect: () => logger.info('PostgreSQL connected with dynamic pooling'),
    onError: (error) => logger.error('PostgreSQL pool error', { error }),
  });

  await pgClient.connect();
  pgClient.setDynamicPoolConfig(dynamicConfig);

  // Monitor pool metrics
  setInterval(() => {
    const mongoMetrics = mongoClient.getPoolMetrics();
    const pgMetrics = pgClient.getPoolMetrics();

    logger.info('Pool metrics', {
      mongodb: {
        total: mongoMetrics.totalConnections,
        active: mongoMetrics.activeConnections,
        idle: mongoMetrics.idleConnections,
        waiting: mongoMetrics.waitingRequests,
      },
      postgresql: {
        total: pgMetrics.totalConnections,
        active: pgMetrics.activeConnections,
        idle: pgMetrics.idleConnections,
        waiting: pgMetrics.waitingRequests,
      }
    });
  }, 30000); // Log metrics every 30 seconds

  return { mongoClient, pgClient };
}

/**
 * Example of adaptive pool sizing based on time of day
 */
export function getTimeBasedPoolConfig(): DynamicPoolConfig {
  const hour = new Date().getHours();

  // High load hours (9 AM - 5 PM)
  if (hour >= 9 && hour <= 17) {
    return {
      enabled: true,
      minSize: 10,
      maxSize: 150,
      scaleUpThreshold: 60,
      scaleDownThreshold: 20,
      scaleUpIncrement: 20,
      scaleDownIncrement: 10,
      evaluationInterval: 5000 // More frequent checks during peak
    };
  }

  // Low load hours (night time)
  return {
    enabled: true,
    minSize: 2,
    maxSize: 50,
    scaleUpThreshold: 80,
    scaleDownThreshold: 40,
    scaleUpIncrement: 5,
    scaleDownIncrement: 2,
    evaluationInterval: 30000 // Less frequent checks at night
  };
}
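Since `getTimeBasedPoolConfig` samples the clock only once, a caller wanting day/night behavior would re-apply it periodically. A sketch (the hourly timer is illustrative, and it assumes `setDynamicPoolConfig` may be called repeatedly to replace the active policy):

// Hypothetical path; both helpers come from the file above.
import { setupDynamicPools, getTimeBasedPoolConfig } from './dynamic-pool-example';

export async function runWithAdaptivePools(): Promise<void> {
  const { mongoClient, pgClient } = await setupDynamicPools();

  // Re-evaluate the time-of-day profile hourly and push it to both clients.
  // Assumes setDynamicPoolConfig can be called again to replace the policy.
  setInterval(() => {
    const config = getTimeBasedPoolConfig();
    mongoClient.setDynamicPoolConfig(config);
    pgClient.setDynamicPoolConfig(config);
  }, 60 * 60 * 1000);
}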