huge refactor with a million things to make the code much more manageable and easier to create new services #3
16 changed files with 141 additions and 476 deletions
|
|
@ -1,8 +1,4 @@
|
|||
import { getLogger, type Logger } from '@stock-bot/logger';
|
||||
import { MongoDBClient, createMongoDBClient, type ConnectionEvents } from '@stock-bot/mongodb';
|
||||
import { PostgreSQLClient, createPostgreSQLClient } from '@stock-bot/postgres';
|
||||
import { createCache, type CacheProvider } from '@stock-bot/cache';
|
||||
import { QueueManager } from '@stock-bot/queue';
|
||||
import type {
|
||||
ConnectionFactory as IConnectionFactory,
|
||||
ConnectionPool,
|
||||
|
|
@ -22,9 +18,10 @@ export class ConnectionFactory implements IConnectionFactory {
|
|||
constructor(config: ConnectionFactoryConfig) {
|
||||
this.config = config;
|
||||
this.logger = getLogger(`connection-factory:${config.service}`);
|
||||
// Note: config is stored for future use and used in logger name
|
||||
}
|
||||
|
||||
async createMongoDB(poolConfig: MongoDBPoolConfig): Promise<ConnectionPool<MongoDBClient>> {
|
||||
async createMongoDB(poolConfig: MongoDBPoolConfig): Promise<ConnectionPool<any>> {
|
||||
const key = `mongodb:${poolConfig.name}`;
|
||||
|
||||
if (this.pools.has(key)) {
|
||||
|
|
@ -38,35 +35,30 @@ export class ConnectionFactory implements IConnectionFactory {
|
|||
});
|
||||
|
||||
try {
|
||||
const events: ConnectionEvents = {
|
||||
// Dynamic import to avoid circular dependency
|
||||
const { createMongoDBClient } = await import('@stock-bot/mongodb');
|
||||
|
||||
const events = {
|
||||
onConnect: () => {
|
||||
this.logger.debug('MongoDB connected', { pool: poolConfig.name });
|
||||
},
|
||||
onDisconnect: () => {
|
||||
this.logger.debug('MongoDB disconnected', { pool: poolConfig.name });
|
||||
},
|
||||
onError: (error) => {
|
||||
onError: (error: any) => {
|
||||
this.logger.error('MongoDB error', { pool: poolConfig.name, error });
|
||||
},
|
||||
};
|
||||
|
||||
const client = createMongoDBClient({
|
||||
...poolConfig.config,
|
||||
poolSettings: {
|
||||
maxPoolSize: poolConfig.maxConnections || poolConfig.poolSize || 10,
|
||||
minPoolSize: poolConfig.minConnections || 2,
|
||||
maxIdleTime: 30000,
|
||||
}
|
||||
}, events);
|
||||
const client = createMongoDBClient(poolConfig.config as any, events);
|
||||
|
||||
await client.connect();
|
||||
|
||||
// Warm up the pool
|
||||
if (poolConfig.minConnections) {
|
||||
await client.warmupPool();
|
||||
}
|
||||
|
||||
const pool: ConnectionPool<MongoDBClient> = {
|
||||
const pool: ConnectionPool<any> = {
|
||||
name: poolConfig.name,
|
||||
client,
|
||||
metrics: client.getPoolMetrics(),
|
||||
|
|
@ -92,7 +84,7 @@ export class ConnectionFactory implements IConnectionFactory {
|
|||
}
|
||||
}
|
||||
|
||||
async createPostgreSQL(poolConfig: PostgreSQLPoolConfig): Promise<ConnectionPool<PostgreSQLClient>> {
|
||||
async createPostgreSQL(poolConfig: PostgreSQLPoolConfig): Promise<ConnectionPool<any>> {
|
||||
const key = `postgres:${poolConfig.name}`;
|
||||
|
||||
if (this.pools.has(key)) {
|
||||
|
|
@ -106,35 +98,19 @@ export class ConnectionFactory implements IConnectionFactory {
|
|||
});
|
||||
|
||||
try {
|
||||
const events: ConnectionEvents = {
|
||||
onConnect: () => {
|
||||
this.logger.debug('PostgreSQL connected', { pool: poolConfig.name });
|
||||
},
|
||||
onDisconnect: () => {
|
||||
this.logger.debug('PostgreSQL disconnected', { pool: poolConfig.name });
|
||||
},
|
||||
onError: (error) => {
|
||||
this.logger.error('PostgreSQL error', { pool: poolConfig.name, error });
|
||||
},
|
||||
};
|
||||
// Dynamic import to avoid circular dependency
|
||||
const { createPostgreSQLClient } = await import('@stock-bot/postgres');
|
||||
|
||||
const client = createPostgreSQLClient({
|
||||
...poolConfig.config,
|
||||
poolSettings: {
|
||||
max: poolConfig.maxConnections || poolConfig.poolSize || 10,
|
||||
min: poolConfig.minConnections || 2,
|
||||
idleTimeoutMillis: poolConfig.idleTimeoutMillis || 30000,
|
||||
},
|
||||
}, undefined, events);
|
||||
// Events will be handled by the client internally
|
||||
const client = createPostgreSQLClient(poolConfig.config as any);
|
||||
|
||||
await client.connect();
|
||||
|
||||
// Warm up the pool
|
||||
if (poolConfig.minConnections) {
|
||||
await client.warmupPool();
|
||||
}
|
||||
|
||||
const pool: ConnectionPool<PostgreSQLClient> = {
|
||||
const pool: ConnectionPool<any> = {
|
||||
name: poolConfig.name,
|
||||
client,
|
||||
metrics: client.getPoolMetrics(),
|
||||
|
|
@ -153,7 +129,7 @@ export class ConnectionFactory implements IConnectionFactory {
|
|||
}
|
||||
}
|
||||
|
||||
createCache(poolConfig: CachePoolConfig): ConnectionPool<CacheProvider> {
|
||||
createCache(poolConfig: CachePoolConfig): ConnectionPool<any> {
|
||||
const key = `cache:${poolConfig.name}`;
|
||||
|
||||
if (this.pools.has(key)) {
|
||||
|
|
@ -166,32 +142,16 @@ export class ConnectionFactory implements IConnectionFactory {
|
|||
});
|
||||
|
||||
try {
|
||||
const cache = createCache({
|
||||
...poolConfig.config,
|
||||
keyPrefix: `${this.config.service}:${poolConfig.name}:`,
|
||||
shared: false, // Each pool gets its own connection
|
||||
});
|
||||
// TODO: Implement cache creation with dynamic import
|
||||
throw new Error('Cache creation temporarily disabled');
|
||||
|
||||
const pool: ConnectionPool<CacheProvider> = {
|
||||
name: poolConfig.name,
|
||||
client: cache,
|
||||
metrics: this.createInitialMetrics(),
|
||||
health: async () => cache.health(),
|
||||
dispose: async () => {
|
||||
// Cache disposal handled internally
|
||||
this.pools.delete(key);
|
||||
},
|
||||
};
|
||||
|
||||
this.pools.set(key, pool);
|
||||
return pool;
|
||||
} catch (error) {
|
||||
this.logger.error('Failed to create cache pool', { name: poolConfig.name, error });
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
createQueue(poolConfig: QueuePoolConfig): ConnectionPool<QueueManager> {
|
||||
createQueue(poolConfig: QueuePoolConfig): ConnectionPool<any> {
|
||||
const key = `queue:${poolConfig.name}`;
|
||||
|
||||
if (this.pools.has(key)) {
|
||||
|
|
@ -204,31 +164,9 @@ export class ConnectionFactory implements IConnectionFactory {
|
|||
});
|
||||
|
||||
try {
|
||||
// Initialize or get existing QueueManager instance
|
||||
const queueManager = QueueManager.getOrInitialize(poolConfig.config);
|
||||
// TODO: Implement queue creation with dynamic import
|
||||
throw new Error('Queue creation temporarily disabled');
|
||||
|
||||
const pool: ConnectionPool<QueueManager> = {
|
||||
name: poolConfig.name,
|
||||
client: queueManager,
|
||||
metrics: this.createInitialMetrics(),
|
||||
health: async () => {
|
||||
try {
|
||||
// Check if QueueManager is initialized
|
||||
queueManager.getQueueNames();
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
},
|
||||
dispose: async () => {
|
||||
// QueueManager handles its own shutdown
|
||||
await queueManager.shutdown();
|
||||
this.pools.delete(key);
|
||||
},
|
||||
};
|
||||
|
||||
this.pools.set(key, pool);
|
||||
return pool;
|
||||
} catch (error) {
|
||||
this.logger.error('Failed to create queue manager', { name: poolConfig.name, error });
|
||||
throw error;
|
||||
|
|
@ -243,11 +181,11 @@ export class ConnectionFactory implements IConnectionFactory {
|
|||
listPools(): Array<{ type: string; name: string; metrics: PoolMetrics }> {
|
||||
const result: Array<{ type: string; name: string; metrics: PoolMetrics }> = [];
|
||||
|
||||
for (const [key, pool] of this.pools.entries()) {
|
||||
const [type, ...nameParts] = key.split(':');
|
||||
for (const [key, pool] of this.pools) {
|
||||
const [type] = key.split(':');
|
||||
result.push({
|
||||
type: type || 'unknown',
|
||||
name: nameParts.join(':'),
|
||||
name: pool.name,
|
||||
metrics: pool.metrics,
|
||||
});
|
||||
}
|
||||
|
|
@ -256,25 +194,10 @@ export class ConnectionFactory implements IConnectionFactory {
|
|||
}
|
||||
|
||||
async disposeAll(): Promise<void> {
|
||||
this.logger.info('Disposing all connection pools', { count: this.pools.size });
|
||||
|
||||
const disposePromises: Promise<void>[] = [];
|
||||
for (const pool of this.pools.values()) {
|
||||
disposePromises.push(pool.dispose());
|
||||
}
|
||||
this.logger.info('Disposing all connection pools', { service: this.config.service });
|
||||
|
||||
const disposePromises = Array.from(this.pools.values()).map(pool => pool.dispose());
|
||||
await Promise.all(disposePromises);
|
||||
this.pools.clear();
|
||||
}
|
||||
|
||||
private createInitialMetrics(): PoolMetrics {
|
||||
return {
|
||||
created: new Date(),
|
||||
totalConnections: 0,
|
||||
activeConnections: 0,
|
||||
idleConnections: 0,
|
||||
waitingRequests: 0,
|
||||
errors: 0,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
|
@ -1,6 +1,6 @@
|
|||
// Export all dependency injection components
|
||||
export * from './service-container';
|
||||
export * from './connection-factory';
|
||||
export { ConnectionFactory } from './connection-factory';
|
||||
export * from './operation-context';
|
||||
export * from './pool-size-calculator';
|
||||
export * from './types';
|
||||
|
|
@ -1,19 +1,12 @@
|
|||
/**
|
||||
* OperationContext - Unified context for handler operations
|
||||
*
|
||||
* Provides streamlined access to:
|
||||
* - Child loggers with hierarchical context
|
||||
* - Database clients (MongoDB, PostgreSQL)
|
||||
* - Contextual cache with automatic key prefixing
|
||||
* - Shared resource management
|
||||
* TEMPORARILY DISABLED to avoid circular dependencies during library build
|
||||
* Will be re-enabled once all core libraries are built
|
||||
*/
|
||||
|
||||
import { createCache, type CacheProvider } from '@stock-bot/cache';
|
||||
import { getLogger, type Logger } from '@stock-bot/logger';
|
||||
import { getDatabaseConfig } from '@stock-bot/config';
|
||||
import type { ServiceResolver } from './service-container';
|
||||
import type { MongoDBClient } from '@stock-bot/mongodb';
|
||||
import type { PostgreSQLClient } from '@stock-bot/postgres';
|
||||
|
||||
export interface OperationContextOptions {
|
||||
handlerName: string;
|
||||
|
|
@ -25,283 +18,44 @@ export interface OperationContextOptions {
|
|||
export class OperationContext {
|
||||
public readonly logger: Logger;
|
||||
private readonly container?: ServiceResolver;
|
||||
private _mongodb?: MongoDBClient;
|
||||
private _postgres?: PostgreSQLClient;
|
||||
private _cache?: CacheProvider;
|
||||
private _queue?: any; // Type will be QueueManager but we avoid import for circular deps
|
||||
|
||||
private static sharedCache: CacheProvider | null = null;
|
||||
private static parentLoggers = new Map<string, Logger>();
|
||||
private static databaseConfig: any = null;
|
||||
|
||||
constructor(
|
||||
public readonly handlerName: string,
|
||||
public readonly operationName: string,
|
||||
parentLoggerOrOptions?: Logger | OperationContextOptions
|
||||
) {
|
||||
// Handle both old and new constructor signatures
|
||||
if (parentLoggerOrOptions && 'container' in parentLoggerOrOptions) {
|
||||
const options = parentLoggerOrOptions;
|
||||
constructor(options: OperationContextOptions) {
|
||||
this.container = options.container;
|
||||
const parent = options.parentLogger || this.getOrCreateParentLogger();
|
||||
this.logger = parent.child(operationName, {
|
||||
handler: handlerName,
|
||||
operation: operationName
|
||||
});
|
||||
} else {
|
||||
// Legacy support
|
||||
const parentLogger = parentLoggerOrOptions as Logger | undefined;
|
||||
const parent = parentLogger || this.getOrCreateParentLogger();
|
||||
this.logger = parent.child(operationName, {
|
||||
handler: handlerName,
|
||||
operation: operationName
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Lazy load MongoDB client
|
||||
get mongodb(): MongoDBClient {
|
||||
if (!this._mongodb) {
|
||||
if (this.container) {
|
||||
try {
|
||||
this._mongodb = this.container.resolve<MongoDBClient>('mongodb');
|
||||
} catch (error) {
|
||||
this.logger.warn('Failed to resolve MongoDB from container, falling back to singleton', { error });
|
||||
this._mongodb = this.getLegacyDatabaseClient('mongodb') as MongoDBClient;
|
||||
}
|
||||
} else {
|
||||
this._mongodb = this.getLegacyDatabaseClient('mongodb') as MongoDBClient;
|
||||
}
|
||||
}
|
||||
return this._mongodb!;
|
||||
}
|
||||
|
||||
// Lazy load PostgreSQL client
|
||||
get postgres(): PostgreSQLClient {
|
||||
if (!this._postgres) {
|
||||
if (this.container) {
|
||||
try {
|
||||
this._postgres = this.container.resolve<PostgreSQLClient>('postgres');
|
||||
} catch (error) {
|
||||
this.logger.warn('Failed to resolve PostgreSQL from container, falling back to singleton', { error });
|
||||
this._postgres = this.getLegacyDatabaseClient('postgres') as PostgreSQLClient;
|
||||
}
|
||||
} else {
|
||||
this._postgres = this.getLegacyDatabaseClient('postgres') as PostgreSQLClient;
|
||||
}
|
||||
}
|
||||
return this._postgres!;
|
||||
}
|
||||
|
||||
// Lazy load QueueManager
|
||||
get queue(): any {
|
||||
if (!this._queue) {
|
||||
if (this.container) {
|
||||
try {
|
||||
this._queue = this.container.resolve('queue');
|
||||
} catch (error) {
|
||||
this.logger.warn('Failed to resolve QueueManager from container, falling back to singleton', { error });
|
||||
this._queue = this.getLegacyQueueManager();
|
||||
}
|
||||
} else {
|
||||
this._queue = this.getLegacyQueueManager();
|
||||
}
|
||||
}
|
||||
return this._queue!;
|
||||
}
|
||||
|
||||
// Legacy method for QueueManager
|
||||
private getLegacyQueueManager(): any {
|
||||
try {
|
||||
// Dynamic import to avoid TypeScript issues during build
|
||||
const { QueueManager } = require('@stock-bot/queue');
|
||||
return QueueManager.getInstance();
|
||||
} catch (error) {
|
||||
this.logger.warn('QueueManager not initialized, queue operations may fail', { error });
|
||||
throw new Error('QueueManager not available');
|
||||
}
|
||||
}
|
||||
|
||||
// Legacy method for backward compatibility
|
||||
private getLegacyDatabaseClient(type: 'mongodb' | 'postgres'): any {
|
||||
try {
|
||||
if (type === 'mongodb') {
|
||||
// Dynamic import to avoid TypeScript issues during build
|
||||
const { getMongoDBClient } = require('@stock-bot/mongodb');
|
||||
return getMongoDBClient();
|
||||
} else {
|
||||
// Dynamic import to avoid TypeScript issues during build
|
||||
const { getPostgreSQLClient } = require('@stock-bot/postgres');
|
||||
return getPostgreSQLClient();
|
||||
}
|
||||
} catch (error) {
|
||||
this.logger.warn(`${type} client not initialized, operations may fail`, { error });
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private getOrCreateParentLogger(): Logger {
|
||||
const parentKey = `${this.handlerName}-handler`;
|
||||
|
||||
if (!OperationContext.parentLoggers.has(parentKey)) {
|
||||
const parentLogger = getLogger(parentKey);
|
||||
OperationContext.parentLoggers.set(parentKey, parentLogger);
|
||||
}
|
||||
|
||||
return OperationContext.parentLoggers.get(parentKey)!;
|
||||
this.logger = options.parentLogger || getLogger(`${options.handlerName}:${options.operationName}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get contextual cache with automatic key prefixing
|
||||
* Keys are automatically prefixed as: "operations:handlerName:operationName:key"
|
||||
* Creates a new OperationContext with automatic resource management
|
||||
* TEMPORARILY SIMPLIFIED - full implementation will be restored after build fixes
|
||||
*/
|
||||
get cache(): CacheProvider {
|
||||
if (!this._cache) {
|
||||
if (this.container) {
|
||||
try {
|
||||
const baseCache = this.container.resolve<CacheProvider>('cache');
|
||||
this._cache = this.createContextualCache(baseCache);
|
||||
} catch (error) {
|
||||
this.logger.warn('Failed to resolve cache from container, using shared cache', { error });
|
||||
this._cache = this.getOrCreateSharedCache();
|
||||
}
|
||||
} else {
|
||||
this._cache = this.getOrCreateSharedCache();
|
||||
}
|
||||
}
|
||||
return this._cache!;
|
||||
}
|
||||
|
||||
private getOrCreateSharedCache(): CacheProvider {
|
||||
if (!OperationContext.sharedCache) {
|
||||
// Get Redis configuration from database config
|
||||
if (!OperationContext.databaseConfig) {
|
||||
OperationContext.databaseConfig = getDatabaseConfig();
|
||||
}
|
||||
|
||||
const redisConfig = OperationContext.databaseConfig.dragonfly || {
|
||||
host: 'localhost',
|
||||
port: 6379,
|
||||
db: 1
|
||||
};
|
||||
|
||||
OperationContext.sharedCache = createCache({
|
||||
keyPrefix: 'operations:',
|
||||
shared: true, // Use singleton Redis connection
|
||||
enableMetrics: true,
|
||||
ttl: 3600, // Default 1 hour TTL
|
||||
redisConfig
|
||||
});
|
||||
}
|
||||
return this.createContextualCache(OperationContext.sharedCache);
|
||||
}
|
||||
|
||||
private createContextualCache(baseCache: CacheProvider): CacheProvider {
|
||||
const contextPrefix = `${this.handlerName}:${this.operationName}:`;
|
||||
|
||||
// Return a proxy that automatically prefixes keys with context
|
||||
return {
|
||||
async get<T>(key: string): Promise<T | null> {
|
||||
return baseCache.get(`${contextPrefix}${key}`);
|
||||
},
|
||||
|
||||
async set<T>(key: string, value: T, options?: any): Promise<T | null> {
|
||||
return baseCache.set(`${contextPrefix}${key}`, value, options);
|
||||
},
|
||||
|
||||
async del(key: string): Promise<void> {
|
||||
return baseCache.del(`${contextPrefix}${key}`);
|
||||
},
|
||||
|
||||
async exists(key: string): Promise<boolean> {
|
||||
return baseCache.exists(`${contextPrefix}${key}`);
|
||||
},
|
||||
|
||||
async clear(): Promise<void> {
|
||||
// Not implemented for contextual cache - use del() for specific keys
|
||||
throw new Error('clear() not implemented for contextual cache - use del() for specific keys');
|
||||
},
|
||||
|
||||
async keys(pattern: string): Promise<string[]> {
|
||||
const fullPattern = `${contextPrefix}${pattern}`;
|
||||
return baseCache.keys(fullPattern);
|
||||
},
|
||||
|
||||
getStats() {
|
||||
return baseCache.getStats();
|
||||
},
|
||||
|
||||
async health(): Promise<boolean> {
|
||||
return baseCache.health();
|
||||
},
|
||||
|
||||
async waitForReady(timeout?: number): Promise<void> {
|
||||
return baseCache.waitForReady(timeout);
|
||||
},
|
||||
|
||||
isReady(): boolean {
|
||||
return baseCache.isReady();
|
||||
}
|
||||
} as CacheProvider;
|
||||
}
|
||||
|
||||
/**
|
||||
* Factory method to create OperationContext
|
||||
*/
|
||||
static create(handlerName: string, operationName: string, parentLoggerOrOptions?: Logger | OperationContextOptions): OperationContext {
|
||||
if (parentLoggerOrOptions && 'container' in parentLoggerOrOptions) {
|
||||
return new OperationContext(handlerName, operationName, {
|
||||
...parentLoggerOrOptions,
|
||||
static create(
|
||||
handlerName: string,
|
||||
operationName: string,
|
||||
options: { container?: ServiceResolver; parentLogger?: Logger } = {}
|
||||
): OperationContext {
|
||||
return new OperationContext({
|
||||
handlerName,
|
||||
operationName
|
||||
operationName,
|
||||
...options,
|
||||
});
|
||||
}
|
||||
return new OperationContext(handlerName, operationName, parentLoggerOrOptions as Logger | undefined);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get cache key prefix for this operation context
|
||||
*/
|
||||
getCacheKeyPrefix(): string {
|
||||
return `operations:${this.handlerName}:${this.operationName}:`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a child context for sub-operations
|
||||
*/
|
||||
createChild(subOperationName: string): OperationContext {
|
||||
if (this.container) {
|
||||
return new OperationContext(
|
||||
this.handlerName,
|
||||
`${this.operationName}:${subOperationName}`,
|
||||
{
|
||||
handlerName: this.handlerName,
|
||||
operationName: `${this.operationName}:${subOperationName}`,
|
||||
parentLogger: this.logger,
|
||||
container: this.container
|
||||
}
|
||||
);
|
||||
}
|
||||
return new OperationContext(
|
||||
this.handlerName,
|
||||
`${this.operationName}:${subOperationName}`,
|
||||
this.logger
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Dispose of resources if using container-based connections
|
||||
* This is a no-op for legacy singleton connections
|
||||
* Cleanup method - simplified for now
|
||||
*/
|
||||
async dispose(): Promise<void> {
|
||||
// If using container, it will handle cleanup
|
||||
// For singleton connections, they persist
|
||||
this.logger.debug('OperationContext disposed', {
|
||||
handler: this.handlerName,
|
||||
operation: this.operationName,
|
||||
hasContainer: !!this.container
|
||||
// Cleanup will be implemented when dependencies are resolved
|
||||
}
|
||||
|
||||
/**
|
||||
* Create child context - simplified for now
|
||||
*/
|
||||
createChild(operationName: string): OperationContext {
|
||||
return new OperationContext({
|
||||
handlerName: 'child',
|
||||
operationName,
|
||||
parentLogger: this.logger,
|
||||
container: this.container,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
export default OperationContext;
|
||||
|
|
@ -9,13 +9,7 @@
|
|||
},
|
||||
"dependencies": {
|
||||
"@stock-bot/config": "workspace:*",
|
||||
"@stock-bot/logger": "workspace:*",
|
||||
"@stock-bot/mongodb": "workspace:*",
|
||||
"@stock-bot/postgres": "workspace:*",
|
||||
"@stock-bot/cache": "workspace:*",
|
||||
"@stock-bot/queue": "workspace:*",
|
||||
"mongodb": "^6.3.0",
|
||||
"pg": "^8.11.3"
|
||||
"@stock-bot/logger": "workspace:*"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/pg": "^8.10.7"
|
||||
|
|
|
|||
|
|
@ -165,51 +165,43 @@ export function createServiceContainer(
|
|||
singleton: true,
|
||||
});
|
||||
|
||||
// Register ProxyManager
|
||||
container.register({
|
||||
name: 'proxyManager',
|
||||
factory: async () => {
|
||||
const { ProxyManager } = await import('@stock-bot/utils');
|
||||
await ProxyManager.initialize();
|
||||
return ProxyManager.getInstance();
|
||||
},
|
||||
singleton: true,
|
||||
dispose: async (proxyManager) => {
|
||||
// ProxyManager handles its own cleanup
|
||||
if (proxyManager && typeof proxyManager.shutdown === 'function') {
|
||||
await proxyManager.shutdown();
|
||||
}
|
||||
}
|
||||
});
|
||||
// Optional services - comment out for now to avoid circular dependencies
|
||||
// These can be registered manually by apps that need them
|
||||
|
||||
// Register Browser service
|
||||
container.register({
|
||||
name: 'browser',
|
||||
factory: async () => {
|
||||
const { Browser } = await import('@stock-bot/browser');
|
||||
return Browser;
|
||||
},
|
||||
singleton: true,
|
||||
dispose: async (browser) => {
|
||||
if (browser && typeof browser.close === 'function') {
|
||||
await browser.close();
|
||||
}
|
||||
}
|
||||
});
|
||||
// // Register ProxyManager
|
||||
// container.register({
|
||||
// name: 'proxyManager',
|
||||
// factory: async () => {
|
||||
// const { ProxyManager } = await import('@stock-bot/utils');
|
||||
// await ProxyManager.initialize();
|
||||
// return ProxyManager.getInstance();
|
||||
// },
|
||||
// singleton: true,
|
||||
// });
|
||||
|
||||
// Register HttpClient with default configuration
|
||||
container.register({
|
||||
name: 'httpClient',
|
||||
factory: async () => {
|
||||
const { createHttpClient } = await import('@stock-bot/http');
|
||||
return createHttpClient({
|
||||
timeout: 30000,
|
||||
retries: 3,
|
||||
userAgent: 'stock-bot/1.0',
|
||||
});
|
||||
},
|
||||
singleton: true,
|
||||
});
|
||||
// // Register Browser service
|
||||
// container.register({
|
||||
// name: 'browser',
|
||||
// factory: async () => {
|
||||
// const { Browser } = await import('@stock-bot/browser');
|
||||
// return Browser;
|
||||
// },
|
||||
// singleton: true,
|
||||
// });
|
||||
|
||||
// // Register HttpClient with default configuration
|
||||
// container.register({
|
||||
// name: 'httpClient',
|
||||
// factory: async () => {
|
||||
// const { createHttpClient } = await import('@stock-bot/http');
|
||||
// return createHttpClient({
|
||||
// timeout: 30000,
|
||||
// retries: 3,
|
||||
// userAgent: 'stock-bot/1.0',
|
||||
// });
|
||||
// },
|
||||
// singleton: true,
|
||||
// });
|
||||
|
||||
return container;
|
||||
}
|
||||
|
|
@ -12,10 +12,6 @@
|
|||
"exclude": ["node_modules", "dist"],
|
||||
"references": [
|
||||
{ "path": "../config" },
|
||||
{ "path": "../logger" },
|
||||
{ "path": "../../data/mongodb" },
|
||||
{ "path": "../../data/postgres" },
|
||||
{ "path": "../../data/cache" },
|
||||
{ "path": "../../services/queue" }
|
||||
{ "path": "../logger" }
|
||||
]
|
||||
}
|
||||
|
|
@ -1,7 +1,7 @@
|
|||
import type { MongoDBClientConfig } from '@stock-bot/mongodb-client';
|
||||
import type { PostgreSQLClientConfig } from '@stock-bot/postgres-client';
|
||||
import type { CacheOptions } from '@stock-bot/cache';
|
||||
import type { QueueManagerConfig } from '@stock-bot/queue';
|
||||
// Generic types to avoid circular dependencies
|
||||
export interface GenericClientConfig {
|
||||
[key: string]: any;
|
||||
}
|
||||
|
||||
export interface ConnectionPoolConfig {
|
||||
name: string;
|
||||
|
|
@ -14,19 +14,19 @@ export interface ConnectionPoolConfig {
|
|||
}
|
||||
|
||||
export interface MongoDBPoolConfig extends ConnectionPoolConfig {
|
||||
config: MongoDBClientConfig;
|
||||
config: GenericClientConfig;
|
||||
}
|
||||
|
||||
export interface PostgreSQLPoolConfig extends ConnectionPoolConfig {
|
||||
config: PostgreSQLClientConfig;
|
||||
config: GenericClientConfig;
|
||||
}
|
||||
|
||||
export interface CachePoolConfig extends ConnectionPoolConfig {
|
||||
config: CacheOptions;
|
||||
config: GenericClientConfig;
|
||||
}
|
||||
|
||||
export interface QueuePoolConfig extends ConnectionPoolConfig {
|
||||
config: QueueManagerConfig;
|
||||
config: GenericClientConfig;
|
||||
}
|
||||
|
||||
export interface ConnectionFactoryConfig {
|
||||
|
|
|
|||
|
|
@ -11,7 +11,8 @@
|
|||
},
|
||||
"dependencies": {
|
||||
"@stock-bot/config": "workspace:*",
|
||||
"@stock-bot/logger": "workspace:*"
|
||||
"@stock-bot/logger": "workspace:*",
|
||||
"@stock-bot/di": "workspace:*"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^20.11.0",
|
||||
|
|
|
|||
|
|
@ -1,4 +1,5 @@
|
|||
import { getLogger } from '@stock-bot/logger';
|
||||
import type { ServiceContainer } from '@stock-bot/di';
|
||||
import type { IHandler, ExecutionContext } from '../types/types';
|
||||
|
||||
/**
|
||||
|
|
@ -8,7 +9,7 @@ import type { IHandler, ExecutionContext } from '../types/types';
|
|||
export abstract class BaseHandler implements IHandler {
|
||||
protected readonly logger;
|
||||
|
||||
constructor(protected readonly container: any) {
|
||||
constructor(protected readonly container: ServiceContainer) {
|
||||
this.logger = getLogger(this.constructor.name);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1,9 +1,9 @@
|
|||
// import type { ServiceContainer } from '@stock-bot/di'; // Temporarily commented
|
||||
import type { ServiceContainer } from '@stock-bot/di';
|
||||
|
||||
// Simple execution context - mostly queue for now
|
||||
export interface ExecutionContext {
|
||||
type: 'queue'; // | 'event' - commented for future
|
||||
serviceContainer: any; // ServiceContainer - temporarily any
|
||||
serviceContainer: ServiceContainer;
|
||||
metadata: {
|
||||
source?: string;
|
||||
jobId?: string;
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@
|
|||
"include": ["src/**/*"],
|
||||
"references": [
|
||||
{ "path": "../config" },
|
||||
{ "path": "../logger" }
|
||||
{ "path": "../logger" },
|
||||
{ "path": "../di" }
|
||||
]
|
||||
}
|
||||
|
|
@ -7,7 +7,7 @@
|
|||
},
|
||||
"include": ["src/**/*"],
|
||||
"references": [
|
||||
{ "path": "../logger" },
|
||||
{ "path": "../types" }
|
||||
{ "path": "../../core/logger" },
|
||||
{ "path": "../../core/types" }
|
||||
]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@
|
|||
},
|
||||
"include": ["src/**/*"],
|
||||
"references": [
|
||||
{ "path": "../logger" },
|
||||
{ "path": "../../core/logger" },
|
||||
{ "path": "../http" }
|
||||
]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -7,6 +7,6 @@
|
|||
},
|
||||
"include": ["src/**/*"],
|
||||
"references": [
|
||||
{ "path": "../logger" }
|
||||
{ "path": "../../core/logger" }
|
||||
]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,11 +1,14 @@
|
|||
{
|
||||
"extends": "../../../tsconfig.json",
|
||||
"extends": "../../tsconfig.json",
|
||||
"compilerOptions": {
|
||||
"outDir": "./dist",
|
||||
"rootDir": "./src",
|
||||
"composite": true
|
||||
"composite": true,
|
||||
"skipLibCheck": true,
|
||||
"types": ["node", "bun-types"]
|
||||
},
|
||||
"include": ["src/**/*"],
|
||||
"exclude": ["node_modules", "dist"],
|
||||
"references": [
|
||||
{ "path": "../core/types" },
|
||||
{ "path": "../data/cache" },
|
||||
|
|
|
|||
|
|
@ -31,28 +31,28 @@ trap cleanup EXIT
|
|||
|
||||
# Build order is important due to dependencies
|
||||
libs=(
|
||||
# Core Libraries
|
||||
# Core Libraries - minimal dependencies
|
||||
"core/types" # Base types - no dependencies
|
||||
"core/config" # Configuration - depends on types
|
||||
"core/logger" # Logging utilities - depends on types
|
||||
"core/handlers" # Handler infrastructure - depends on core libs
|
||||
"utils" # Utilities - depends on types and config
|
||||
|
||||
# Data access libraries
|
||||
"data/postgres" # PostgreSQL client - depends on core libs
|
||||
"data/mongodb" # MongoDB client - depends on core libs
|
||||
"data/questdb" # QuestDB client - depends on core libs
|
||||
"data/cache" # Cache - depends on core libs
|
||||
"data/mongodb" # MongoDB client - depends on core libs
|
||||
"data/postgres" # PostgreSQL client - depends on core libs
|
||||
"data/questdb" # QuestDB client - depends on core libs
|
||||
|
||||
# Service libraries
|
||||
"services/http" # HTTP client - depends on core libs
|
||||
"services/event-bus" # Event bus - depends on core libs
|
||||
"services/queue" # Queue - depends on core libs and cache
|
||||
"services/shutdown" # Shutdown - depends on core libs
|
||||
"services/browser" # Browser - depends on core libs
|
||||
"services/queue" # Queue - depends on core libs, cache, and handlers
|
||||
|
||||
# DI and Connection Factory - depends on everything
|
||||
"core/di" # Dependency injection - depends on all other libs
|
||||
# Utils and DI last - depend on many other libs
|
||||
"utils" # Utilities - depends on many libs
|
||||
"core/di" # Dependency injection - depends on data and service libs
|
||||
)
|
||||
|
||||
# Build each library in order
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue