moved folders around
This commit is contained in:
parent
4f89affc2b
commit
36cb84b343
202 changed files with 1160 additions and 660 deletions
33
libs/data/cache/package.json
vendored
Normal file
33
libs/data/cache/package.json
vendored
Normal file
|
|
@ -0,0 +1,33 @@
|
|||
{
|
||||
"name": "@stock-bot/cache",
|
||||
"version": "1.0.0",
|
||||
"description": "Caching library for Redis and in-memory providers",
|
||||
"main": "dist/index.js",
|
||||
"types": "dist/index.d.ts",
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"build": "tsc",
|
||||
"clean": "rimraf dist",
|
||||
"test": "bun test"
|
||||
},
|
||||
"dependencies": {
|
||||
"@stock-bot/logger": "*",
|
||||
"ioredis": "^5.3.2"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^20.11.0",
|
||||
"typescript": "^5.3.0",
|
||||
"bun-types": "^1.2.15"
|
||||
},
|
||||
"exports": {
|
||||
".": {
|
||||
"import": "./dist/index.js",
|
||||
"require": "./dist/index.js",
|
||||
"types": "./dist/index.d.ts"
|
||||
}
|
||||
},
|
||||
"files": [
|
||||
"dist",
|
||||
"README.md"
|
||||
]
|
||||
}
|
||||
273
libs/data/cache/src/connection-manager.ts
vendored
Normal file
273
libs/data/cache/src/connection-manager.ts
vendored
Normal file
|
|
@ -0,0 +1,273 @@
|
|||
import Redis from 'ioredis';
|
||||
import { getLogger } from '@stock-bot/logger';
|
||||
import type { RedisConfig } from './types';
|
||||
|
||||
interface ConnectionConfig {
|
||||
name: string;
|
||||
singleton?: boolean;
|
||||
db?: number;
|
||||
redisConfig: RedisConfig;
|
||||
}
|
||||
|
||||
/**
|
||||
* Redis Connection Manager for managing shared and unique connections
|
||||
*/
|
||||
export class RedisConnectionManager {
|
||||
private connections = new Map<string, Redis>();
|
||||
private static sharedConnections = new Map<string, Redis>();
|
||||
private static instance: RedisConnectionManager;
|
||||
private logger = getLogger('redis-connection-manager');
|
||||
private static readyConnections = new Set<string>();
|
||||
|
||||
// Singleton pattern for the manager itself
|
||||
static getInstance(): RedisConnectionManager {
|
||||
if (!this.instance) {
|
||||
this.instance = new RedisConnectionManager();
|
||||
}
|
||||
return this.instance;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get or create a Redis connection
|
||||
* @param config Connection configuration
|
||||
* @returns Redis connection instance
|
||||
*/
|
||||
getConnection(config: ConnectionConfig): Redis {
|
||||
const { name, singleton = false, db, redisConfig } = config;
|
||||
|
||||
if (singleton) {
|
||||
// Use shared connection across all instances
|
||||
if (!RedisConnectionManager.sharedConnections.has(name)) {
|
||||
const connection = this.createConnection(name, redisConfig, db);
|
||||
RedisConnectionManager.sharedConnections.set(name, connection);
|
||||
this.logger.info(`Created shared Redis connection: ${name}`);
|
||||
}
|
||||
const connection = RedisConnectionManager.sharedConnections.get(name);
|
||||
if (!connection) {
|
||||
throw new Error(`Expected connection ${name} to exist in shared connections`);
|
||||
}
|
||||
return connection;
|
||||
} else {
|
||||
// Create unique connection per instance
|
||||
const uniqueName = `${name}-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
|
||||
const connection = this.createConnection(uniqueName, redisConfig, db);
|
||||
this.connections.set(uniqueName, connection);
|
||||
this.logger.debug(`Created unique Redis connection: ${uniqueName}`);
|
||||
return connection;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new Redis connection with configuration
|
||||
*/
|
||||
private createConnection(name: string, config: RedisConfig, db?: number): Redis {
|
||||
const redisOptions = {
|
||||
host: config.host,
|
||||
port: config.port,
|
||||
password: config.password || undefined,
|
||||
username: config.username || undefined,
|
||||
db: db ?? config.db ?? 0,
|
||||
maxRetriesPerRequest: config.maxRetriesPerRequest ?? 3,
|
||||
retryDelayOnFailover: config.retryDelayOnFailover ?? 100,
|
||||
connectTimeout: config.connectTimeout ?? 10000,
|
||||
commandTimeout: config.commandTimeout ?? 5000,
|
||||
keepAlive: config.keepAlive ?? 0,
|
||||
connectionName: name,
|
||||
lazyConnect: false, // Connect immediately instead of waiting for first command
|
||||
...(config.tls && {
|
||||
tls: {
|
||||
cert: config.tls.cert || undefined,
|
||||
key: config.tls.key || undefined,
|
||||
ca: config.tls.ca || undefined,
|
||||
rejectUnauthorized: config.tls.rejectUnauthorized ?? true,
|
||||
},
|
||||
}),
|
||||
};
|
||||
|
||||
const redis = new Redis(redisOptions);
|
||||
|
||||
// Setup event handlers
|
||||
redis.on('connect', () => {
|
||||
this.logger.info(`Redis connection established: ${name}`);
|
||||
});
|
||||
|
||||
redis.on('ready', () => {
|
||||
this.logger.info(`Redis connection ready: ${name}`);
|
||||
});
|
||||
|
||||
redis.on('error', err => {
|
||||
this.logger.error(`Redis connection error for ${name}:`, err);
|
||||
});
|
||||
|
||||
redis.on('close', () => {
|
||||
this.logger.warn(`Redis connection closed: ${name}`);
|
||||
});
|
||||
|
||||
redis.on('reconnecting', () => {
|
||||
this.logger.warn(`Redis reconnecting: ${name}`);
|
||||
});
|
||||
|
||||
return redis;
|
||||
}
|
||||
|
||||
/**
|
||||
* Close a specific connection
|
||||
*/
|
||||
async closeConnection(connection: Redis): Promise<void> {
|
||||
try {
|
||||
await connection.quit();
|
||||
} catch (error) {
|
||||
this.logger.warn('Error closing Redis connection:', error as Error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Close all connections managed by this instance
|
||||
*/
|
||||
async closeAllConnections(): Promise<void> {
|
||||
// Close instance-specific connections
|
||||
const instancePromises = Array.from(this.connections.values()).map(conn =>
|
||||
this.closeConnection(conn)
|
||||
);
|
||||
await Promise.all(instancePromises);
|
||||
this.connections.clear();
|
||||
|
||||
// Close shared connections (only if this is the last instance)
|
||||
if (RedisConnectionManager.instance === this) {
|
||||
const sharedPromises = Array.from(RedisConnectionManager.sharedConnections.values()).map(
|
||||
conn => this.closeConnection(conn)
|
||||
);
|
||||
await Promise.all(sharedPromises);
|
||||
RedisConnectionManager.sharedConnections.clear();
|
||||
}
|
||||
|
||||
this.logger.info('All Redis connections closed');
|
||||
}
|
||||
|
||||
/**
|
||||
* Get connection statistics
|
||||
*/
|
||||
getConnectionCount(): { shared: number; unique: number } {
|
||||
return {
|
||||
shared: RedisConnectionManager.sharedConnections.size,
|
||||
unique: this.connections.size,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all connection names for monitoring
|
||||
*/
|
||||
getConnectionNames(): { shared: string[]; unique: string[] } {
|
||||
return {
|
||||
shared: Array.from(RedisConnectionManager.sharedConnections.keys()),
|
||||
unique: Array.from(this.connections.keys()),
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Health check for all connections
|
||||
*/
|
||||
async healthCheck(): Promise<{ healthy: boolean; details: Record<string, boolean> }> {
|
||||
const details: Record<string, boolean> = {};
|
||||
let allHealthy = true;
|
||||
|
||||
// Check shared connections
|
||||
for (const [name, connection] of RedisConnectionManager.sharedConnections) {
|
||||
try {
|
||||
await connection.ping();
|
||||
details[`shared:${name}`] = true;
|
||||
} catch {
|
||||
details[`shared:${name}`] = false;
|
||||
allHealthy = false;
|
||||
}
|
||||
}
|
||||
|
||||
// Check instance connections
|
||||
for (const [name, connection] of this.connections) {
|
||||
try {
|
||||
await connection.ping();
|
||||
details[`unique:${name}`] = true;
|
||||
} catch {
|
||||
details[`unique:${name}`] = false;
|
||||
allHealthy = false;
|
||||
}
|
||||
}
|
||||
|
||||
return { healthy: allHealthy, details };
|
||||
}
|
||||
|
||||
/**
|
||||
* Wait for all created connections to be ready
|
||||
* @param timeout Maximum time to wait in milliseconds
|
||||
* @returns Promise that resolves when all connections are ready
|
||||
*/
|
||||
static async waitForAllConnections(timeout: number = 30000): Promise<void> {
|
||||
const instance = this.getInstance();
|
||||
const allConnections = new Map([...instance.connections, ...this.sharedConnections]);
|
||||
|
||||
if (allConnections.size === 0) {
|
||||
instance.logger.debug('No Redis connections to wait for');
|
||||
return;
|
||||
}
|
||||
|
||||
instance.logger.info(`Waiting for ${allConnections.size} Redis connections to be ready...`);
|
||||
|
||||
const connectionPromises = Array.from(allConnections.entries()).map(([name, redis]) =>
|
||||
instance.waitForConnection(redis, name, timeout)
|
||||
);
|
||||
|
||||
try {
|
||||
await Promise.all(connectionPromises);
|
||||
instance.logger.info('All Redis connections are ready');
|
||||
} catch (error) {
|
||||
instance.logger.error('Failed to establish all Redis connections:', error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Wait for a specific connection to be ready
|
||||
*/
|
||||
private async waitForConnection(redis: Redis, name: string, timeout: number): Promise<void> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const timeoutId = setTimeout(() => {
|
||||
reject(new Error(`Redis connection ${name} failed to be ready within ${timeout}ms`));
|
||||
}, timeout);
|
||||
|
||||
const onReady = () => {
|
||||
clearTimeout(timeoutId);
|
||||
RedisConnectionManager.readyConnections.add(name);
|
||||
this.logger.debug(`Redis connection ready: ${name}`);
|
||||
resolve();
|
||||
};
|
||||
|
||||
const onError = (err: Error) => {
|
||||
clearTimeout(timeoutId);
|
||||
this.logger.error(`Redis connection failed for ${name}:`, err);
|
||||
reject(err);
|
||||
};
|
||||
|
||||
if (redis.status === 'ready') {
|
||||
onReady();
|
||||
} else {
|
||||
redis.once('ready', onReady);
|
||||
redis.once('error', onError);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if all connections are ready
|
||||
*/
|
||||
static areAllConnectionsReady(): boolean {
|
||||
const instance = this.getInstance();
|
||||
const allConnections = new Map([...instance.connections, ...this.sharedConnections]);
|
||||
|
||||
return (
|
||||
allConnections.size > 0 &&
|
||||
Array.from(allConnections.keys()).every(name => this.readyConnections.has(name))
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
export default RedisConnectionManager;
|
||||
50
libs/data/cache/src/index.ts
vendored
Normal file
50
libs/data/cache/src/index.ts
vendored
Normal file
|
|
@ -0,0 +1,50 @@
|
|||
import { RedisCache } from './redis-cache';
|
||||
import type { CacheOptions, CacheProvider } from './types';
|
||||
|
||||
// Cache instances registry to prevent multiple instances with same prefix
|
||||
const cacheInstances = new Map<string, CacheProvider>();
|
||||
|
||||
/**
|
||||
* Create a Redis cache instance with trading-optimized defaults
|
||||
*/
|
||||
export function createCache(options: CacheOptions): CacheProvider {
|
||||
const defaultOptions: CacheOptions = {
|
||||
keyPrefix: 'cache:',
|
||||
ttl: 3600, // 1 hour default
|
||||
enableMetrics: true,
|
||||
shared: true, // Default to shared connections
|
||||
...options,
|
||||
};
|
||||
|
||||
// For shared connections, reuse cache instances with the same key prefix
|
||||
if (defaultOptions.shared) {
|
||||
const cacheKey = `${defaultOptions.keyPrefix}-${defaultOptions.ttl}`;
|
||||
|
||||
if (cacheInstances.has(cacheKey)) {
|
||||
const cachedInstance = cacheInstances.get(cacheKey);
|
||||
if (!cachedInstance) {
|
||||
throw new Error(`Expected cache instance ${cacheKey} to exist`);
|
||||
}
|
||||
return cachedInstance;
|
||||
}
|
||||
|
||||
const cache = new RedisCache(defaultOptions);
|
||||
cacheInstances.set(cacheKey, cache);
|
||||
return cache;
|
||||
}
|
||||
|
||||
// For non-shared connections, always create new instances
|
||||
return new RedisCache(defaultOptions);
|
||||
}
|
||||
|
||||
// Export types and classes
|
||||
export type {
|
||||
CacheConfig, CacheKey, CacheOptions, CacheProvider, CacheStats, RedisConfig, SerializationOptions
|
||||
} from './types';
|
||||
|
||||
export { RedisConnectionManager } from './connection-manager';
|
||||
export { CacheKeyGenerator } from './key-generator';
|
||||
export { RedisCache } from './redis-cache';
|
||||
|
||||
// Default export for convenience
|
||||
export default createCache;
|
||||
73
libs/data/cache/src/key-generator.ts
vendored
Normal file
73
libs/data/cache/src/key-generator.ts
vendored
Normal file
|
|
@ -0,0 +1,73 @@
|
|||
export class CacheKeyGenerator {
|
||||
/**
|
||||
* Generate cache key for market data
|
||||
*/
|
||||
static marketData(symbol: string, timeframe: string, date?: Date): string {
|
||||
const dateStr = date ? date.toISOString().split('T')[0] : 'latest';
|
||||
return `market:${symbol.toLowerCase()}:${timeframe}:${dateStr}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate cache key for technical indicators
|
||||
*/
|
||||
static indicator(symbol: string, indicator: string, period: number, dataHash: string): string {
|
||||
return `indicator:${symbol.toLowerCase()}:${indicator}:${period}:${dataHash}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate cache key for backtest results
|
||||
*/
|
||||
static backtest(strategyName: string, params: Record<string, unknown>): string {
|
||||
const paramHash = this.hashObject(params);
|
||||
return `backtest:${strategyName}:${paramHash}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate cache key for strategy results
|
||||
*/
|
||||
static strategy(strategyName: string, symbol: string, timeframe: string): string {
|
||||
return `strategy:${strategyName}:${symbol.toLowerCase()}:${timeframe}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate cache key for user sessions
|
||||
*/
|
||||
static userSession(userId: string): string {
|
||||
return `session:${userId}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate cache key for portfolio data
|
||||
*/
|
||||
static portfolio(userId: string, portfolioId: string): string {
|
||||
return `portfolio:${userId}:${portfolioId}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate cache key for real-time prices
|
||||
*/
|
||||
static realtimePrice(symbol: string): string {
|
||||
return `price:realtime:${symbol.toLowerCase()}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate cache key for order book data
|
||||
*/
|
||||
static orderBook(symbol: string, depth: number = 10): string {
|
||||
return `orderbook:${symbol.toLowerCase()}:${depth}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a simple hash from object for cache keys
|
||||
*/
|
||||
private static hashObject(obj: Record<string, unknown>): string {
|
||||
const str = JSON.stringify(obj, Object.keys(obj).sort());
|
||||
let hash = 0;
|
||||
for (let i = 0; i < str.length; i++) {
|
||||
const char = str.charCodeAt(i);
|
||||
hash = (hash << 5) - hash + char;
|
||||
hash = hash & hash; // Convert to 32-bit integer
|
||||
}
|
||||
return Math.abs(hash).toString(36);
|
||||
}
|
||||
}
|
||||
437
libs/data/cache/src/redis-cache.ts
vendored
Normal file
437
libs/data/cache/src/redis-cache.ts
vendored
Normal file
|
|
@ -0,0 +1,437 @@
|
|||
import Redis from 'ioredis';
|
||||
import { getLogger } from '@stock-bot/logger';
|
||||
import { RedisConnectionManager } from './connection-manager';
|
||||
import { CacheOptions, CacheProvider, CacheStats } from './types';
|
||||
|
||||
/**
|
||||
* Simplified Redis-based cache provider using connection manager
|
||||
*/
|
||||
export class RedisCache implements CacheProvider {
|
||||
private redis: Redis;
|
||||
private logger = getLogger('redis-cache');
|
||||
private defaultTTL: number;
|
||||
private keyPrefix: string;
|
||||
private enableMetrics: boolean;
|
||||
private isConnected = false;
|
||||
private startTime = Date.now();
|
||||
private connectionManager: RedisConnectionManager;
|
||||
|
||||
private stats: CacheStats = {
|
||||
hits: 0,
|
||||
misses: 0,
|
||||
errors: 0,
|
||||
hitRate: 0,
|
||||
total: 0,
|
||||
uptime: 0,
|
||||
};
|
||||
|
||||
constructor(options: CacheOptions) {
|
||||
this.defaultTTL = options.ttl ?? 3600; // 1 hour default
|
||||
this.keyPrefix = options.keyPrefix ?? 'cache:';
|
||||
this.enableMetrics = options.enableMetrics ?? true;
|
||||
|
||||
// Get connection manager instance
|
||||
this.connectionManager = RedisConnectionManager.getInstance();
|
||||
|
||||
// Generate connection name based on cache type
|
||||
const baseName =
|
||||
options.name ||
|
||||
this.keyPrefix
|
||||
.replace(':', '')
|
||||
.replace(/[^a-zA-Z0-9]/g, '')
|
||||
.toUpperCase() ||
|
||||
'CACHE';
|
||||
|
||||
// Get Redis connection (shared by default for cache)
|
||||
this.redis = this.connectionManager.getConnection({
|
||||
name: `${baseName}-SERVICE`,
|
||||
singleton: options.shared ?? true, // Default to shared connection for cache
|
||||
redisConfig: options.redisConfig,
|
||||
});
|
||||
|
||||
// Only setup event handlers for non-shared connections to avoid memory leaks
|
||||
if (!(options.shared ?? true)) {
|
||||
this.setupEventHandlers();
|
||||
} else {
|
||||
// For shared connections, just monitor the connection status without adding handlers
|
||||
this.isConnected = this.redis.status === 'ready';
|
||||
}
|
||||
}
|
||||
|
||||
private setupEventHandlers(): void {
|
||||
this.redis.on('connect', () => {
|
||||
this.logger.info('Redis cache connected');
|
||||
});
|
||||
|
||||
this.redis.on('ready', () => {
|
||||
this.isConnected = true;
|
||||
this.logger.info('Redis cache ready');
|
||||
});
|
||||
|
||||
this.redis.on('error', (error: Error) => {
|
||||
this.isConnected = false;
|
||||
this.logger.error('Redis cache connection error', { error: error.message });
|
||||
});
|
||||
|
||||
this.redis.on('close', () => {
|
||||
this.isConnected = false;
|
||||
this.logger.warn('Redis cache connection closed');
|
||||
});
|
||||
|
||||
this.redis.on('reconnecting', () => {
|
||||
this.logger.warn('Redis cache reconnecting...');
|
||||
});
|
||||
}
|
||||
|
||||
private getKey(key: string): string {
|
||||
return `${this.keyPrefix}${key}`;
|
||||
}
|
||||
|
||||
private updateStats(hit: boolean, error = false): void {
|
||||
if (!this.enableMetrics) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (error) {
|
||||
this.stats.errors++;
|
||||
} else if (hit) {
|
||||
this.stats.hits++;
|
||||
} else {
|
||||
this.stats.misses++;
|
||||
}
|
||||
|
||||
this.stats.total = this.stats.hits + this.stats.misses;
|
||||
this.stats.hitRate = this.stats.total > 0 ? this.stats.hits / this.stats.total : 0;
|
||||
this.stats.uptime = Date.now() - this.startTime;
|
||||
}
|
||||
|
||||
private async safeExecute<T>(
|
||||
operation: () => Promise<T>,
|
||||
fallback: T,
|
||||
operationName: string
|
||||
): Promise<T> {
|
||||
try {
|
||||
if (!this.isReady()) {
|
||||
this.logger.warn(`Redis not ready for ${operationName}, using fallback`);
|
||||
this.updateStats(false, true);
|
||||
return fallback;
|
||||
}
|
||||
return await operation();
|
||||
} catch (error) {
|
||||
this.logger.error(`Redis ${operationName} failed`, {
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
});
|
||||
this.updateStats(false, true);
|
||||
return fallback;
|
||||
}
|
||||
}
|
||||
|
||||
async get<T>(key: string): Promise<T | null> {
|
||||
return this.safeExecute(
|
||||
async () => {
|
||||
const fullKey = this.getKey(key);
|
||||
const value = await this.redis.get(fullKey);
|
||||
|
||||
if (value === null) {
|
||||
this.updateStats(false);
|
||||
this.logger.debug('Cache miss', { key });
|
||||
return null;
|
||||
}
|
||||
|
||||
this.updateStats(true);
|
||||
this.logger.debug('Cache hit', { key });
|
||||
|
||||
try {
|
||||
return JSON.parse(value) as T;
|
||||
} catch {
|
||||
// If parsing fails, return as string
|
||||
return value as unknown as T;
|
||||
}
|
||||
},
|
||||
null,
|
||||
'get'
|
||||
);
|
||||
}
|
||||
|
||||
async set<T>(
|
||||
key: string,
|
||||
value: T,
|
||||
options?:
|
||||
| number
|
||||
| {
|
||||
ttl?: number;
|
||||
preserveTTL?: boolean;
|
||||
onlyIfExists?: boolean;
|
||||
onlyIfNotExists?: boolean;
|
||||
getOldValue?: boolean;
|
||||
}
|
||||
): Promise<T | null> {
|
||||
return this.safeExecute(
|
||||
async () => {
|
||||
const fullKey = this.getKey(key);
|
||||
const serialized = typeof value === 'string' ? value : JSON.stringify(value);
|
||||
|
||||
// Handle backward compatibility - if options is a number, treat as TTL
|
||||
const config = typeof options === 'number' ? { ttl: options } : options || {};
|
||||
|
||||
let oldValue: T | null = null;
|
||||
|
||||
// Get old value if requested
|
||||
if (config.getOldValue) {
|
||||
const existingValue = await this.redis.get(fullKey);
|
||||
if (existingValue !== null) {
|
||||
try {
|
||||
oldValue = JSON.parse(existingValue) as T;
|
||||
} catch {
|
||||
oldValue = existingValue as unknown as T;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle preserveTTL logic
|
||||
if (config.preserveTTL) {
|
||||
const currentTTL = await this.redis.ttl(fullKey);
|
||||
|
||||
if (currentTTL === -2) {
|
||||
// Key doesn't exist
|
||||
if (config.onlyIfExists) {
|
||||
this.logger.debug('Set skipped - key does not exist and onlyIfExists is true', {
|
||||
key,
|
||||
});
|
||||
return oldValue;
|
||||
}
|
||||
// Set with default or specified TTL
|
||||
const ttl = config.ttl ?? this.defaultTTL;
|
||||
await this.redis.setex(fullKey, ttl, serialized);
|
||||
this.logger.debug('Cache set with new TTL (key did not exist)', { key, ttl });
|
||||
} else if (currentTTL === -1) {
|
||||
// Key exists but has no expiry - preserve the no-expiry state
|
||||
await this.redis.set(fullKey, serialized);
|
||||
this.logger.debug('Cache set preserving no-expiry', { key });
|
||||
} else {
|
||||
// Key exists with TTL - preserve it
|
||||
await this.redis.setex(fullKey, currentTTL, serialized);
|
||||
this.logger.debug('Cache set preserving existing TTL', { key, ttl: currentTTL });
|
||||
}
|
||||
} else {
|
||||
// Standard set logic with conditional operations
|
||||
if (config.onlyIfExists && config.onlyIfNotExists) {
|
||||
throw new Error('Cannot specify both onlyIfExists and onlyIfNotExists');
|
||||
}
|
||||
|
||||
if (config.onlyIfExists) {
|
||||
// Only set if key exists (XX flag)
|
||||
const ttl = config.ttl ?? this.defaultTTL;
|
||||
const result = await this.redis.set(fullKey, serialized, 'EX', ttl, 'XX');
|
||||
if (result === null) {
|
||||
this.logger.debug('Set skipped - key does not exist', { key });
|
||||
return oldValue;
|
||||
}
|
||||
} else if (config.onlyIfNotExists) {
|
||||
// Only set if key doesn't exist (NX flag)
|
||||
const ttl = config.ttl ?? this.defaultTTL;
|
||||
const result = await this.redis.set(fullKey, serialized, 'EX', ttl, 'NX');
|
||||
if (result === null) {
|
||||
this.logger.debug('Set skipped - key already exists', { key });
|
||||
return oldValue;
|
||||
}
|
||||
} else {
|
||||
// Standard set
|
||||
const ttl = config.ttl ?? this.defaultTTL;
|
||||
await this.redis.setex(fullKey, ttl, serialized);
|
||||
}
|
||||
|
||||
this.logger.debug('Cache set', { key, ttl: config.ttl ?? this.defaultTTL });
|
||||
}
|
||||
|
||||
return oldValue;
|
||||
},
|
||||
null,
|
||||
'set'
|
||||
);
|
||||
}
|
||||
|
||||
async del(key: string): Promise<void> {
|
||||
await this.safeExecute(
|
||||
async () => {
|
||||
const fullKey = this.getKey(key);
|
||||
await this.redis.del(fullKey);
|
||||
this.logger.debug('Cache delete', { key });
|
||||
},
|
||||
undefined,
|
||||
'del'
|
||||
);
|
||||
}
|
||||
|
||||
async exists(key: string): Promise<boolean> {
|
||||
return this.safeExecute(
|
||||
async () => {
|
||||
const fullKey = this.getKey(key);
|
||||
const result = await this.redis.exists(fullKey);
|
||||
return result === 1;
|
||||
},
|
||||
false,
|
||||
'exists'
|
||||
);
|
||||
}
|
||||
|
||||
async clear(): Promise<void> {
|
||||
await this.safeExecute(
|
||||
async () => {
|
||||
const pattern = `${this.keyPrefix}*`;
|
||||
const keys = await this.redis.keys(pattern);
|
||||
if (keys.length > 0) {
|
||||
await this.redis.del(...keys);
|
||||
this.logger.warn('Cache cleared', { keysDeleted: keys.length });
|
||||
}
|
||||
},
|
||||
undefined,
|
||||
'clear'
|
||||
);
|
||||
}
|
||||
|
||||
async keys(pattern: string): Promise<string[]> {
|
||||
return this.safeExecute(
|
||||
async () => {
|
||||
const fullPattern = `${this.keyPrefix}${pattern}`;
|
||||
const keys = await this.redis.keys(fullPattern);
|
||||
// Remove the prefix from returned keys to match the interface expectation
|
||||
return keys.map(key => key.replace(this.keyPrefix, ''));
|
||||
},
|
||||
[],
|
||||
'keys'
|
||||
);
|
||||
}
|
||||
|
||||
async health(): Promise<boolean> {
|
||||
try {
|
||||
const pong = await this.redis.ping();
|
||||
return pong === 'PONG';
|
||||
} catch (error) {
|
||||
this.logger.error('Redis health check failed', {
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
});
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
getStats(): CacheStats {
|
||||
return {
|
||||
...this.stats,
|
||||
uptime: Date.now() - this.startTime,
|
||||
};
|
||||
}
|
||||
|
||||
async waitForReady(timeout = 5000): Promise<void> {
|
||||
return new Promise((resolve, reject) => {
|
||||
if (this.redis.status === 'ready') {
|
||||
resolve();
|
||||
return;
|
||||
}
|
||||
|
||||
const timeoutId = setTimeout(() => {
|
||||
reject(new Error(`Redis connection timeout after ${timeout}ms`));
|
||||
}, timeout);
|
||||
|
||||
this.redis.once('ready', () => {
|
||||
clearTimeout(timeoutId);
|
||||
resolve();
|
||||
});
|
||||
|
||||
this.redis.once('error', error => {
|
||||
clearTimeout(timeoutId);
|
||||
reject(error);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
isReady(): boolean {
|
||||
// Always check the actual Redis connection status
|
||||
const ready = this.redis.status === 'ready';
|
||||
|
||||
// Update local flag if we're not using shared connection
|
||||
if (this.isConnected !== ready) {
|
||||
this.isConnected = ready;
|
||||
}
|
||||
|
||||
return ready;
|
||||
}
|
||||
|
||||
// Enhanced convenience methods
|
||||
async update<T>(key: string, value: T): Promise<T | null> {
|
||||
return this.set(key, value, { preserveTTL: true, getOldValue: true });
|
||||
}
|
||||
|
||||
async setIfExists<T>(key: string, value: T, ttl?: number): Promise<boolean> {
|
||||
const result = await this.set(key, value, { ttl, onlyIfExists: true });
|
||||
return result !== null || (await this.exists(key));
|
||||
}
|
||||
|
||||
async setIfNotExists<T>(key: string, value: T, ttl?: number): Promise<boolean> {
|
||||
const oldValue = await this.set(key, value, { ttl, onlyIfNotExists: true, getOldValue: true });
|
||||
return oldValue === null; // Returns true if key didn't exist before
|
||||
}
|
||||
|
||||
async replace<T>(key: string, value: T, ttl?: number): Promise<T | null> {
|
||||
return this.set(key, value, { ttl, onlyIfExists: true, getOldValue: true });
|
||||
}
|
||||
|
||||
// Atomic update with transformation
|
||||
async updateField<T>(
|
||||
key: string,
|
||||
updater: (current: T | null) => T,
|
||||
ttl?: number
|
||||
): Promise<T | null> {
|
||||
return this.safeExecute(
|
||||
async () => {
|
||||
const fullKey = this.getKey(key);
|
||||
|
||||
// Use Lua script for atomic read-modify-write
|
||||
const luaScript = `
|
||||
local key = KEYS[1]
|
||||
|
||||
-- Get current value and TTL
|
||||
local current_value = redis.call('GET', key)
|
||||
local current_ttl = redis.call('TTL', key)
|
||||
|
||||
-- Return current value for processing
|
||||
return {current_value, current_ttl}
|
||||
`;
|
||||
|
||||
const [currentValue, currentTTL] = (await this.redis.eval(luaScript, 1, fullKey)) as [
|
||||
string | null,
|
||||
number,
|
||||
];
|
||||
|
||||
// Parse current value
|
||||
let parsed: T | null = null;
|
||||
if (currentValue !== null) {
|
||||
try {
|
||||
parsed = JSON.parse(currentValue) as T;
|
||||
} catch {
|
||||
parsed = currentValue as unknown as T;
|
||||
}
|
||||
}
|
||||
|
||||
// Apply updater function
|
||||
const newValue = updater(parsed);
|
||||
|
||||
// Set the new value with appropriate TTL logic
|
||||
if (ttl !== undefined) {
|
||||
// Use specified TTL
|
||||
await this.set(key, newValue, ttl);
|
||||
} else if (currentTTL === -2) {
|
||||
// Key didn't exist, use default TTL
|
||||
await this.set(key, newValue);
|
||||
} else {
|
||||
// Preserve existing TTL
|
||||
await this.set(key, newValue, { preserveTTL: true });
|
||||
}
|
||||
|
||||
return parsed;
|
||||
},
|
||||
null,
|
||||
'updateField'
|
||||
);
|
||||
}
|
||||
}
|
||||
112
libs/data/cache/src/types.ts
vendored
Normal file
112
libs/data/cache/src/types.ts
vendored
Normal file
|
|
@ -0,0 +1,112 @@
|
|||
/** Connection settings for a single Redis server (mirrors ioredis options). */
export interface RedisConfig {
  host: string;
  port: number;
  password?: string;
  username?: string;
  /** Database index; providers default to 0 when omitted. */
  db?: number;
  keyPrefix?: string;
  maxRetriesPerRequest?: number;
  retryDelayOnFailover?: number;
  /** Connection establishment timeout in milliseconds. */
  connectTimeout?: number;
  /** Per-command timeout in milliseconds. */
  commandTimeout?: number;
  keepAlive?: number;
  /** TLS material; presence of this object enables TLS. */
  tls?: {
    cert?: string;
    key?: string;
    ca?: string;
    rejectUnauthorized?: boolean;
  };
}

/**
 * Contract implemented by cache backends (e.g. RedisCache). Core operations
 * are required; the "enhanced" conditional/atomic helpers are optional.
 */
export interface CacheProvider {
  /** Get a value by key; null when absent. */
  get<T>(key: string): Promise<T | null>;
  /**
   * Set a value. A bare number is a TTL (seconds) for backward
   * compatibility; the object form adds conditional semantics.
   */
  set<T>(
    key: string,
    value: T,
    options?:
      | number
      | {
          ttl?: number;
          preserveTTL?: boolean;
          onlyIfExists?: boolean;
          onlyIfNotExists?: boolean;
          getOldValue?: boolean;
        }
  ): Promise<T | null>;
  del(key: string): Promise<void>;
  exists(key: string): Promise<boolean>;
  /** Remove every key owned by this cache. */
  clear(): Promise<void>;
  /** List keys matching a glob pattern (provider prefix stripped). */
  keys(pattern: string): Promise<string[]>;
  getStats(): CacheStats;
  /** Liveness probe; false on any failure. */
  health(): Promise<boolean>;

  /**
   * Wait for the cache to be ready and connected
   * @param timeout Maximum time to wait in milliseconds (default: 5000)
   * @returns Promise that resolves when cache is ready
   */
  waitForReady(timeout?: number): Promise<void>;

  /**
   * Check if the cache is currently ready
   */
  isReady(): boolean;

  // Enhanced cache methods
  /**
   * Update value preserving existing TTL
   */
  update?<T>(key: string, value: T): Promise<T | null>;

  /**
   * Set value only if key exists
   */
  setIfExists?<T>(key: string, value: T, ttl?: number): Promise<boolean>;

  /**
   * Set value only if key doesn't exist
   */
  setIfNotExists?<T>(key: string, value: T, ttl?: number): Promise<boolean>;

  /**
   * Replace existing key's value and TTL
   */
  replace?<T>(key: string, value: T, ttl?: number): Promise<T | null>;
  /**
   * Atomically update field with transformation function
   */
  updateField?<T>(key: string, updater: (current: T | null) => T, ttl?: number): Promise<T | null>;
}

/** Options accepted by createCache / cache constructors. */
export interface CacheOptions {
  /** Default TTL in seconds. */
  ttl?: number;
  keyPrefix?: string;
  enableMetrics?: boolean;
  name?: string; // Name for connection identification
  shared?: boolean; // Whether to use shared connection
  redisConfig: RedisConfig;
}

/** Hit/miss/error counters maintained by a provider. */
export interface CacheStats {
  hits: number;
  misses: number;
  errors: number;
  /** hits / (hits + misses); 0 when no lookups have happened. */
  hitRate: number;
  total: number;
  /** Milliseconds since the provider was constructed. */
  uptime: number;
}

/** Declarative cache configuration (currently Redis only). */
export interface CacheConfig {
  type: 'redis';
  keyPrefix?: string;
  defaultTTL?: number;
  enableMetrics?: boolean;
  compression?: boolean;
}

/** A key may be a literal string or a lazy producer. */
export type CacheKey = string | (() => string);

/** Knobs for value serialization (not yet consumed by RedisCache — TODO confirm). */
export interface SerializationOptions {
  compress?: boolean;
  binary?: boolean;
}
|
||||
12
libs/data/cache/tsconfig.json
vendored
Normal file
12
libs/data/cache/tsconfig.json
vendored
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
{
|
||||
"extends": "../../../tsconfig.json",
|
||||
"compilerOptions": {
|
||||
"outDir": "./dist",
|
||||
"rootDir": "./src",
|
||||
"composite": true
|
||||
},
|
||||
"include": ["src/**/*"],
|
||||
"references": [
|
||||
{ "path": "../../core/logger" }
|
||||
]
|
||||
}
|
||||
72
libs/data/mongodb/README.md
Normal file
72
libs/data/mongodb/README.md
Normal file
|
|
@ -0,0 +1,72 @@
|
|||
# MongoDB Client Library
|
||||
|
||||
A comprehensive MongoDB client library for the Stock Bot trading platform, designed for handling document storage, raw data, and unstructured content.
|
||||
|
||||
## Features
|
||||
|
||||
- **Connection Management**: Robust connection pooling and failover
|
||||
- **Schema Validation**: Built-in validation using Zod schemas
|
||||
- **Type Safety**: Full TypeScript support with typed collections
|
||||
- **Error Handling**: Comprehensive error handling and retry logic
|
||||
- **Health Monitoring**: Connection health monitoring and metrics
|
||||
- **Transactions**: Support for multi-document transactions
|
||||
- **Aggregation**: Helper methods for complex aggregation pipelines
|
||||
|
||||
## Usage
|
||||
|
||||
```typescript
|
||||
import { MongoDBClient } from '@stock-bot/mongodb';
|
||||
|
||||
// Initialize client
|
||||
const mongoClient = new MongoDBClient();
|
||||
await mongoClient.connect();
|
||||
|
||||
// Get a typed collection
|
||||
const collection = mongoClient.getCollection('sentiment_data');
|
||||
|
||||
// Insert document
|
||||
await collection.insertOne({
|
||||
symbol: 'AAPL',
|
||||
sentiment: 'positive',
|
||||
source: 'reddit',
|
||||
timestamp: new Date()
|
||||
});
|
||||
|
||||
// Query with aggregation
|
||||
const results = await collection.aggregate([
|
||||
{ $match: { symbol: 'AAPL' } },
|
||||
{ $group: { _id: '$sentiment', count: { $sum: 1 } } }
|
||||
]);
|
||||
```
|
||||
|
||||
## Collections
|
||||
|
||||
The client provides typed access to the following collections:
|
||||
|
||||
- **sentiment_data**: Social media sentiment analysis
|
||||
- **raw_documents**: Unprocessed documents and content
|
||||
- **news_articles**: Financial news and articles
|
||||
- **sec_filings**: SEC filing documents
|
||||
- **earnings_transcripts**: Earnings call transcripts
|
||||
- **analyst_reports**: Research reports and analysis
|
||||
|
||||
## Configuration
|
||||
|
||||
Configure using environment variables:
|
||||
|
||||
```env
|
||||
MONGODB_HOST=localhost
|
||||
MONGODB_PORT=27017
|
||||
MONGODB_DATABASE=trading_documents
|
||||
MONGODB_USERNAME=trading_admin
|
||||
MONGODB_PASSWORD=your_password
|
||||
```
|
||||
|
||||
## Health Monitoring

Health-status types (`MongoDBHealthStatus`, `MongoDBHealthCheck`) are exported for
monitoring integrations. Note that the current `MongoDBClient` is focused on batch
upserts and may not expose a `getHealth()` helper — verify availability in your
client version before relying on the example below:

```typescript
const health = await mongoClient.getHealth();
console.log(health.status); // 'healthy' | 'degraded' | 'unhealthy'
```
|
||||
52
libs/data/mongodb/package.json
Normal file
52
libs/data/mongodb/package.json
Normal file
|
|
@ -0,0 +1,52 @@
|
|||
{
|
||||
"name": "@stock-bot/mongodb",
|
||||
"version": "1.0.0",
|
||||
"description": "MongoDB client library for Stock Bot platform",
|
||||
"main": "dist/index.js",
|
||||
"types": "dist/index.d.ts",
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"build": "tsc",
|
||||
"test": "bun test",
|
||||
"lint": "eslint src/**/*.ts",
|
||||
"type-check": "tsc --noEmit",
|
||||
"clean": "rimraf dist"
|
||||
},
|
||||
"dependencies": {
|
||||
"@stock-bot/logger": "*",
|
||||
"@stock-bot/types": "*",
|
||||
"@types/mongodb": "^4.0.7",
|
||||
"mongodb": "^6.17.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^20.11.0",
|
||||
"typescript": "^5.3.0",
|
||||
"eslint": "^8.56.0",
|
||||
"@typescript-eslint/eslint-plugin": "^6.19.0",
|
||||
"@typescript-eslint/parser": "^6.19.0",
|
||||
"bun-types": "^1.2.15"
|
||||
},
|
||||
"keywords": [
|
||||
"mongodb",
|
||||
"database",
|
||||
"client",
|
||||
"stock-bot"
|
||||
],
|
||||
"exports": {
|
||||
".": {
|
||||
"import": "./dist/index.js",
|
||||
"require": "./dist/index.js",
|
||||
"types": "./dist/index.d.ts"
|
||||
}
|
||||
},
|
||||
"files": [
|
||||
"dist",
|
||||
"README.md"
|
||||
],
|
||||
"paths": {
|
||||
"*": [
|
||||
"node_modules/*",
|
||||
"../../node_modules/*"
|
||||
]
|
||||
}
|
||||
}
|
||||
520
libs/data/mongodb/src/client.ts
Normal file
520
libs/data/mongodb/src/client.ts
Normal file
|
|
@ -0,0 +1,520 @@
|
|||
import { getLogger } from '@stock-bot/logger';
|
||||
import { Collection, Db, MongoClient, OptionalUnlessRequiredId } from 'mongodb';
|
||||
import type { DocumentBase, MongoDBClientConfig, PoolMetrics, ConnectionEvents, DynamicPoolConfig } from './types';
|
||||
|
||||
/**
|
||||
* MongoDB Client for Stock Bot Data Service
|
||||
*
|
||||
* MongoDB client focused on batch upsert operations
|
||||
* with minimal configuration and no health monitoring complexity.
|
||||
*/
|
||||
export class MongoDBClient {
|
||||
private client: MongoClient | null = null;
|
||||
private db: Db | null = null;
|
||||
private readonly config: MongoDBClientConfig;
|
||||
private defaultDatabase: string;
|
||||
private readonly logger = getLogger('mongodb-client');
|
||||
private isConnected = false;
|
||||
private readonly metrics: PoolMetrics;
|
||||
private readonly events?: ConnectionEvents;
|
||||
private dynamicPoolConfig?: DynamicPoolConfig;
|
||||
private poolMonitorInterval?: Timer;
|
||||
|
||||
constructor(config: MongoDBClientConfig, events?: ConnectionEvents) {
|
||||
this.config = config;
|
||||
this.defaultDatabase = config.database || 'stock';
|
||||
this.events = events;
|
||||
this.metrics = {
|
||||
totalConnections: 0,
|
||||
activeConnections: 0,
|
||||
idleConnections: 0,
|
||||
waitingRequests: 0,
|
||||
errors: 0,
|
||||
created: new Date(),
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Connect to MongoDB with simple configuration
|
||||
*/
|
||||
async connect(): Promise<void> {
|
||||
if (this.isConnected && this.client) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const uri = this.buildConnectionUri();
|
||||
this.logger.info('Connecting to MongoDB...');
|
||||
|
||||
this.client = new MongoClient(uri, {
|
||||
maxPoolSize: this.config.poolSettings?.maxPoolSize || 10,
|
||||
minPoolSize: this.config.poolSettings?.minPoolSize || 1,
|
||||
connectTimeoutMS: this.config.timeouts?.connectTimeout || 10000,
|
||||
socketTimeoutMS: this.config.timeouts?.socketTimeout || 30000,
|
||||
serverSelectionTimeoutMS: this.config.timeouts?.serverSelectionTimeout || 5000,
|
||||
});
|
||||
|
||||
await this.client.connect();
|
||||
await this.client.db(this.defaultDatabase).admin().ping();
|
||||
|
||||
// Set default database from config
|
||||
this.db = this.client.db(this.defaultDatabase);
|
||||
this.isConnected = true;
|
||||
|
||||
// Update metrics
|
||||
this.metrics.totalConnections = this.config.poolSettings?.maxPoolSize || 10;
|
||||
this.metrics.idleConnections = this.metrics.totalConnections;
|
||||
|
||||
// Fire connection event
|
||||
if (this.events?.onConnect) {
|
||||
await Promise.resolve(this.events.onConnect());
|
||||
}
|
||||
|
||||
// Fire pool created event
|
||||
if (this.events?.onPoolCreated) {
|
||||
await Promise.resolve(this.events.onPoolCreated());
|
||||
}
|
||||
|
||||
this.logger.info('Successfully connected to MongoDB', {
|
||||
database: this.defaultDatabase,
|
||||
poolSize: this.metrics.totalConnections,
|
||||
});
|
||||
|
||||
// Start pool monitoring if dynamic sizing is enabled
|
||||
if (this.dynamicPoolConfig?.enabled) {
|
||||
this.startPoolMonitoring();
|
||||
}
|
||||
} catch (error) {
|
||||
this.metrics.errors++;
|
||||
this.metrics.lastError = error instanceof Error ? error.message : 'Unknown error';
|
||||
|
||||
// Fire error event
|
||||
if (this.events?.onError) {
|
||||
await Promise.resolve(this.events.onError(error as Error));
|
||||
}
|
||||
|
||||
this.logger.error('MongoDB connection failed:', error);
|
||||
if (this.client) {
|
||||
await this.client.close();
|
||||
this.client = null;
|
||||
}
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Disconnect from MongoDB
|
||||
*/
|
||||
async disconnect(): Promise<void> {
|
||||
if (!this.client) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
// Stop pool monitoring
|
||||
if (this.poolMonitorInterval) {
|
||||
clearInterval(this.poolMonitorInterval);
|
||||
this.poolMonitorInterval = undefined;
|
||||
}
|
||||
|
||||
await this.client.close();
|
||||
this.isConnected = false;
|
||||
this.client = null;
|
||||
this.db = null;
|
||||
|
||||
// Fire disconnect event
|
||||
if (this.events?.onDisconnect) {
|
||||
await Promise.resolve(this.events.onDisconnect());
|
||||
}
|
||||
|
||||
this.logger.info('Disconnected from MongoDB');
|
||||
} catch (error) {
|
||||
this.logger.error('Error disconnecting from MongoDB:', error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the default database for operations
|
||||
*/
|
||||
setDefaultDatabase(databaseName: string): void {
|
||||
this.defaultDatabase = databaseName;
|
||||
if (this.client) {
|
||||
this.db = this.client.db(databaseName);
|
||||
this.logger.debug(`Default database changed to: ${databaseName}`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the current default database name
|
||||
*/
|
||||
getDefaultDatabase(): string {
|
||||
return this.defaultDatabase;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a database instance by name
|
||||
*/
|
||||
getDatabase(databaseName?: string): Db {
|
||||
if (!this.client) {
|
||||
throw new Error('MongoDB client not connected');
|
||||
}
|
||||
|
||||
const dbName = databaseName || this.defaultDatabase;
|
||||
return this.client.db(dbName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Batch upsert documents for high-performance operations
|
||||
* Supports single or multiple unique keys for matching
|
||||
*/
|
||||
async batchUpsert<T extends DocumentBase>(
|
||||
collectionName: string,
|
||||
documents: Array<
|
||||
Omit<T, '_id' | 'created_at' | 'updated_at'> & Partial<Pick<T, 'created_at' | 'updated_at'>>
|
||||
>,
|
||||
uniqueKeys: string | string[],
|
||||
options: {
|
||||
chunkSize?: number;
|
||||
database?: string; // Optional database override
|
||||
} = {}
|
||||
): Promise<{ insertedCount: number; updatedCount: number; errors: unknown[] }> {
|
||||
if (!this.client) {
|
||||
throw new Error('MongoDB client not connected');
|
||||
}
|
||||
|
||||
if (documents.length === 0) {
|
||||
return { insertedCount: 0, updatedCount: 0, errors: [] };
|
||||
}
|
||||
|
||||
// Normalize uniqueKeys to array
|
||||
const keyFields = Array.isArray(uniqueKeys) ? uniqueKeys : [uniqueKeys];
|
||||
|
||||
if (keyFields.length === 0) {
|
||||
throw new Error('At least one unique key must be provided');
|
||||
}
|
||||
|
||||
const { chunkSize = 10000, database } = options;
|
||||
const db = this.getDatabase(database);
|
||||
const collection = db.collection<T>(collectionName);
|
||||
const operationId = Math.random().toString(36).substring(7);
|
||||
const dbName = database || this.defaultDatabase;
|
||||
|
||||
let totalInserted = 0;
|
||||
let totalUpdated = 0;
|
||||
const errors: unknown[] = [];
|
||||
|
||||
this.logger.info(`Starting batch upsert operation [${collectionName}-${documents.length}][${operationId}]`, {
|
||||
database: dbName,
|
||||
collection: collectionName,
|
||||
totalDocuments: documents.length,
|
||||
uniqueKeys: keyFields,
|
||||
chunkSize,
|
||||
});
|
||||
|
||||
// Process documents in chunks to avoid memory issues
|
||||
for (let i = 0; i < documents.length; i += chunkSize) {
|
||||
const chunk = documents.slice(i, i + chunkSize);
|
||||
|
||||
try {
|
||||
const startTime = Date.now();
|
||||
|
||||
// Prepare bulk operations
|
||||
const bulkOps = chunk.map(doc => {
|
||||
const now = new Date();
|
||||
const docWithTimestamps = {
|
||||
...doc,
|
||||
created_at: doc.created_at || now,
|
||||
updated_at: now,
|
||||
};
|
||||
|
||||
// Create filter using multiple unique keys
|
||||
const filter: Record<string, unknown> = {};
|
||||
keyFields.forEach(key => {
|
||||
const value = (doc as Record<string, unknown>)[key];
|
||||
if (value === undefined || value === null) {
|
||||
throw new Error(`Document missing required unique key: ${key}`);
|
||||
}
|
||||
filter[key] = value;
|
||||
});
|
||||
|
||||
// Remove created_at from $set to avoid conflict with $setOnInsert
|
||||
const { created_at, ...updateFields } = docWithTimestamps;
|
||||
|
||||
return {
|
||||
updateOne: {
|
||||
filter,
|
||||
update: {
|
||||
$set: updateFields,
|
||||
$setOnInsert: { created_at },
|
||||
},
|
||||
upsert: true,
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
// Execute bulk operation with type assertion to handle complex MongoDB types
|
||||
const result = await collection.bulkWrite(bulkOps as never, { ordered: false });
|
||||
|
||||
const executionTime = Date.now() - startTime;
|
||||
const inserted = result.upsertedCount;
|
||||
const updated = result.modifiedCount;
|
||||
|
||||
totalInserted += inserted;
|
||||
totalUpdated += updated;
|
||||
|
||||
this.logger.debug(`Batch upsert chunk processed [${operationId}]`, {
|
||||
chunkNumber: Math.floor(i / chunkSize) + 1,
|
||||
chunkSize: chunk.length,
|
||||
inserted,
|
||||
updated,
|
||||
executionTime,
|
||||
database: dbName,
|
||||
collection: collectionName,
|
||||
});
|
||||
} catch (error) {
|
||||
this.logger.error(`Batch upsert failed on chunk [${operationId}]`, {
|
||||
error,
|
||||
database: dbName,
|
||||
collection: collectionName,
|
||||
chunkNumber: Math.floor(i / chunkSize) + 1,
|
||||
chunkStart: i,
|
||||
chunkSize: chunk.length,
|
||||
uniqueKeys: keyFields,
|
||||
});
|
||||
errors.push(error);
|
||||
}
|
||||
}
|
||||
|
||||
this.logger.info(`Batch upsert completed [${operationId}]`, {
|
||||
database: dbName,
|
||||
collection: collectionName,
|
||||
totalRecords: documents.length,
|
||||
inserted: totalInserted,
|
||||
updated: totalUpdated,
|
||||
errors: errors.length,
|
||||
uniqueKeys: keyFields,
|
||||
});
|
||||
|
||||
return { insertedCount: totalInserted, updatedCount: totalUpdated, errors };
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a typed collection
|
||||
*/
|
||||
getCollection<T extends DocumentBase>(name: string, database?: string): Collection<T> {
|
||||
const db = this.getDatabase(database);
|
||||
return db.collection<T>(name);
|
||||
}
|
||||
|
||||
/**
|
||||
* Simple insert operation
|
||||
*/
|
||||
async insertOne<T extends DocumentBase>(
|
||||
collectionName: string,
|
||||
document: Omit<T, '_id' | 'created_at' | 'updated_at'> &
|
||||
Partial<Pick<T, 'created_at' | 'updated_at'>>,
|
||||
database?: string
|
||||
): Promise<T> {
|
||||
const collection = this.getCollection<T>(collectionName, database);
|
||||
|
||||
const now = new Date();
|
||||
const docWithTimestamps = {
|
||||
...document,
|
||||
created_at: document.created_at || now,
|
||||
updated_at: now,
|
||||
} as T;
|
||||
|
||||
const result = await collection.insertOne(docWithTimestamps as OptionalUnlessRequiredId<T>);
|
||||
return { ...docWithTimestamps, _id: result.insertedId } as T;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if client is connected
|
||||
*/
|
||||
get connected(): boolean {
|
||||
return this.isConnected && !!this.client;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the default database instance
|
||||
*/
|
||||
get database(): Db | null {
|
||||
return this.db;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convenience methods for common databases
|
||||
*/
|
||||
|
||||
// Stock database operations
|
||||
async batchUpsertStock<T extends DocumentBase>(
|
||||
collectionName: string,
|
||||
documents: Array<
|
||||
Omit<T, '_id' | 'created_at' | 'updated_at'> & Partial<Pick<T, 'created_at' | 'updated_at'>>
|
||||
>,
|
||||
uniqueKeys: string | string[],
|
||||
options: { chunkSize?: number } = {}
|
||||
) {
|
||||
return this.batchUpsert(collectionName, documents, uniqueKeys, {
|
||||
...options,
|
||||
database: 'stock',
|
||||
});
|
||||
}
|
||||
|
||||
// Trading documents database operations
|
||||
async batchUpsertTrading<T extends DocumentBase>(
|
||||
collectionName: string,
|
||||
documents: Array<
|
||||
Omit<T, '_id' | 'created_at' | 'updated_at'> & Partial<Pick<T, 'created_at' | 'updated_at'>>
|
||||
>,
|
||||
uniqueKeys: string | string[],
|
||||
options: { chunkSize?: number } = {}
|
||||
) {
|
||||
return this.batchUpsert(collectionName, documents, uniqueKeys, {
|
||||
...options,
|
||||
database: 'trading_documents',
|
||||
});
|
||||
}
|
||||
|
||||
// Analytics database operations
|
||||
async batchUpsertAnalytics<T extends DocumentBase>(
|
||||
collectionName: string,
|
||||
documents: Array<
|
||||
Omit<T, '_id' | 'created_at' | 'updated_at'> & Partial<Pick<T, 'created_at' | 'updated_at'>>
|
||||
>,
|
||||
uniqueKeys: string | string[],
|
||||
options: { chunkSize?: number } = {}
|
||||
) {
|
||||
return this.batchUpsert(collectionName, documents, uniqueKeys, {
|
||||
...options,
|
||||
database: 'analytics',
|
||||
});
|
||||
}
|
||||
|
||||
private buildConnectionUri(): string {
|
||||
if (this.config.uri) {
|
||||
return this.config.uri;
|
||||
}
|
||||
|
||||
const { host, port, username, password, database, authSource } = this.config;
|
||||
|
||||
// Build URI components
|
||||
const auth = username && password ? `${username}:${password}@` : '';
|
||||
const authParam = authSource && username ? `?authSource=${authSource}` : '';
|
||||
|
||||
return `mongodb://${auth}${host}:${port}/${database}${authParam}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current pool metrics
|
||||
*/
|
||||
getPoolMetrics(): PoolMetrics {
|
||||
// Update last used timestamp
|
||||
this.metrics.lastUsed = new Date();
|
||||
|
||||
// Note: MongoDB driver doesn't expose detailed pool metrics
|
||||
// These are estimates based on configuration
|
||||
return { ...this.metrics };
|
||||
}
|
||||
|
||||
/**
|
||||
* Set dynamic pool configuration
|
||||
*/
|
||||
setDynamicPoolConfig(config: DynamicPoolConfig): void {
|
||||
this.dynamicPoolConfig = config;
|
||||
|
||||
if (config.enabled && this.isConnected && !this.poolMonitorInterval) {
|
||||
this.startPoolMonitoring();
|
||||
} else if (!config.enabled && this.poolMonitorInterval) {
|
||||
clearInterval(this.poolMonitorInterval);
|
||||
this.poolMonitorInterval = undefined;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Start monitoring pool and adjust size dynamically
|
||||
*/
|
||||
private startPoolMonitoring(): void {
|
||||
if (!this.dynamicPoolConfig || this.poolMonitorInterval) {
|
||||
return;
|
||||
}
|
||||
|
||||
this.poolMonitorInterval = setInterval(() => {
|
||||
this.evaluatePoolSize();
|
||||
}, this.dynamicPoolConfig.evaluationInterval);
|
||||
}
|
||||
|
||||
/**
|
||||
* Evaluate and adjust pool size based on usage
|
||||
*/
|
||||
private async evaluatePoolSize(): Promise<void> {
|
||||
if (!this.dynamicPoolConfig || !this.client) {
|
||||
return;
|
||||
}
|
||||
|
||||
const { minSize, maxSize, scaleUpThreshold, scaleDownThreshold } = this.dynamicPoolConfig;
|
||||
const currentSize = this.metrics.totalConnections;
|
||||
const utilization = ((this.metrics.activeConnections / currentSize) * 100);
|
||||
|
||||
this.logger.debug('Pool utilization', {
|
||||
utilization: `${utilization.toFixed(1)}%`,
|
||||
active: this.metrics.activeConnections,
|
||||
total: currentSize,
|
||||
});
|
||||
|
||||
// Scale up if utilization is high
|
||||
if (utilization > scaleUpThreshold && currentSize < maxSize) {
|
||||
const newSize = Math.min(currentSize + this.dynamicPoolConfig.scaleUpIncrement, maxSize);
|
||||
await this.resizePool(newSize);
|
||||
this.logger.info('Scaling up connection pool', { from: currentSize, to: newSize, utilization });
|
||||
}
|
||||
// Scale down if utilization is low
|
||||
else if (utilization < scaleDownThreshold && currentSize > minSize) {
|
||||
const newSize = Math.max(currentSize - this.dynamicPoolConfig.scaleDownIncrement, minSize);
|
||||
await this.resizePool(newSize);
|
||||
this.logger.info('Scaling down connection pool', { from: currentSize, to: newSize, utilization });
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Resize the connection pool
|
||||
* Note: MongoDB driver doesn't support dynamic resizing, this would require reconnection
|
||||
*/
|
||||
private async resizePool(newSize: number): Promise<void> {
|
||||
// MongoDB doesn't support dynamic pool resizing
|
||||
// This is a placeholder for future implementation
|
||||
this.logger.warn('Dynamic pool resizing not yet implemented for MongoDB', { requestedSize: newSize });
|
||||
|
||||
// Update metrics to reflect desired state
|
||||
this.metrics.totalConnections = newSize;
|
||||
}
|
||||
|
||||
/**
|
||||
* Enable pool warmup on connect
|
||||
*/
|
||||
async warmupPool(): Promise<void> {
|
||||
if (!this.client || !this.isConnected) {
|
||||
throw new Error('Client not connected');
|
||||
}
|
||||
|
||||
const minSize = this.config.poolSettings?.minPoolSize || 1;
|
||||
const promises: Promise<void>[] = [];
|
||||
|
||||
// Create minimum connections by running parallel pings
|
||||
for (let i = 0; i < minSize; i++) {
|
||||
promises.push(
|
||||
this.client.db(this.defaultDatabase).admin().ping()
|
||||
.then(() => {
|
||||
this.logger.debug(`Warmed up connection ${i + 1}/${minSize}`);
|
||||
})
|
||||
.catch(error => {
|
||||
this.logger.warn(`Failed to warm up connection ${i + 1}`, { error });
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
await Promise.allSettled(promises);
|
||||
this.logger.info('Connection pool warmup complete', { connections: minSize });
|
||||
}
|
||||
}
|
||||
21
libs/data/mongodb/src/factory.ts
Normal file
21
libs/data/mongodb/src/factory.ts
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
import { MongoDBClient } from './client';
|
||||
import type { MongoDBClientConfig, ConnectionEvents } from './types';
|
||||
|
||||
/**
|
||||
* Factory function to create a MongoDB client instance
|
||||
*/
|
||||
export function createMongoDBClient(config: MongoDBClientConfig, events?: ConnectionEvents): MongoDBClient {
|
||||
return new MongoDBClient(config, events);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create and connect a MongoDB client
|
||||
*/
|
||||
export async function createAndConnectMongoDBClient(
|
||||
config: MongoDBClientConfig,
|
||||
events?: ConnectionEvents
|
||||
): Promise<MongoDBClient> {
|
||||
const client = createMongoDBClient(config, events);
|
||||
await client.connect();
|
||||
return client;
|
||||
}
|
||||
34
libs/data/mongodb/src/index.ts
Normal file
34
libs/data/mongodb/src/index.ts
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
/**
|
||||
* MongoDB Client Library for Stock Bot Data Service
|
||||
*
|
||||
* Provides a MongoDB client focused on batch upsert operations
|
||||
* for high-performance data ingestion.
|
||||
*/
|
||||
|
||||
export { MongoDBClient } from './client';
|
||||
|
||||
// Types
|
||||
export type {
|
||||
AnalystReport,
|
||||
DocumentBase,
|
||||
EarningsTranscript,
|
||||
ExchangeSourceMapping,
|
||||
MasterExchange,
|
||||
MongoDBClientConfig,
|
||||
MongoDBConnectionOptions,
|
||||
NewsArticle,
|
||||
RawDocument,
|
||||
SecFiling,
|
||||
SentimentData,
|
||||
PoolMetrics,
|
||||
ConnectionEvents,
|
||||
DynamicPoolConfig,
|
||||
} from './types';
|
||||
|
||||
// Factory functions
|
||||
export {
|
||||
createMongoDBClient,
|
||||
createAndConnectMongoDBClient,
|
||||
} from './factory';
|
||||
|
||||
// Singleton pattern removed - use factory functions instead
|
||||
261
libs/data/mongodb/src/types.ts
Normal file
261
libs/data/mongodb/src/types.ts
Normal file
|
|
@ -0,0 +1,261 @@
|
|||
import type { ObjectId } from 'mongodb';
|
||||
|
||||
/**
|
||||
* MongoDB Client Configuration
|
||||
*/
|
||||
/**
 * MongoDB Client Configuration
 *
 * Either supply a full `uri`, or host/port/database (plus optional
 * credentials) from which the client builds a connection string.
 */
export interface MongoDBClientConfig {
  host: string;
  port: number;
  // Default database; the client falls back to 'stock' when empty.
  database: string;
  username?: string;
  password?: string;
  // Authentication database, appended to the URI as ?authSource=...
  authSource?: string;
  // Full connection URI; when set it takes precedence over host/port.
  uri?: string;
  // Connection pool sizing.
  poolSettings?: {
    maxPoolSize: number;
    minPoolSize: number;
    maxIdleTime: number; // presumably milliseconds — not consumed by the client shown here, confirm
  };
  // Timeouts in milliseconds (mapped onto the driver's *TimeoutMS options).
  timeouts?: {
    connectTimeout: number;
    socketTimeout: number;
    serverSelectionTimeout: number;
  };
  // TLS settings. NOTE(review): `insecure` presumably skips certificate
  // validation — confirm where these are applied.
  tls?: {
    enabled: boolean;
    insecure: boolean;
    caFile?: string;
  };
  // Driver behavior options.
  options?: {
    retryWrites: boolean;
    journal: boolean;
    readPreference: 'primary' | 'primaryPreferred' | 'secondary' | 'secondaryPreferred' | 'nearest';
    writeConcern: string;
  };
}
|
||||
|
||||
/**
|
||||
* MongoDB Connection Options
|
||||
*/
|
||||
/**
 * MongoDB Connection Options
 *
 * Retry/health tuning knobs. NOTE(review): not consumed by the client
 * shown in this file — confirm where these are used.
 */
export interface MongoDBConnectionOptions {
  retryAttempts?: number;
  retryDelay?: number;
  healthCheckInterval?: number;
}
|
||||
|
||||
/**
 * Snapshot of connection-pool counters tracked by MongoDBClient.
 *
 * Values are estimates derived from configuration — the MongoDB driver
 * does not expose real per-connection pool metrics.
 */
export interface PoolMetrics {
  totalConnections: number;
  activeConnections: number;
  idleConnections: number;
  waitingRequests: number;
  // Count of connection-level failures recorded so far.
  errors: number;
  // Message of the most recent error, if any.
  lastError?: string;
  avgResponseTime?: number;
  // When this metrics object was created.
  created: Date;
  // Last time the metrics were read via getPoolMetrics().
  lastUsed?: Date;
}
|
||||
|
||||
/**
 * Optional lifecycle callbacks fired by MongoDBClient.
 * Each callback may be synchronous or return a promise that is awaited.
 */
export interface ConnectionEvents {
  // Fired after a successful connect.
  onConnect?: () => void | Promise<void>;
  // Fired after a clean disconnect.
  onDisconnect?: () => void | Promise<void>;
  // Fired when connecting fails.
  onError?: (error: Error) => void | Promise<void>;
  // Fired once the connection pool has been created.
  onPoolCreated?: () => void | Promise<void>;
}
|
||||
|
||||
/**
 * Settings for utilization-based dynamic pool sizing.
 * Evaluated periodically by MongoDBClient when `enabled` is true.
 */
export interface DynamicPoolConfig {
  enabled: boolean;
  minSize: number;
  maxSize: number;
  scaleUpThreshold: number; // % of pool in use (0-100)
  scaleDownThreshold: number; // % of pool idle (0-100)
  scaleUpIncrement: number; // connections to add
  scaleDownIncrement: number; // connections to remove
  evaluationInterval: number; // ms between checks
}
|
||||
|
||||
/**
|
||||
* Health Status Types
|
||||
*/
|
||||
/** Coarse health classification for a MongoDB connection. */
export type MongoDBHealthStatus = 'healthy' | 'degraded' | 'unhealthy';
|
||||
|
||||
/**
 * Result of a health probe.
 *
 * NOTE(review): no health-check method is visible on the client in this
 * file — confirm what produces this structure.
 */
export interface MongoDBHealthCheck {
  status: MongoDBHealthStatus;
  timestamp: Date;
  // Probe round-trip time (presumably milliseconds — confirm producer).
  latency: number;
  // Connection-pool snapshot at probe time.
  connections: {
    active: number;
    available: number;
    total: number;
  };
  errors?: string[];
}
|
||||
|
||||
/**
 * Aggregate operational metrics for a MongoDB client.
 * NOTE(review): not produced by the client shown in this file — confirm usage.
 */
export interface MongoDBMetrics {
  operationsPerSecond: number;
  averageLatency: number;
  errorRate: number;
  connectionPoolUtilization: number;
  documentsProcessed: number;
}
|
||||
|
||||
/**
|
||||
* Base Document Interface
|
||||
*/
|
||||
/**
 * Base Document Interface
 *
 * Fields shared by every stored document. `created_at`/`updated_at` are
 * maintained automatically by the client's insertOne/batchUpsert helpers.
 */
export interface DocumentBase {
  _id?: ObjectId;
  // Set once on insert ($setOnInsert in batchUpsert).
  created_at: Date;
  // Refreshed on every write.
  updated_at: Date;
  // Origin identifier for the document.
  source: string;
  metadata?: Record<string, unknown>;
}
|
||||
|
||||
/**
|
||||
* Sentiment Data Document
|
||||
*/
|
||||
export interface SentimentData extends DocumentBase {
|
||||
symbol: string;
|
||||
sentiment_score: number;
|
||||
sentiment_label: 'positive' | 'negative' | 'neutral';
|
||||
confidence: number;
|
||||
text: string;
|
||||
source_type: 'reddit' | 'twitter' | 'news' | 'forums';
|
||||
source_id: string;
|
||||
timestamp: Date;
|
||||
processed_at: Date;
|
||||
language: string;
|
||||
keywords: string[];
|
||||
entities: Array<{
|
||||
name: string;
|
||||
type: string;
|
||||
confidence: number;
|
||||
}>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Raw Document
|
||||
*/
|
||||
export interface RawDocument extends DocumentBase {
|
||||
document_type: 'html' | 'pdf' | 'text' | 'json' | 'xml';
|
||||
content: string;
|
||||
content_hash: string;
|
||||
url?: string;
|
||||
title?: string;
|
||||
author?: string;
|
||||
published_date?: Date;
|
||||
extracted_text?: string;
|
||||
processing_status: 'pending' | 'processed' | 'failed';
|
||||
size_bytes: number;
|
||||
language?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* News Article
|
||||
*/
|
||||
export interface NewsArticle extends DocumentBase {
|
||||
headline: string;
|
||||
content: string;
|
||||
summary?: string;
|
||||
author: string;
|
||||
publication: string;
|
||||
published_date: Date;
|
||||
url: string;
|
||||
symbols: string[];
|
||||
categories: string[];
|
||||
sentiment_score?: number;
|
||||
relevance_score?: number;
|
||||
image_url?: string;
|
||||
tags: string[];
|
||||
}
|
||||
|
||||
/**
|
||||
* SEC Filing
|
||||
*/
|
||||
export interface SecFiling extends DocumentBase {
|
||||
cik: string;
|
||||
accession_number: string;
|
||||
filing_type: string;
|
||||
company_name: string;
|
||||
symbols: string[];
|
||||
filing_date: Date;
|
||||
period_end_date: Date;
|
||||
url: string;
|
||||
content: string;
|
||||
extracted_data?: Record<string, unknown>;
|
||||
financial_statements?: Array<{
|
||||
statement_type: string;
|
||||
data: Record<string, number>;
|
||||
}>;
|
||||
processing_status: 'pending' | 'processed' | 'failed';
|
||||
}
|
||||
|
||||
/**
|
||||
* Earnings Transcript
|
||||
*/
|
||||
export interface EarningsTranscript extends DocumentBase {
|
||||
symbol: string;
|
||||
company_name: string;
|
||||
quarter: string;
|
||||
year: number;
|
||||
call_date: Date;
|
||||
transcript: string;
|
||||
participants: Array<{
|
||||
name: string;
|
||||
title: string;
|
||||
type: 'executive' | 'analyst';
|
||||
}>;
|
||||
key_topics: string[];
|
||||
sentiment_analysis?: {
|
||||
overall_sentiment: number;
|
||||
topic_sentiments: Record<string, number>;
|
||||
};
|
||||
financial_highlights?: Record<string, number>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Analyst Report
|
||||
*/
|
||||
export interface AnalystReport extends DocumentBase {
|
||||
symbol: string;
|
||||
analyst_firm: string;
|
||||
analyst_name: string;
|
||||
report_title: string;
|
||||
report_date: Date;
|
||||
rating: 'buy' | 'hold' | 'sell' | 'strong_buy' | 'strong_sell';
|
||||
price_target?: number;
|
||||
previous_rating?: string;
|
||||
content: string;
|
||||
summary: string;
|
||||
key_points: string[];
|
||||
financial_projections?: Record<string, number>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Exchange-related types
|
||||
*/
|
||||
/**
 * How one upstream data source identifies an exchange.
 */
export interface ExchangeSourceMapping {
  // The source's own identifier for the exchange.
  id: string;
  name: string;
  code?: string;
  // Alternative names the source uses for the same exchange.
  aliases?: string[];
  lastUpdated: Date;
}
|
||||
|
||||
export interface MasterExchange extends DocumentBase {
|
||||
masterExchangeId: string;
|
||||
shortName?: string;
|
||||
officialName: string;
|
||||
country: string;
|
||||
currency: string;
|
||||
timezone: string;
|
||||
active?: boolean;
|
||||
|
||||
tradingHours?: {
|
||||
open: string;
|
||||
close: string;
|
||||
timezone: string;
|
||||
};
|
||||
|
||||
sourceMappings: Record<string, ExchangeSourceMapping>;
|
||||
confidence: number;
|
||||
verified: boolean;
|
||||
}
|
||||
13
libs/data/mongodb/tsconfig.json
Normal file
13
libs/data/mongodb/tsconfig.json
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
{
|
||||
"extends": "../../../tsconfig.json",
|
||||
"compilerOptions": {
|
||||
"outDir": "./dist",
|
||||
"rootDir": "./src",
|
||||
"composite": true
|
||||
},
|
||||
"include": ["src/**/*"],
|
||||
"references": [
|
||||
{ "path": "../../core/logger" },
|
||||
{ "path": "../../core/types" }
|
||||
]
|
||||
}
|
||||
82
libs/data/postgres/README.md
Normal file
82
libs/data/postgres/README.md
Normal file
|
|
@ -0,0 +1,82 @@
|
|||
# PostgreSQL Client Library
|
||||
|
||||
A comprehensive PostgreSQL client library for the Stock Bot trading platform, designed for operational data, transactions, and relational queries.
|
||||
|
||||
## Features
|
||||
|
||||
- **Connection Pooling**: Robust connection pool management
|
||||
- **Type Safety**: Full TypeScript support with typed queries
|
||||
- **Transaction Support**: Multi-statement transactions with rollback
|
||||
- **Schema Management**: Database schema validation and migrations
|
||||
- **Query Builder**: Fluent query building interface
|
||||
- **Health Monitoring**: Connection health monitoring and metrics
|
||||
- **Performance Tracking**: Query performance monitoring and optimization
|
||||
|
||||
## Usage
|
||||
|
||||
```typescript
|
||||
import { PostgreSQLClient } from '@stock-bot/postgres';
|
||||
|
||||
// Initialize client
|
||||
const pgClient = new PostgreSQLClient();
|
||||
await pgClient.connect();
|
||||
|
||||
// Execute a query
|
||||
const users = await pgClient.query('SELECT * FROM users WHERE active = $1', [true]);
|
||||
|
||||
// Use query builder
|
||||
const trades = await pgClient
|
||||
.select('*')
|
||||
.from('trades')
|
||||
.where('symbol', '=', 'AAPL')
|
||||
.orderBy('created_at', 'DESC')
|
||||
.limit(10)
|
||||
.execute();
|
||||
|
||||
// Execute in transaction
|
||||
await pgClient.transaction(async (tx) => {
|
||||
await tx.query('INSERT INTO trades (...) VALUES (...)', []);
|
||||
await tx.query('UPDATE portfolio SET balance = balance - $1', [amount]);
|
||||
});
|
||||
```
|
||||
|
||||
## Database Schemas
|
||||
|
||||
The client provides typed access to the following schemas:
|
||||
|
||||
- **trading**: Core trading operations (trades, orders, positions)
|
||||
- **strategy**: Strategy definitions and performance
|
||||
- **risk**: Risk management and compliance
|
||||
- **audit**: Audit trails and logging
|
||||
|
||||
## Configuration
|
||||
|
||||
Configure using environment variables:
|
||||
|
||||
```env
|
||||
POSTGRES_HOST=localhost
|
||||
POSTGRES_PORT=5432
|
||||
POSTGRES_DATABASE=stockbot
|
||||
POSTGRES_USERNAME=stockbot
|
||||
POSTGRES_PASSWORD=your_password
|
||||
```
|
||||
|
||||
## Query Builder
|
||||
|
||||
The fluent query builder supports:
|
||||
|
||||
- SELECT, INSERT, UPDATE, DELETE operations
|
||||
- Complex WHERE conditions with AND/OR logic
|
||||
- JOINs (INNER, LEFT, RIGHT, FULL)
|
||||
- Aggregations (COUNT, SUM, AVG, etc.)
|
||||
- Subqueries and CTEs
|
||||
- Window functions
|
||||
|
||||
## Health Monitoring
|
||||
|
||||
The client includes built-in health monitoring:
|
||||
|
||||
```typescript
|
||||
const health = await pgClient.getHealth();
|
||||
console.log(health.status); // 'healthy' | 'degraded' | 'unhealthy'
|
||||
```
|
||||
46
libs/data/postgres/package.json
Normal file
46
libs/data/postgres/package.json
Normal file
|
|
@ -0,0 +1,46 @@
|
|||
{
|
||||
"name": "@stock-bot/postgres",
|
||||
"version": "1.0.0",
|
||||
"description": "PostgreSQL client library for Stock Bot platform",
|
||||
"main": "dist/index.js",
|
||||
"types": "dist/index.d.ts",
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"build": "tsc",
|
||||
"test": "bun test",
|
||||
"lint": "eslint src/**/*.ts",
|
||||
"type-check": "tsc --noEmit",
|
||||
"clean": "rimraf dist"
|
||||
},
|
||||
"dependencies": {
|
||||
"@stock-bot/logger": "*",
|
||||
"@stock-bot/types": "*",
|
||||
"pg": "^8.11.3"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^20.11.0",
|
||||
"@types/pg": "^8.10.7",
|
||||
"typescript": "^5.3.0",
|
||||
"eslint": "^8.56.0",
|
||||
"@typescript-eslint/eslint-plugin": "^6.19.0",
|
||||
"@typescript-eslint/parser": "^6.19.0",
|
||||
"bun-types": "^1.2.15"
|
||||
},
|
||||
"keywords": [
|
||||
"postgresql",
|
||||
"database",
|
||||
"client",
|
||||
"stock-bot"
|
||||
],
|
||||
"exports": {
|
||||
".": {
|
||||
"import": "./dist/index.js",
|
||||
"require": "./dist/index.js",
|
||||
"types": "./dist/index.d.ts"
|
||||
}
|
||||
},
|
||||
"files": [
|
||||
"dist",
|
||||
"README.md"
|
||||
]
|
||||
}
|
||||
605
libs/data/postgres/src/client.ts
Normal file
605
libs/data/postgres/src/client.ts
Normal file
|
|
@ -0,0 +1,605 @@
|
|||
import { Pool, QueryResultRow } from 'pg';
import { getLogger } from '@stock-bot/logger';
import { PostgreSQLHealthMonitor } from './health';
import { PostgreSQLQueryBuilder } from './query-builder';
import { PostgreSQLTransactionManager } from './transactions';
import type {
  PostgreSQLClientConfig,
  PostgreSQLConnectionOptions,
  PostgreSQLHealthCheck,
  QueryResult,
  TransactionCallback,
  PoolMetrics,
  ConnectionEvents,
  DynamicPoolConfig,
} from './types';
|
||||
|
||||
/**
|
||||
* PostgreSQL Client for Stock Bot
|
||||
*
|
||||
* Provides type-safe access to PostgreSQL with connection pooling,
|
||||
* health monitoring, and transaction support.
|
||||
*/
|
||||
export class PostgreSQLClient {
|
||||
private pool: Pool | null = null;
|
||||
private readonly config: PostgreSQLClientConfig;
|
||||
private readonly options: PostgreSQLConnectionOptions;
|
||||
private readonly logger: ReturnType<typeof getLogger>;
|
||||
private readonly healthMonitor: PostgreSQLHealthMonitor;
|
||||
private readonly transactionManager: PostgreSQLTransactionManager;
|
||||
private isConnected = false;
|
||||
private readonly metrics: PoolMetrics;
|
||||
private readonly events?: ConnectionEvents;
|
||||
private dynamicPoolConfig?: DynamicPoolConfig;
|
||||
private poolMonitorInterval?: NodeJS.Timeout;
|
||||
|
||||
constructor(config: PostgreSQLClientConfig, options?: PostgreSQLConnectionOptions, events?: ConnectionEvents) {
|
||||
this.config = config;
|
||||
this.options = {
|
||||
retryAttempts: 3,
|
||||
retryDelay: 1000,
|
||||
healthCheckInterval: 30000,
|
||||
...options,
|
||||
};
|
||||
this.events = events;
|
||||
|
||||
this.logger = getLogger('postgres-client');
|
||||
this.healthMonitor = new PostgreSQLHealthMonitor(this);
|
||||
this.transactionManager = new PostgreSQLTransactionManager(this);
|
||||
|
||||
this.metrics = {
|
||||
totalConnections: 0,
|
||||
activeConnections: 0,
|
||||
idleConnections: 0,
|
||||
waitingRequests: 0,
|
||||
errors: 0,
|
||||
created: new Date(),
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Connect to PostgreSQL
|
||||
*/
|
||||
async connect(): Promise<void> {
|
||||
if (this.isConnected && this.pool) {
|
||||
return;
|
||||
}
|
||||
|
||||
let lastError: Error | null = null;
|
||||
|
||||
for (let attempt = 1; attempt <= (this.options.retryAttempts ?? 3); attempt++) {
|
||||
try {
|
||||
this.logger.info(
|
||||
`Connecting to PostgreSQL (attempt ${attempt}/${this.options.retryAttempts})...`
|
||||
);
|
||||
|
||||
this.pool = new Pool(this.buildPoolConfig());
|
||||
|
||||
// Test the connection
|
||||
const client = await this.pool.connect();
|
||||
await client.query('SELECT 1');
|
||||
client.release();
|
||||
|
||||
this.isConnected = true;
|
||||
|
||||
// Update metrics
|
||||
const poolConfig = this.config.poolSettings;
|
||||
this.metrics.totalConnections = poolConfig?.max || 10;
|
||||
this.metrics.idleConnections = poolConfig?.min || 2;
|
||||
|
||||
// Fire connection event
|
||||
if (this.events?.onConnect) {
|
||||
await Promise.resolve(this.events.onConnect());
|
||||
}
|
||||
|
||||
// Fire pool created event
|
||||
if (this.events?.onPoolCreated) {
|
||||
await Promise.resolve(this.events.onPoolCreated());
|
||||
}
|
||||
|
||||
this.logger.info('Successfully connected to PostgreSQL', {
|
||||
poolSize: this.metrics.totalConnections,
|
||||
});
|
||||
|
||||
// Start health monitoring
|
||||
this.healthMonitor.start();
|
||||
|
||||
// Setup error handlers
|
||||
this.setupErrorHandlers();
|
||||
|
||||
// Setup pool event listeners for metrics
|
||||
this.setupPoolMetrics();
|
||||
|
||||
// Start dynamic pool monitoring if enabled
|
||||
if (this.dynamicPoolConfig?.enabled) {
|
||||
this.startPoolMonitoring();
|
||||
}
|
||||
|
||||
return;
|
||||
} catch (error) {
|
||||
lastError = error as Error;
|
||||
this.metrics.errors++;
|
||||
this.metrics.lastError = lastError.message;
|
||||
|
||||
// Fire error event
|
||||
if (this.events?.onError) {
|
||||
await Promise.resolve(this.events.onError(lastError));
|
||||
}
|
||||
|
||||
this.logger.error(`PostgreSQL connection attempt ${attempt} failed:`, error);
|
||||
|
||||
if (this.pool) {
|
||||
await this.pool.end();
|
||||
this.pool = null;
|
||||
}
|
||||
|
||||
if (attempt < (this.options.retryAttempts ?? 3)) {
|
||||
await this.delay((this.options.retryDelay ?? 1000) * attempt);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
throw new Error(
|
||||
`Failed to connect to PostgreSQL after ${this.options.retryAttempts} attempts: ${lastError?.message}`
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Disconnect from PostgreSQL
|
||||
*/
|
||||
async disconnect(): Promise<void> {
|
||||
if (!this.pool) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
// Stop pool monitoring
|
||||
if (this.poolMonitorInterval) {
|
||||
clearInterval(this.poolMonitorInterval);
|
||||
this.poolMonitorInterval = undefined;
|
||||
}
|
||||
|
||||
this.healthMonitor.stop();
|
||||
await this.pool.end();
|
||||
this.isConnected = false;
|
||||
this.pool = null;
|
||||
|
||||
// Fire disconnect event
|
||||
if (this.events?.onDisconnect) {
|
||||
await Promise.resolve(this.events.onDisconnect());
|
||||
}
|
||||
|
||||
this.logger.info('Disconnected from PostgreSQL');
|
||||
} catch (error) {
|
||||
this.logger.error('Error disconnecting from PostgreSQL:', error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a query
|
||||
*/
|
||||
async query<T extends QueryResultRow = any>(
|
||||
text: string,
|
||||
params?: any[]
|
||||
): Promise<QueryResult<T>> {
|
||||
if (!this.pool) {
|
||||
throw new Error('PostgreSQL client not connected');
|
||||
}
|
||||
|
||||
const startTime = Date.now();
|
||||
|
||||
try {
|
||||
const result = await this.pool.query<T>(text, params);
|
||||
const executionTime = Date.now() - startTime;
|
||||
|
||||
this.logger.debug(`Query executed in ${executionTime}ms`, {
|
||||
query: text.substring(0, 100),
|
||||
params: params?.length,
|
||||
});
|
||||
|
||||
return {
|
||||
...result,
|
||||
executionTime,
|
||||
} as QueryResult<T>;
|
||||
} catch (error) {
|
||||
const executionTime = Date.now() - startTime;
|
||||
this.logger.error(`Query failed after ${executionTime}ms:`, {
|
||||
error,
|
||||
query: text,
|
||||
params,
|
||||
});
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute multiple queries in a transaction
|
||||
*/
|
||||
async transaction<T>(callback: TransactionCallback<T>): Promise<T> {
|
||||
return await this.transactionManager.execute(callback);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a query builder instance
|
||||
*/
|
||||
queryBuilder(): PostgreSQLQueryBuilder {
|
||||
return new PostgreSQLQueryBuilder(this);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new query builder with SELECT
|
||||
*/
|
||||
select(columns: string | string[] = '*'): PostgreSQLQueryBuilder {
|
||||
return this.queryBuilder().select(columns);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new query builder with INSERT
|
||||
*/
|
||||
insert(table: string): PostgreSQLQueryBuilder {
|
||||
return this.queryBuilder().insert(table);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new query builder with UPDATE
|
||||
*/
|
||||
update(table: string): PostgreSQLQueryBuilder {
|
||||
return this.queryBuilder().update(table);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new query builder with DELETE
|
||||
*/
|
||||
delete(table: string): PostgreSQLQueryBuilder {
|
||||
return this.queryBuilder().delete(table);
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a stored procedure or function
|
||||
*/
|
||||
async callFunction<T extends QueryResultRow = any>(
|
||||
functionName: string,
|
||||
params?: any[]
|
||||
): Promise<QueryResult<T>> {
|
||||
const placeholders = params ? params.map((_, i) => `$${i + 1}`).join(', ') : '';
|
||||
const query = `SELECT * FROM ${functionName}(${placeholders})`;
|
||||
return await this.query<T>(query, params);
|
||||
}
|
||||
|
||||
/**
|
||||
* Batch upsert operation for high-performance inserts/updates
|
||||
*/
|
||||
async batchUpsert(
|
||||
tableName: string,
|
||||
data: Record<string, unknown>[],
|
||||
conflictColumn: string,
|
||||
options: {
|
||||
chunkSize?: number;
|
||||
excludeColumns?: string[];
|
||||
} = {}
|
||||
): Promise<{ insertedCount: number; updatedCount: number }> {
|
||||
if (!this.pool) {
|
||||
throw new Error('PostgreSQL client not connected');
|
||||
}
|
||||
|
||||
if (data.length === 0) {
|
||||
return { insertedCount: 0, updatedCount: 0 };
|
||||
}
|
||||
|
||||
const { chunkSize = 1000, excludeColumns = [] } = options;
|
||||
const columns = Object.keys(data[0] ?? {}).filter(col => !excludeColumns.includes(col));
|
||||
const updateColumns = columns.filter(col => col !== conflictColumn);
|
||||
|
||||
let totalInserted = 0;
|
||||
let totalUpdated = 0;
|
||||
|
||||
// Process in chunks to avoid memory issues and parameter limits
|
||||
for (let i = 0; i < data.length; i += chunkSize) {
|
||||
const chunk = data.slice(i, i + chunkSize);
|
||||
|
||||
// Build placeholders for this chunk
|
||||
const placeholders = chunk.map((_, rowIndex) => {
|
||||
const rowPlaceholders = columns.map((_, colIndex) => {
|
||||
return `$${rowIndex * columns.length + colIndex + 1}`;
|
||||
});
|
||||
return `(${rowPlaceholders.join(', ')})`;
|
||||
});
|
||||
|
||||
// Flatten the chunk data
|
||||
const values = chunk.flatMap(row => columns.map(col => row[col as keyof typeof row]));
|
||||
|
||||
// Build the upsert query
|
||||
const updateClauses = updateColumns.map(col => `${col} = EXCLUDED.${col}`);
|
||||
const query = `
|
||||
INSERT INTO ${tableName} (${columns.join(', ')})
|
||||
VALUES ${placeholders.join(', ')}
|
||||
ON CONFLICT (${conflictColumn})
|
||||
DO UPDATE SET
|
||||
${updateClauses.join(', ')},
|
||||
updated_at = NOW()
|
||||
RETURNING (xmax = 0) AS is_insert
|
||||
`;
|
||||
|
||||
try {
|
||||
const startTime = Date.now();
|
||||
const result = await this.pool.query(query, values);
|
||||
const executionTime = Date.now() - startTime;
|
||||
|
||||
// Count inserts vs updates
|
||||
const inserted = result.rows.filter((row: { is_insert: boolean }) => row.is_insert).length;
|
||||
const updated = result.rows.length - inserted;
|
||||
|
||||
totalInserted += inserted;
|
||||
totalUpdated += updated;
|
||||
|
||||
this.logger.debug(`Batch upsert chunk processed in ${executionTime}ms`, {
|
||||
chunkSize: chunk.length,
|
||||
inserted,
|
||||
updated,
|
||||
table: tableName,
|
||||
});
|
||||
} catch (error) {
|
||||
this.logger.error(`Batch upsert failed on chunk ${Math.floor(i / chunkSize) + 1}:`, {
|
||||
error,
|
||||
table: tableName,
|
||||
chunkStart: i,
|
||||
chunkSize: chunk.length,
|
||||
});
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
this.logger.info('Batch upsert completed', {
|
||||
table: tableName,
|
||||
totalRecords: data.length,
|
||||
inserted: totalInserted,
|
||||
updated: totalUpdated,
|
||||
});
|
||||
|
||||
return { insertedCount: totalInserted, updatedCount: totalUpdated };
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a table exists
|
||||
*/
|
||||
async tableExists(tableName: string, schemaName: string = 'public'): Promise<boolean> {
|
||||
const result = await this.query(
|
||||
`SELECT EXISTS (
|
||||
SELECT FROM information_schema.tables
|
||||
WHERE table_schema = $1 AND table_name = $2
|
||||
)`,
|
||||
[schemaName, tableName]
|
||||
);
|
||||
return result.rows[0].exists;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get table schema information
|
||||
*/
|
||||
async getTableSchema(tableName: string, schemaName: string = 'public'): Promise<any[]> {
|
||||
const result = await this.query(
|
||||
`SELECT
|
||||
column_name,
|
||||
data_type,
|
||||
is_nullable,
|
||||
column_default,
|
||||
character_maximum_length
|
||||
FROM information_schema.columns
|
||||
WHERE table_schema = $1 AND table_name = $2
|
||||
ORDER BY ordinal_position`,
|
||||
[schemaName, tableName]
|
||||
);
|
||||
return result.rows;
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute EXPLAIN for query analysis
|
||||
*/
|
||||
async explain(query: string, params?: any[]): Promise<any[]> {
|
||||
const explainQuery = `EXPLAIN (ANALYZE, BUFFERS, FORMAT JSON) ${query}`;
|
||||
const result = await this.query(explainQuery, params);
|
||||
return result.rows[0]['QUERY PLAN'];
|
||||
}
|
||||
|
||||
/**
|
||||
* Get database statistics
|
||||
*/
|
||||
async getStats(): Promise<any> {
|
||||
const result = await this.query(`
|
||||
SELECT
|
||||
(SELECT count(*) FROM pg_stat_activity WHERE state = 'active') as active_connections,
|
||||
(SELECT count(*) FROM pg_stat_activity WHERE state = 'idle') as idle_connections,
|
||||
(SELECT setting FROM pg_settings WHERE name = 'max_connections') as max_connections,
|
||||
pg_size_pretty(pg_database_size(current_database())) as database_size
|
||||
`);
|
||||
return result.rows[0];
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if client is connected
|
||||
*/
|
||||
get connected(): boolean {
|
||||
return this.isConnected && !!this.pool;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the underlying connection pool
|
||||
*/
|
||||
get connectionPool(): Pool | null {
|
||||
return this.pool;
|
||||
}
|
||||
|
||||
|
||||
private buildPoolConfig(): any {
|
||||
return {
|
||||
host: this.config.host,
|
||||
port: this.config.port,
|
||||
database: this.config.database,
|
||||
user: this.config.username,
|
||||
password: this.config.password,
|
||||
min: this.config.poolSettings?.min,
|
||||
max: this.config.poolSettings?.max,
|
||||
idleTimeoutMillis: this.config.poolSettings?.idleTimeoutMillis,
|
||||
connectionTimeoutMillis: this.config.timeouts?.connection,
|
||||
query_timeout: this.config.timeouts?.query,
|
||||
statement_timeout: this.config.timeouts?.statement,
|
||||
lock_timeout: this.config.timeouts?.lock,
|
||||
idle_in_transaction_session_timeout: this.config.timeouts?.idleInTransaction,
|
||||
ssl: this.config.ssl?.enabled
|
||||
? {
|
||||
rejectUnauthorized: this.config.ssl.rejectUnauthorized,
|
||||
}
|
||||
: false,
|
||||
};
|
||||
}
|
||||
|
||||
private setupErrorHandlers(): void {
|
||||
if (!this.pool) {
|
||||
return;
|
||||
}
|
||||
|
||||
this.pool.on('error', error => {
|
||||
this.logger.error('PostgreSQL pool error:', error);
|
||||
});
|
||||
|
||||
this.pool.on('connect', () => {
|
||||
this.logger.debug('New PostgreSQL client connected');
|
||||
});
|
||||
|
||||
this.pool.on('remove', () => {
|
||||
this.logger.debug('PostgreSQL client removed from pool');
|
||||
});
|
||||
}
|
||||
|
||||
private delay(ms: number): Promise<void> {
|
||||
return new Promise(resolve => setTimeout(resolve, ms));
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current pool metrics
|
||||
*/
|
||||
getPoolMetrics(): PoolMetrics {
|
||||
// Update last used timestamp
|
||||
this.metrics.lastUsed = new Date();
|
||||
|
||||
// Update metrics from pool if available
|
||||
if (this.pool) {
|
||||
this.metrics.totalConnections = this.pool.totalCount;
|
||||
this.metrics.idleConnections = this.pool.idleCount;
|
||||
this.metrics.waitingRequests = this.pool.waitingCount;
|
||||
this.metrics.activeConnections = this.metrics.totalConnections - this.metrics.idleConnections;
|
||||
}
|
||||
|
||||
return { ...this.metrics };
|
||||
}
|
||||
|
||||
/**
|
||||
* Set dynamic pool configuration
|
||||
*/
|
||||
setDynamicPoolConfig(config: DynamicPoolConfig): void {
|
||||
this.dynamicPoolConfig = config;
|
||||
|
||||
if (config.enabled && this.isConnected && !this.poolMonitorInterval) {
|
||||
this.startPoolMonitoring();
|
||||
} else if (!config.enabled && this.poolMonitorInterval) {
|
||||
clearInterval(this.poolMonitorInterval);
|
||||
this.poolMonitorInterval = undefined;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Start monitoring pool and adjust size dynamically
|
||||
*/
|
||||
private startPoolMonitoring(): void {
|
||||
if (!this.dynamicPoolConfig || this.poolMonitorInterval) {
|
||||
return;
|
||||
}
|
||||
|
||||
this.poolMonitorInterval = setInterval(() => {
|
||||
this.evaluatePoolSize();
|
||||
}, this.dynamicPoolConfig.evaluationInterval);
|
||||
}
|
||||
|
||||
/**
|
||||
* Setup pool event listeners for metrics
|
||||
*/
|
||||
private setupPoolMetrics(): void {
|
||||
if (!this.pool) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Track when connections are acquired
|
||||
this.pool.on('acquire', () => {
|
||||
this.metrics.activeConnections++;
|
||||
this.metrics.idleConnections--;
|
||||
});
|
||||
|
||||
// Track when connections are released
|
||||
this.pool.on('release', () => {
|
||||
this.metrics.activeConnections--;
|
||||
this.metrics.idleConnections++;
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Evaluate and adjust pool size based on usage
|
||||
*/
|
||||
private async evaluatePoolSize(): Promise<void> {
|
||||
if (!this.dynamicPoolConfig || !this.pool) {
|
||||
return;
|
||||
}
|
||||
|
||||
const metrics = this.getPoolMetrics();
|
||||
const { minSize, maxSize, scaleUpThreshold, scaleDownThreshold } = this.dynamicPoolConfig;
|
||||
const currentSize = metrics.totalConnections;
|
||||
const utilization = currentSize > 0 ? ((metrics.activeConnections / currentSize) * 100) : 0;
|
||||
|
||||
this.logger.debug('Pool utilization', {
|
||||
utilization: `${utilization.toFixed(1)}%`,
|
||||
active: metrics.activeConnections,
|
||||
total: currentSize,
|
||||
waiting: metrics.waitingRequests,
|
||||
});
|
||||
|
||||
// Scale up if utilization is high or there are waiting requests
|
||||
if ((utilization > scaleUpThreshold || metrics.waitingRequests > 0) && currentSize < maxSize) {
|
||||
const newSize = Math.min(currentSize + this.dynamicPoolConfig.scaleUpIncrement, maxSize);
|
||||
this.logger.info('Would scale up connection pool', { from: currentSize, to: newSize, utilization });
|
||||
// Note: pg module doesn't support dynamic resizing, would need reconnection
|
||||
}
|
||||
// Scale down if utilization is low
|
||||
else if (utilization < scaleDownThreshold && currentSize > minSize) {
|
||||
const newSize = Math.max(currentSize - this.dynamicPoolConfig.scaleDownIncrement, minSize);
|
||||
this.logger.info('Would scale down connection pool', { from: currentSize, to: newSize, utilization });
|
||||
// Note: pg module doesn't support dynamic resizing, would need reconnection
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Enable pool warmup on connect
|
||||
*/
|
||||
async warmupPool(): Promise<void> {
|
||||
if (!this.pool || !this.isConnected) {
|
||||
throw new Error('Client not connected');
|
||||
}
|
||||
|
||||
const minSize = this.config.poolSettings?.min || 2;
|
||||
const promises: Promise<void>[] = [];
|
||||
|
||||
// Create minimum connections by running parallel queries
|
||||
for (let i = 0; i < minSize; i++) {
|
||||
promises.push(
|
||||
this.pool.query('SELECT 1')
|
||||
.then(() => {
|
||||
this.logger.debug(`Warmed up connection ${i + 1}/${minSize}`);
|
||||
})
|
||||
.catch(error => {
|
||||
this.logger.warn(`Failed to warm up connection ${i + 1}`, { error });
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
await Promise.allSettled(promises);
|
||||
this.logger.info('Connection pool warmup complete', { connections: minSize });
|
||||
}
|
||||
}
|
||||
27
libs/data/postgres/src/factory.ts
Normal file
27
libs/data/postgres/src/factory.ts
Normal file
|
|
@ -0,0 +1,27 @@
|
|||
import { PostgreSQLClient } from './client';
|
||||
import type { PostgreSQLClientConfig, PostgreSQLConnectionOptions, ConnectionEvents } from './types';
|
||||
|
||||
/**
|
||||
* Factory function to create a PostgreSQL client instance
|
||||
*/
|
||||
export function createPostgreSQLClient(
|
||||
config: PostgreSQLClientConfig,
|
||||
options?: PostgreSQLConnectionOptions,
|
||||
events?: ConnectionEvents
|
||||
): PostgreSQLClient {
|
||||
return new PostgreSQLClient(config, options, events);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create and connect a PostgreSQL client
|
||||
*/
|
||||
export async function createAndConnectPostgreSQLClient(
|
||||
config: PostgreSQLClientConfig,
|
||||
options?: PostgreSQLConnectionOptions,
|
||||
events?: ConnectionEvents
|
||||
): Promise<PostgreSQLClient> {
|
||||
const client = createPostgreSQLClient(config, options, events);
|
||||
await client.connect();
|
||||
return client;
|
||||
}
|
||||
|
||||
145
libs/data/postgres/src/health.ts
Normal file
145
libs/data/postgres/src/health.ts
Normal file
|
|
@ -0,0 +1,145 @@
|
|||
import { getLogger } from '@stock-bot/logger';
|
||||
import type { PostgreSQLClient } from './client';
|
||||
import type { PostgreSQLHealthCheck, PostgreSQLHealthStatus, PostgreSQLMetrics } from './types';
|
||||
|
||||
/**
|
||||
* PostgreSQL Health Monitor
|
||||
*
|
||||
* Monitors PostgreSQL connection health and provides metrics
|
||||
*/
|
||||
export class PostgreSQLHealthMonitor {
|
||||
private readonly client: PostgreSQLClient;
|
||||
private readonly logger: ReturnType<typeof getLogger>;
|
||||
private healthCheckInterval: NodeJS.Timeout | null = null;
|
||||
private metrics: PostgreSQLMetrics;
|
||||
private lastHealthCheck: PostgreSQLHealthCheck | null = null;
|
||||
|
||||
constructor(client: PostgreSQLClient) {
|
||||
this.client = client;
|
||||
this.logger = getLogger('postgres-health-monitor');
|
||||
this.metrics = {
|
||||
queriesPerSecond: 0,
|
||||
averageQueryTime: 0,
|
||||
errorRate: 0,
|
||||
connectionPoolUtilization: 0,
|
||||
slowQueries: 0,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Start health monitoring
|
||||
*/
|
||||
start(intervalMs: number = 30000): void {
|
||||
if (this.healthCheckInterval) {
|
||||
this.stop();
|
||||
}
|
||||
|
||||
this.logger.info(`Starting PostgreSQL health monitoring (interval: ${intervalMs}ms)`);
|
||||
|
||||
this.healthCheckInterval = setInterval(async () => {
|
||||
try {
|
||||
await this.performHealthCheck();
|
||||
} catch (error) {
|
||||
this.logger.error('Health check failed:', error);
|
||||
}
|
||||
}, intervalMs);
|
||||
|
||||
// Perform initial health check
|
||||
this.performHealthCheck().catch(error => {
|
||||
this.logger.error('Initial health check failed:', error);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Stop health monitoring
|
||||
*/
|
||||
stop(): void {
|
||||
if (this.healthCheckInterval) {
|
||||
clearInterval(this.healthCheckInterval);
|
||||
this.healthCheckInterval = null;
|
||||
this.logger.info('Stopped PostgreSQL health monitoring');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current health status
|
||||
*/
|
||||
async getHealth(): Promise<PostgreSQLHealthCheck> {
|
||||
if (!this.lastHealthCheck) {
|
||||
await this.performHealthCheck();
|
||||
}
|
||||
if (!this.lastHealthCheck) {
|
||||
throw new Error('Health check failed to produce results');
|
||||
}
|
||||
return this.lastHealthCheck;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current metrics
|
||||
*/
|
||||
getMetrics(): PostgreSQLMetrics {
|
||||
return { ...this.metrics };
|
||||
}
|
||||
|
||||
/**
|
||||
* Perform a health check
|
||||
*/
|
||||
private async performHealthCheck(): Promise<void> {
|
||||
const startTime = Date.now();
|
||||
const errors: string[] = [];
|
||||
let status: PostgreSQLHealthStatus = 'healthy';
|
||||
|
||||
try {
|
||||
if (!this.client.connected) {
|
||||
errors.push('PostgreSQL client not connected');
|
||||
status = 'unhealthy';
|
||||
} else {
|
||||
// Test basic connectivity
|
||||
await this.client.query('SELECT 1');
|
||||
|
||||
// Get connection stats
|
||||
const stats = await this.client.getStats();
|
||||
|
||||
// Check connection pool utilization
|
||||
const utilization = parseInt(stats.active_connections) / parseInt(stats.max_connections);
|
||||
if (utilization > 0.8) {
|
||||
errors.push('High connection pool utilization');
|
||||
status = status === 'healthy' ? 'degraded' : status;
|
||||
}
|
||||
|
||||
// Check for high latency
|
||||
const latency = Date.now() - startTime;
|
||||
if (latency > 1000) {
|
||||
errors.push(`High latency: ${latency}ms`);
|
||||
status = status === 'healthy' ? 'degraded' : status;
|
||||
}
|
||||
|
||||
this.metrics.connectionPoolUtilization = utilization;
|
||||
}
|
||||
} catch (error) {
|
||||
errors.push(`Health check failed: ${(error as Error).message}`);
|
||||
status = 'unhealthy';
|
||||
}
|
||||
|
||||
const latency = Date.now() - startTime;
|
||||
|
||||
this.lastHealthCheck = {
|
||||
status,
|
||||
timestamp: new Date(),
|
||||
latency,
|
||||
connections: {
|
||||
active: 1,
|
||||
idle: 9,
|
||||
total: 10,
|
||||
},
|
||||
errors: errors.length > 0 ? errors : undefined,
|
||||
};
|
||||
|
||||
// Log health status changes
|
||||
if (status !== 'healthy') {
|
||||
this.logger.warn(`PostgreSQL health status: ${status}`, { errors, latency });
|
||||
} else {
|
||||
this.logger.debug(`PostgreSQL health check passed (${latency}ms)`);
|
||||
}
|
||||
}
|
||||
}
|
||||
42
libs/data/postgres/src/index.ts
Normal file
42
libs/data/postgres/src/index.ts
Normal file
|
|
@ -0,0 +1,42 @@
|
|||
/**
 * PostgreSQL Client Library for Stock Bot
 *
 * Provides type-safe PostgreSQL access for operational data,
 * transactions, and relational queries.
 */

// Core runtime classes.
export { PostgreSQLClient } from './client';
export { PostgreSQLHealthMonitor } from './health';
export { PostgreSQLTransactionManager } from './transactions';
export { PostgreSQLQueryBuilder } from './query-builder';
// export { PostgreSQLMigrationManager } from './migrations'; // TODO: Implement migrations

// Types (type-only re-exports; erased at compile time).
export type {
  PostgreSQLClientConfig,
  PostgreSQLConnectionOptions,
  PostgreSQLHealthStatus,
  PostgreSQLMetrics,
  QueryResult,
  TransactionCallback,
  SchemaNames,
  TableNames,
  Trade,
  Order,
  Position,
  Portfolio,
  Strategy,
  RiskLimit,
  AuditLog,
  PoolMetrics,
  ConnectionEvents,
  DynamicPoolConfig,
} from './types';

// Factory functions — the preferred entry points for constructing clients.
export {
  createPostgreSQLClient,
  createAndConnectPostgreSQLClient,
} from './factory';

// Singleton pattern removed - use factory functions instead
|
||||
270
libs/data/postgres/src/query-builder.ts
Normal file
270
libs/data/postgres/src/query-builder.ts
Normal file
|
|
@ -0,0 +1,270 @@
|
|||
import type { QueryResultRow } from 'pg';
|
||||
import type { PostgreSQLClient } from './client';
|
||||
import type { JoinCondition, OrderByCondition, QueryResult, WhereCondition } from './types';
|
||||
|
||||
/**
|
||||
* PostgreSQL Query Builder
|
||||
*
|
||||
* Provides a fluent interface for building SQL queries
|
||||
*/
|
||||
export class PostgreSQLQueryBuilder {
|
||||
private queryType: 'SELECT' | 'INSERT' | 'UPDATE' | 'DELETE' | null = null;
|
||||
private selectColumns: string[] = [];
|
||||
private fromTable: string = '';
|
||||
private joins: JoinCondition[] = [];
|
||||
private whereConditions: WhereCondition[] = [];
|
||||
private groupByColumns: string[] = [];
|
||||
private havingConditions: WhereCondition[] = [];
|
||||
private orderByConditions: OrderByCondition[] = [];
|
||||
private limitCount: number | null = null;
|
||||
private offsetCount: number | null = null;
|
||||
private insertValues: Record<string, any> = {};
|
||||
private updateValues: Record<string, any> = {};
|
||||
|
||||
private readonly client: PostgreSQLClient;
|
||||
|
||||
constructor(client: PostgreSQLClient) {
|
||||
this.client = client;
|
||||
}
|
||||
|
||||
/**
|
||||
* SELECT statement
|
||||
*/
|
||||
select(columns: string | string[] = '*'): this {
|
||||
this.queryType = 'SELECT';
|
||||
this.selectColumns = Array.isArray(columns) ? columns : [columns];
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* FROM clause
|
||||
*/
|
||||
from(table: string): this {
|
||||
this.fromTable = table;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* JOIN clause
|
||||
*/
|
||||
join(table: string, on: string, type: 'INNER' | 'LEFT' | 'RIGHT' | 'FULL' = 'INNER'): this {
|
||||
this.joins.push({ type, table, on });
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* WHERE clause
|
||||
*/
|
||||
where(column: string, operator: string, value?: any): this {
|
||||
this.whereConditions.push({ column, operator: operator as any, value });
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* GROUP BY clause
|
||||
*/
|
||||
groupBy(columns: string | string[]): this {
|
||||
this.groupByColumns = Array.isArray(columns) ? columns : [columns];
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* ORDER BY clause
|
||||
*/
|
||||
orderBy(column: string, direction: 'ASC' | 'DESC' = 'ASC'): this {
|
||||
this.orderByConditions.push({ column, direction });
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* LIMIT clause
|
||||
*/
|
||||
limit(count: number): this {
|
||||
this.limitCount = count;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* OFFSET clause
|
||||
*/
|
||||
offset(count: number): this {
|
||||
this.offsetCount = count;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* INSERT statement
|
||||
*/
|
||||
insert(table: string): this {
|
||||
this.queryType = 'INSERT';
|
||||
this.fromTable = table;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* VALUES for INSERT
|
||||
*/
|
||||
values(data: Record<string, any>): this {
|
||||
this.insertValues = data;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* UPDATE statement
|
||||
*/
|
||||
update(table: string): this {
|
||||
this.queryType = 'UPDATE';
|
||||
this.fromTable = table;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* SET for UPDATE
|
||||
*/
|
||||
set(data: Record<string, any>): this {
|
||||
this.updateValues = data;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* DELETE statement
|
||||
*/
|
||||
delete(table: string): this {
|
||||
this.queryType = 'DELETE';
|
||||
this.fromTable = table;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build and execute the query
|
||||
*/
|
||||
async execute<T extends QueryResultRow = any>(): Promise<QueryResult<T>> {
|
||||
const { sql, params } = this.build();
|
||||
return await this.client.query<T>(sql, params);
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the SQL query
|
||||
*/
|
||||
build(): { sql: string; params: any[] } {
|
||||
const params: any[] = [];
|
||||
let sql = '';
|
||||
|
||||
switch (this.queryType) {
|
||||
case 'SELECT':
|
||||
sql = this.buildSelectQuery(params);
|
||||
break;
|
||||
case 'INSERT':
|
||||
sql = this.buildInsertQuery(params);
|
||||
break;
|
||||
case 'UPDATE':
|
||||
sql = this.buildUpdateQuery(params);
|
||||
break;
|
||||
case 'DELETE':
|
||||
sql = this.buildDeleteQuery(params);
|
||||
break;
|
||||
default:
|
||||
throw new Error('Query type not specified');
|
||||
}
|
||||
|
||||
return { sql, params };
|
||||
}
|
||||
|
||||
private buildSelectQuery(params: any[]): string {
|
||||
let sql = `SELECT ${this.selectColumns.join(', ')}`;
|
||||
|
||||
if (this.fromTable) {
|
||||
sql += ` FROM ${this.fromTable}`;
|
||||
}
|
||||
|
||||
// Add JOINs
|
||||
for (const join of this.joins) {
|
||||
sql += ` ${join.type} JOIN ${join.table} ON ${join.on}`;
|
||||
}
|
||||
|
||||
// Add WHERE
|
||||
if (this.whereConditions.length > 0) {
|
||||
sql += ' WHERE ' + this.buildWhereClause(this.whereConditions, params);
|
||||
}
|
||||
|
||||
// Add GROUP BY
|
||||
if (this.groupByColumns.length > 0) {
|
||||
sql += ` GROUP BY ${this.groupByColumns.join(', ')}`;
|
||||
}
|
||||
|
||||
// Add HAVING
|
||||
if (this.havingConditions.length > 0) {
|
||||
sql += ' HAVING ' + this.buildWhereClause(this.havingConditions, params);
|
||||
}
|
||||
|
||||
// Add ORDER BY
|
||||
if (this.orderByConditions.length > 0) {
|
||||
const orderBy = this.orderByConditions
|
||||
.map(order => `${order.column} ${order.direction}`)
|
||||
.join(', ');
|
||||
sql += ` ORDER BY ${orderBy}`;
|
||||
}
|
||||
|
||||
// Add LIMIT
|
||||
if (this.limitCount !== null) {
|
||||
sql += ` LIMIT $${params.length + 1}`;
|
||||
params.push(this.limitCount);
|
||||
}
|
||||
|
||||
// Add OFFSET
|
||||
if (this.offsetCount !== null) {
|
||||
sql += ` OFFSET $${params.length + 1}`;
|
||||
params.push(this.offsetCount);
|
||||
}
|
||||
|
||||
return sql;
|
||||
}
|
||||
|
||||
private buildInsertQuery(params: any[]): string {
|
||||
const columns = Object.keys(this.insertValues);
|
||||
const placeholders = columns.map((_, i) => `$${params.length + i + 1}`);
|
||||
|
||||
params.push(...Object.values(this.insertValues));
|
||||
|
||||
return `INSERT INTO ${this.fromTable} (${columns.join(', ')}) VALUES (${placeholders.join(', ')})`;
|
||||
}
|
||||
|
||||
private buildUpdateQuery(params: any[]): string {
|
||||
const sets = Object.keys(this.updateValues).map((key, i) => {
|
||||
return `${key} = $${params.length + i + 1}`;
|
||||
});
|
||||
|
||||
params.push(...Object.values(this.updateValues));
|
||||
|
||||
let sql = `UPDATE ${this.fromTable} SET ${sets.join(', ')}`;
|
||||
|
||||
if (this.whereConditions.length > 0) {
|
||||
sql += ' WHERE ' + this.buildWhereClause(this.whereConditions, params);
|
||||
}
|
||||
|
||||
return sql;
|
||||
}
|
||||
|
||||
private buildDeleteQuery(params: any[]): string {
|
||||
let sql = `DELETE FROM ${this.fromTable}`;
|
||||
|
||||
if (this.whereConditions.length > 0) {
|
||||
sql += ' WHERE ' + this.buildWhereClause(this.whereConditions, params);
|
||||
}
|
||||
|
||||
return sql;
|
||||
}
|
||||
|
||||
private buildWhereClause(conditions: WhereCondition[], params: any[]): string {
|
||||
return conditions
|
||||
.map(condition => {
|
||||
if (condition.operator === 'IS NULL' || condition.operator === 'IS NOT NULL') {
|
||||
return `${condition.column} ${condition.operator}`;
|
||||
} else {
|
||||
params.push(condition.value);
|
||||
return `${condition.column} ${condition.operator} $${params.length}`;
|
||||
}
|
||||
})
|
||||
.join(' AND ');
|
||||
}
|
||||
}
|
||||
55
libs/data/postgres/src/transactions.ts
Normal file
55
libs/data/postgres/src/transactions.ts
Normal file
|
|
@ -0,0 +1,55 @@
|
|||
import { getLogger } from '@stock-bot/logger';
|
||||
import type { PostgreSQLClient } from './client';
|
||||
import type { TransactionCallback } from './types';
|
||||
|
||||
/**
|
||||
* PostgreSQL Transaction Manager
|
||||
*
|
||||
* Provides transaction support for multi-statement operations
|
||||
*/
|
||||
export class PostgreSQLTransactionManager {
|
||||
private readonly client: PostgreSQLClient;
|
||||
private readonly logger: ReturnType<typeof getLogger>;
|
||||
|
||||
constructor(client: PostgreSQLClient) {
|
||||
this.client = client;
|
||||
this.logger = getLogger('postgres-transaction-manager');
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute operations within a transaction
|
||||
*/
|
||||
async execute<T>(callback: TransactionCallback<T>): Promise<T> {
|
||||
const pool = this.client.connectionPool;
|
||||
if (!pool) {
|
||||
throw new Error('PostgreSQL client not connected');
|
||||
}
|
||||
|
||||
const client = await pool.connect();
|
||||
|
||||
try {
|
||||
this.logger.debug('Starting PostgreSQL transaction');
|
||||
|
||||
await client.query('BEGIN');
|
||||
|
||||
const result = await callback(client);
|
||||
|
||||
await client.query('COMMIT');
|
||||
|
||||
this.logger.debug('PostgreSQL transaction committed successfully');
|
||||
return result;
|
||||
} catch (error) {
|
||||
this.logger.error('PostgreSQL transaction failed, rolling back:', error);
|
||||
|
||||
try {
|
||||
await client.query('ROLLBACK');
|
||||
} catch (rollbackError) {
|
||||
this.logger.error('Failed to rollback transaction:', rollbackError);
|
||||
}
|
||||
|
||||
throw error;
|
||||
} finally {
|
||||
client.release();
|
||||
}
|
||||
}
|
||||
}
|
||||
248
libs/data/postgres/src/types.ts
Normal file
248
libs/data/postgres/src/types.ts
Normal file
|
|
@ -0,0 +1,248 @@
|
|||
import type { QueryResult as PgQueryResult, PoolClient, QueryResultRow } from 'pg';
|
||||
|
||||
/**
 * PostgreSQL Client Configuration
 *
 * Connection, pooling, TLS and timeout settings consumed by the client.
 */
export interface PostgreSQLClientConfig {
  host: string;
  port: number;
  database: string;
  username: string;
  password: string;
  /** Optional connection-pool sizing. */
  poolSettings?: {
    min: number;
    max: number;
    idleTimeoutMillis: number;
  };
  /** Optional TLS settings. */
  ssl?: {
    enabled: boolean;
    rejectUnauthorized: boolean;
  };
  /** Optional timeout values — presumably milliseconds; confirm against the client. */
  timeouts?: {
    query: number;
    connection: number;
    statement: number;
    lock: number;
    idleInTransaction: number;
  };
}

/**
 * PostgreSQL Connection Options
 *
 * Retry and health-check behaviour applied when connecting.
 */
export interface PostgreSQLConnectionOptions {
  retryAttempts?: number;
  retryDelay?: number;
  healthCheckInterval?: number;
}

/** Point-in-time statistics describing a connection pool. */
export interface PoolMetrics {
  totalConnections: number;
  activeConnections: number;
  idleConnections: number;
  waitingRequests: number;
  errors: number;
  lastError?: string;
  avgResponseTime?: number;
  created: Date;
  lastUsed?: Date;
}

/** Optional lifecycle callbacks for connection events; may be async. */
export interface ConnectionEvents {
  onConnect?: () => void | Promise<void>;
  onDisconnect?: () => void | Promise<void>;
  onError?: (error: Error) => void | Promise<void>;
  onPoolCreated?: () => void | Promise<void>;
}

/** Settings for automatic pool resizing between minSize and maxSize. */
export interface DynamicPoolConfig {
  enabled: boolean;
  minSize: number;
  maxSize: number;
  scaleUpThreshold: number; // % of pool in use (0-100)
  scaleDownThreshold: number; // % of pool idle (0-100)
  scaleUpIncrement: number; // connections to add
  scaleDownIncrement: number; // connections to remove
  evaluationInterval: number; // ms between checks
}
|
||||
|
||||
/**
 * Health Status Types
 */
/** Overall health classification reported by health checks. */
export type PostgreSQLHealthStatus = 'healthy' | 'degraded' | 'unhealthy';

/** Result of a single health probe, including pool occupancy. */
export interface PostgreSQLHealthCheck {
  status: PostgreSQLHealthStatus;
  timestamp: Date;
  // Probe round-trip time — presumably milliseconds; confirm against the health monitor.
  latency: number;
  connections: {
    active: number;
    idle: number;
    total: number;
  };
  errors?: string[];
}

/** Aggregate query/connection statistics. */
export interface PostgreSQLMetrics {
  queriesPerSecond: number;
  averageQueryTime: number;
  errorRate: number;
  connectionPoolUtilization: number;
  slowQueries: number;
}

/**
 * Query Result Types
 */
/** pg QueryResult extended with an optional execution-time measurement. */
export interface QueryResult<T extends QueryResultRow = any> extends PgQueryResult<T> {
  executionTime?: number;
}

/** Callback executed with a dedicated pooled client inside a transaction. */
export type TransactionCallback<T> = (client: PoolClient) => Promise<T>;

/**
 * Schema and Table Names
 */
/** Known database schemas. */
export type SchemaNames = 'trading' | 'strategy' | 'risk' | 'audit';

/** Known table names across all schemas. */
export type TableNames =
  | 'trades'
  | 'orders'
  | 'positions'
  | 'portfolios'
  | 'strategies'
  | 'risk_limits'
  | 'audit_logs'
  | 'users'
  | 'accounts'
  | 'symbols'
  | 'exchanges';
|
||||
|
||||
/**
 * Trading Domain Types
 *
 * Row shapes for the trading tables; snake_case fields mirror column names.
 */
/** An executed fill belonging to an order. */
export interface Trade {
  id: string;
  order_id: string;
  symbol: string;
  side: 'buy' | 'sell';
  quantity: number;
  price: number;
  executed_at: Date;
  commission: number;
  fees: number;
  portfolio_id: string;
  strategy_id?: string;
  created_at: Date;
  updated_at: Date;
}

/** A buy/sell instruction and its lifecycle status. */
export interface Order {
  id: string;
  symbol: string;
  side: 'buy' | 'sell';
  type: 'market' | 'limit' | 'stop' | 'stop_limit';
  quantity: number;
  price?: number;
  stop_price?: number;
  status: 'pending' | 'filled' | 'cancelled' | 'rejected';
  portfolio_id: string;
  strategy_id?: string;
  created_at: Date;
  updated_at: Date;
  expires_at?: Date;
}

/** Current holding of a symbol within a portfolio. */
export interface Position {
  id: string;
  symbol: string;
  quantity: number;
  average_cost: number;
  market_value: number;
  unrealized_pnl: number;
  realized_pnl: number;
  portfolio_id: string;
  created_at: Date;
  updated_at: Date;
}

/** A user's portfolio with cash and P&L aggregates. */
export interface Portfolio {
  id: string;
  name: string;
  cash_balance: number;
  total_value: number;
  unrealized_pnl: number;
  realized_pnl: number;
  user_id: string;
  created_at: Date;
  updated_at: Date;
}

/** A trading strategy attached to a portfolio. */
export interface Strategy {
  id: string;
  name: string;
  description: string;
  // Free-form strategy parameters; schema is strategy-specific.
  parameters: Record<string, any>;
  status: 'active' | 'inactive' | 'paused';
  performance_metrics: Record<string, number>;
  portfolio_id: string;
  created_at: Date;
  updated_at: Date;
}

/** A risk limit scoped to a portfolio and/or strategy. */
export interface RiskLimit {
  id: string;
  type: 'position_size' | 'daily_loss' | 'max_drawdown' | 'concentration';
  value: number;
  threshold: number;
  status: 'active' | 'breached' | 'disabled';
  portfolio_id?: string;
  strategy_id?: string;
  created_at: Date;
  updated_at: Date;
}

/** An audit-trail entry recording a change to some entity. */
export interface AuditLog {
  id: string;
  action: string;
  entity_type: string;
  entity_id: string;
  old_values?: Record<string, any>;
  new_values?: Record<string, any>;
  user_id?: string;
  ip_address?: string;
  user_agent?: string;
  timestamp: Date;
}
|
||||
|
||||
/**
 * Query Builder Types
 */
/** A single WHERE/HAVING condition; `value` is unused for the NULL-check operators. */
export interface WhereCondition {
  column: string;
  operator:
    | '='
    | '!='
    | '>'
    | '<'
    | '>='
    | '<='
    | 'IN'
    | 'NOT IN'
    | 'LIKE'
    | 'ILIKE'
    | 'IS NULL'
    | 'IS NOT NULL';
  value?: any;
}

/** A JOIN clause: join kind, target table, and raw ON expression. */
export interface JoinCondition {
  type: 'INNER' | 'LEFT' | 'RIGHT' | 'FULL';
  table: string;
  on: string;
}

/** A single ORDER BY term. */
export interface OrderByCondition {
  column: string;
  direction: 'ASC' | 'DESC';
}
|
||||
13
libs/data/postgres/tsconfig.json
Normal file
13
libs/data/postgres/tsconfig.json
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
{
|
||||
"extends": "../../../tsconfig.json",
|
||||
"compilerOptions": {
|
||||
"outDir": "./dist",
|
||||
"rootDir": "./src",
|
||||
"composite": true
|
||||
},
|
||||
"include": ["src/**/*"],
|
||||
"references": [
|
||||
{ "path": "../../core/logger" },
|
||||
{ "path": "../../core/types" }
|
||||
]
|
||||
}
|
||||
102
libs/data/questdb/README.md
Normal file
102
libs/data/questdb/README.md
Normal file
|
|
@ -0,0 +1,102 @@
|
|||
# QuestDB Client Library
|
||||
|
||||
A comprehensive QuestDB client library for the Stock Bot trading platform, optimized for time-series data, market analytics, and high-performance queries.
|
||||
|
||||
## Features
|
||||
|
||||
- **Time-Series Optimized**: Built specifically for time-series data patterns
|
||||
- **Dual Protocol Support**: HTTP REST API and PostgreSQL wire protocol
|
||||
- **InfluxDB Line Protocol**: High-performance data ingestion
|
||||
- **SQL Analytics**: Full SQL support for complex analytics
|
||||
- **Schema Management**: Automatic table creation and partitioning
|
||||
- **Performance Monitoring**: Query performance tracking and optimization
|
||||
- **Health Monitoring**: Connection health monitoring and metrics
|
||||
|
||||
## Usage
|
||||
|
||||
```typescript
|
||||
import { QuestDBClient } from '@stock-bot/questdb';
|
||||
|
||||
// Initialize client (the constructor requires a configuration object)
const questClient = new QuestDBClient({
  host: 'localhost',
  httpPort: 9000,
  pgPort: 8812,
  influxPort: 9009,
  database: 'qdb',
  user: 'admin',
  password: 'quest',
});
await questClient.connect();
|
||||
|
||||
// Insert market data using InfluxDB Line Protocol
|
||||
await questClient.insert('ohlcv', {
|
||||
symbol: 'AAPL',
|
||||
open: 150.00,
|
||||
high: 152.00,
|
||||
low: 149.50,
|
||||
close: 151.50,
|
||||
volume: 1000000,
|
||||
timestamp: new Date()
|
||||
});
|
||||
|
||||
// Query with SQL
|
||||
const prices = await questClient.query(`
|
||||
SELECT symbol, close, timestamp
|
||||
FROM ohlcv
|
||||
WHERE symbol = 'AAPL'
|
||||
AND timestamp > dateadd('d', -1, now())
|
||||
ORDER BY timestamp DESC
|
||||
`);
|
||||
|
||||
// Time-series aggregations
|
||||
const dailyStats = await questClient.aggregate('ohlcv')
|
||||
.select(['symbol', 'avg(close) as avg_price'])
|
||||
.where('symbol = ?', ['AAPL'])
|
||||
.groupBy('symbol')
|
||||
.sampleBy('1d', 'timestamp')
|
||||
.execute();
|
||||
```
|
||||
|
||||
## Data Types
|
||||
|
||||
The client provides typed access to the following time-series data:
|
||||
|
||||
- **ohlcv**: OHLCV candlestick data
|
||||
- **trades**: Individual trade executions
|
||||
- **quotes**: Bid/ask quote data
|
||||
- **indicators**: Technical indicator values
|
||||
- **performance**: Portfolio performance metrics
|
||||
- **risk_metrics**: Risk calculation results
|
||||
|
||||
## Configuration
|
||||
|
||||
Configure using environment variables:
|
||||
|
||||
```env
|
||||
QUESTDB_HOST=localhost
|
||||
QUESTDB_HTTP_PORT=9000
|
||||
QUESTDB_PG_PORT=8812
|
||||
QUESTDB_INFLUX_PORT=9009
|
||||
```
|
||||
|
||||
## Time-Series Features
|
||||
|
||||
QuestDB excels at:
|
||||
|
||||
- **High-frequency data**: Millions of data points per second
|
||||
- **Time-based partitioning**: Automatic partitioning by time
|
||||
- **ASOF JOINs**: Time-series specific joins
|
||||
- **SAMPLE BY**: Time-based aggregations
|
||||
- **LATEST BY**: Get latest values by key
|
||||
|
||||
## Performance
|
||||
|
||||
The client includes performance optimizations:
|
||||
|
||||
- Connection pooling for HTTP and PostgreSQL protocols
|
||||
- Batch insertions for high throughput
|
||||
- Compressed data transfer
|
||||
- Query result caching
|
||||
- Automatic schema optimization
|
||||
|
||||
## Health Monitoring
|
||||
|
||||
Built-in health monitoring:
|
||||
|
||||
```typescript
|
||||
const health = await questClient.getHealth();
|
||||
console.log(health.status); // 'healthy' | 'degraded' | 'unhealthy'
|
||||
```
|
||||
14
libs/data/questdb/bunfig.toml
Normal file
14
libs/data/questdb/bunfig.toml
Normal file
|
|
@ -0,0 +1,14 @@
|
|||
# QuestDB Client Library Bun Test Configuration
|
||||
|
||||
[test]
|
||||
# Configure path mapping for tests
|
||||
preload = ["./test/setup.ts"]
|
||||
|
||||
# Test configuration
|
||||
timeout = 5000
|
||||
|
||||
# Enable TypeScript paths resolution
|
||||
[bun]
# TOML inline tables must be written on a single line.
paths = { "@/*" = ["./src/*"] }
|
||||
45
libs/data/questdb/package.json
Normal file
45
libs/data/questdb/package.json
Normal file
|
|
@ -0,0 +1,45 @@
|
|||
{
|
||||
"name": "@stock-bot/questdb",
|
||||
"version": "1.0.0",
|
||||
"description": "QuestDB client library for Stock Bot platform",
|
||||
"main": "dist/index.js",
|
||||
"types": "dist/index.d.ts",
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"build": "tsc",
|
||||
"test": "bun test",
|
||||
"lint": "eslint src/**/*.ts",
|
||||
"type-check": "tsc --noEmit",
|
||||
"clean": "rimraf dist"
|
||||
},
|
||||
"dependencies": {
|
||||
"@stock-bot/logger": "*",
|
||||
"@stock-bot/types": "*",
|
||||
"pg": "^8.11.3"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^20.11.0",
|
||||
"typescript": "^5.3.0",
|
||||
"eslint": "^8.56.0",
|
||||
"@typescript-eslint/eslint-plugin": "^6.19.0",
|
||||
"@typescript-eslint/parser": "^6.19.0",
|
||||
"bun-types": "^1.2.15"
|
||||
},
|
||||
"keywords": [
|
||||
"questdb",
|
||||
"database",
|
||||
"client",
|
||||
"stock-bot"
|
||||
],
|
||||
"exports": {
|
||||
".": {
|
||||
"import": "./dist/index.js",
|
||||
"require": "./dist/index.js",
|
||||
"types": "./dist/index.d.ts"
|
||||
}
|
||||
},
|
||||
"files": [
|
||||
"dist",
|
||||
"README.md"
|
||||
]
|
||||
}
|
||||
450
libs/data/questdb/src/client.ts
Normal file
450
libs/data/questdb/src/client.ts
Normal file
|
|
@ -0,0 +1,450 @@
|
|||
import { Pool } from 'pg';
|
||||
import { getLogger } from '@stock-bot/logger';
|
||||
import { QuestDBHealthMonitor } from './health';
|
||||
import { QuestDBInfluxWriter } from './influx-writer';
|
||||
import { QuestDBQueryBuilder } from './query-builder';
|
||||
import { QuestDBSchemaManager } from './schema';
|
||||
import type {
|
||||
QueryResult,
|
||||
QuestDBClientConfig,
|
||||
QuestDBConnectionOptions,
|
||||
TableNames,
|
||||
} from './types';
|
||||
|
||||
/**
|
||||
* QuestDB Client for Stock Bot
|
||||
*
|
||||
* Provides high-performance time-series data access with support for
|
||||
* multiple protocols (HTTP, PostgreSQL, InfluxDB Line Protocol).
|
||||
*/
|
||||
export class QuestDBClient {
|
||||
private pgPool: Pool | null = null;
|
||||
private readonly config: QuestDBClientConfig;
|
||||
private readonly options: QuestDBConnectionOptions;
|
||||
private readonly logger = getLogger('QuestDBClient');
|
||||
private readonly healthMonitor: QuestDBHealthMonitor;
|
||||
private readonly influxWriter: QuestDBInfluxWriter;
|
||||
private readonly schemaManager: QuestDBSchemaManager;
|
||||
private isConnected = false;
|
||||
|
||||
constructor(config: QuestDBClientConfig, options?: QuestDBConnectionOptions) {
|
||||
this.config = config;
|
||||
this.options = {
|
||||
protocol: 'pg',
|
||||
retryAttempts: 3,
|
||||
retryDelay: 1000,
|
||||
healthCheckInterval: 30000,
|
||||
...options,
|
||||
};
|
||||
|
||||
this.healthMonitor = new QuestDBHealthMonitor(this);
|
||||
this.influxWriter = new QuestDBInfluxWriter(this);
|
||||
this.schemaManager = new QuestDBSchemaManager(this);
|
||||
}
|
||||
|
||||
/**
|
||||
* Connect to QuestDB
|
||||
*/
|
||||
async connect(): Promise<void> {
|
||||
if (this.isConnected) {
|
||||
return;
|
||||
}
|
||||
|
||||
let lastError: Error | null = null;
|
||||
|
||||
for (let attempt = 1; attempt <= (this.options.retryAttempts ?? 3); attempt++) {
|
||||
try {
|
||||
this.logger.info(
|
||||
`Connecting to QuestDB (attempt ${attempt}/${this.options.retryAttempts})...`
|
||||
);
|
||||
|
||||
// Connect via PostgreSQL wire protocol
|
||||
this.pgPool = new Pool(this.buildPgPoolConfig());
|
||||
|
||||
// Test the connection
|
||||
const client = await this.pgPool.connect();
|
||||
await client.query('SELECT 1');
|
||||
client.release();
|
||||
|
||||
this.isConnected = true;
|
||||
this.logger.info('Successfully connected to QuestDB');
|
||||
// Initialize schema
|
||||
await this.schemaManager.initializeDatabase();
|
||||
|
||||
// Start health monitoring
|
||||
this.healthMonitor.startMonitoring();
|
||||
|
||||
return;
|
||||
} catch (error) {
|
||||
lastError = error as Error;
|
||||
this.logger.warn(`QuestDB connection attempt ${attempt} failed:`, error as Error);
|
||||
|
||||
if (this.pgPool) {
|
||||
await this.pgPool.end();
|
||||
this.pgPool = null;
|
||||
}
|
||||
|
||||
if (attempt < (this.options.retryAttempts ?? 3)) {
|
||||
await this.delay((this.options.retryDelay ?? 1000) * attempt);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
throw new Error(
|
||||
`Failed to connect to QuestDB after ${this.options.retryAttempts} attempts: ${lastError?.message}`
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Disconnect from QuestDB
|
||||
*/
|
||||
async disconnect(): Promise<void> {
|
||||
if (!this.isConnected) {
|
||||
return;
|
||||
}
|
||||
try {
|
||||
this.healthMonitor.stopMonitoring();
|
||||
|
||||
if (this.pgPool) {
|
||||
await this.pgPool.end();
|
||||
this.pgPool = null;
|
||||
}
|
||||
|
||||
this.isConnected = false;
|
||||
this.logger.info('Disconnected from QuestDB');
|
||||
} catch (error) {
|
||||
this.logger.error('Error disconnecting from QuestDB:', error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a SQL query
|
||||
*/
|
||||
async query<T = any>(sql: string, params?: any[]): Promise<QueryResult<T>> {
|
||||
if (!this.pgPool) {
|
||||
throw new Error('QuestDB client not connected');
|
||||
}
|
||||
|
||||
const startTime = Date.now();
|
||||
|
||||
try {
|
||||
const result = await this.pgPool.query(sql, params);
|
||||
const executionTime = Date.now() - startTime;
|
||||
|
||||
this.logger.debug(`Query executed in ${executionTime}ms`, {
|
||||
query: sql.substring(0, 100),
|
||||
rowCount: result.rowCount,
|
||||
});
|
||||
|
||||
return {
|
||||
rows: result.rows,
|
||||
rowCount: result.rowCount || 0,
|
||||
executionTime,
|
||||
metadata: {
|
||||
columns:
|
||||
result.fields?.map((field: any) => ({
|
||||
name: field.name,
|
||||
type: this.mapDataType(field.dataTypeID),
|
||||
})) || [],
|
||||
},
|
||||
};
|
||||
} catch (error) {
|
||||
const executionTime = Date.now() - startTime;
|
||||
this.logger.error(`Query failed after ${executionTime}ms:`, {
|
||||
error: (error as Error).message,
|
||||
query: sql,
|
||||
params,
|
||||
});
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Write OHLCV data using InfluxDB Line Protocol
|
||||
*/
|
||||
async writeOHLCV(
|
||||
symbol: string,
|
||||
exchange: string,
|
||||
data: Array<{
|
||||
timestamp: Date;
|
||||
open: number;
|
||||
high: number;
|
||||
low: number;
|
||||
close: number;
|
||||
volume: number;
|
||||
}>
|
||||
): Promise<void> {
|
||||
return await this.influxWriter.writeOHLCV(symbol, exchange, data);
|
||||
}
|
||||
|
||||
/**
|
||||
* Write market analytics data
|
||||
*/
|
||||
async writeMarketAnalytics(
|
||||
symbol: string,
|
||||
exchange: string,
|
||||
analytics: {
|
||||
timestamp: Date;
|
||||
rsi?: number;
|
||||
macd?: number;
|
||||
signal?: number;
|
||||
histogram?: number;
|
||||
bollinger_upper?: number;
|
||||
bollinger_lower?: number;
|
||||
volume_sma?: number;
|
||||
}
|
||||
): Promise<void> {
|
||||
return await this.influxWriter.writeMarketAnalytics(symbol, exchange, analytics);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a query builder instance
|
||||
*/
|
||||
queryBuilder(): QuestDBQueryBuilder {
|
||||
return new QuestDBQueryBuilder(this);
|
||||
}
|
||||
/**
|
||||
* Create a SELECT query builder
|
||||
*/
|
||||
select(...columns: string[]): QuestDBQueryBuilder {
|
||||
return this.queryBuilder().select(...columns);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create an aggregation query builder
|
||||
*/
|
||||
aggregate(table: TableNames): QuestDBQueryBuilder {
|
||||
return this.queryBuilder().from(table);
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a time-series specific query with SAMPLE BY
|
||||
*/
|
||||
async sampleBy<T = any>(
|
||||
table: TableNames,
|
||||
columns: string[],
|
||||
interval: string,
|
||||
_timeColumn: string = 'timestamp',
|
||||
where?: string,
|
||||
params?: any[]
|
||||
): Promise<QueryResult<T>> {
|
||||
const columnsStr = columns.join(', ');
|
||||
const whereClause = where ? `WHERE ${where}` : '';
|
||||
|
||||
const sql = `
|
||||
SELECT ${columnsStr}
|
||||
FROM ${table}
|
||||
${whereClause}
|
||||
SAMPLE BY ${interval}
|
||||
ALIGN TO CALENDAR
|
||||
`;
|
||||
|
||||
return await this.query<T>(sql, params);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get latest values by symbol using LATEST BY
|
||||
*/
|
||||
async latestBy<T = any>(
|
||||
table: TableNames,
|
||||
columns: string | string[] = '*',
|
||||
keyColumns: string | string[] = 'symbol'
|
||||
): Promise<QueryResult<T>> {
|
||||
const columnsStr = Array.isArray(columns) ? columns.join(', ') : columns;
|
||||
const keyColumnsStr = Array.isArray(keyColumns) ? keyColumns.join(', ') : keyColumns;
|
||||
|
||||
const sql = `
|
||||
SELECT ${columnsStr}
|
||||
FROM ${table}
|
||||
LATEST BY ${keyColumnsStr}
|
||||
`;
|
||||
|
||||
return await this.query<T>(sql);
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute ASOF JOIN for time-series correlation
|
||||
*/
|
||||
async asofJoin<T = any>(
|
||||
leftTable: TableNames,
|
||||
rightTable: TableNames,
|
||||
joinCondition: string,
|
||||
columns?: string[],
|
||||
where?: string,
|
||||
params?: any[]
|
||||
): Promise<QueryResult<T>> {
|
||||
const columnsStr = columns ? columns.join(', ') : '*';
|
||||
const whereClause = where ? `WHERE ${where}` : '';
|
||||
|
||||
const sql = `
|
||||
SELECT ${columnsStr}
|
||||
FROM ${leftTable}
|
||||
ASOF JOIN ${rightTable} ON ${joinCondition}
|
||||
${whereClause}
|
||||
`;
|
||||
|
||||
return await this.query<T>(sql, params);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get database statistics
|
||||
*/
|
||||
async getStats(): Promise<any> {
|
||||
const result = await this.query(`
|
||||
SELECT
|
||||
table_name,
|
||||
row_count,
|
||||
partition_count,
|
||||
size_bytes
|
||||
FROM tables()
|
||||
WHERE table_name NOT LIKE 'sys.%'
|
||||
ORDER BY row_count DESC
|
||||
`);
|
||||
return result.rows;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get table information
|
||||
*/
|
||||
async getTableInfo(tableName: string): Promise<any> {
|
||||
const result = await this.query(`SELECT * FROM table_columns WHERE table_name = ?`, [
|
||||
tableName,
|
||||
]);
|
||||
return result.rows;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if PostgreSQL pool is healthy
|
||||
*/
|
||||
isPgPoolHealthy(): boolean {
|
||||
return this.pgPool !== null && !this.pgPool.ended;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get HTTP endpoint URL
|
||||
*/
|
||||
getHttpUrl(): string {
|
||||
const protocol = this.config.tls?.enabled ? 'https' : 'http';
|
||||
return `${protocol}://${this.config.host}:${this.config.httpPort}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get InfluxDB endpoint URL
|
||||
*/
|
||||
getInfluxUrl(): string {
|
||||
const protocol = this.config.tls?.enabled ? 'https' : 'http';
|
||||
return `${protocol}://${this.config.host}:${this.config.influxPort}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get health monitor instance
|
||||
*/
|
||||
getHealthMonitor(): QuestDBHealthMonitor {
|
||||
return this.healthMonitor;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get schema manager instance
|
||||
*/
|
||||
getSchemaManager(): QuestDBSchemaManager {
|
||||
return this.schemaManager;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get InfluxDB writer instance
|
||||
*/
|
||||
getInfluxWriter(): QuestDBInfluxWriter {
|
||||
return this.influxWriter;
|
||||
}
|
||||
|
||||
/**
|
||||
* Optimize table by rebuilding partitions
|
||||
*/
|
||||
async optimizeTable(tableName: string): Promise<void> {
|
||||
await this.query(`VACUUM TABLE ${tableName}`);
|
||||
this.logger.debug(`Optimized table: ${tableName}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a table with time-series optimizations
|
||||
*/
|
||||
async createTable(
|
||||
tableName: string,
|
||||
columns: string,
|
||||
partitionBy: string = 'DAY',
|
||||
timestampColumn: string = 'timestamp'
|
||||
): Promise<void> {
|
||||
const sql = `
|
||||
CREATE TABLE IF NOT EXISTS ${tableName} (
|
||||
${columns}
|
||||
) TIMESTAMP(${timestampColumn}) PARTITION BY ${partitionBy}
|
||||
`;
|
||||
|
||||
await this.query(sql);
|
||||
this.logger.info(`Created table: ${tableName}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if client is connected
|
||||
*/
|
||||
get connected(): boolean {
|
||||
return this.isConnected && !!this.pgPool;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the PostgreSQL connection pool
|
||||
*/
|
||||
get connectionPool(): Pool | null {
|
||||
return this.pgPool;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get configuration
|
||||
*/
|
||||
get configuration(): QuestDBClientConfig {
|
||||
return { ...this.config };
|
||||
}
|
||||
|
||||
|
||||
private buildPgPoolConfig(): any {
|
||||
return {
|
||||
host: this.config.host,
|
||||
port: this.config.pgPort,
|
||||
database: this.config.database,
|
||||
user: this.config.user,
|
||||
password: this.config.password,
|
||||
connectionTimeoutMillis: this.config.timeouts?.connection,
|
||||
query_timeout: this.config.timeouts?.request,
|
||||
ssl: this.config.tls?.enabled
|
||||
? {
|
||||
rejectUnauthorized: this.config.tls.verifyServerCert,
|
||||
}
|
||||
: false,
|
||||
min: 2,
|
||||
max: 10,
|
||||
};
|
||||
}
|
||||
|
||||
private mapDataType(typeId: number): string {
|
||||
// Map PostgreSQL type IDs to QuestDB types
|
||||
const typeMap: Record<number, string> = {
|
||||
16: 'BOOLEAN',
|
||||
20: 'LONG',
|
||||
21: 'INT',
|
||||
23: 'INT',
|
||||
25: 'STRING',
|
||||
700: 'FLOAT',
|
||||
701: 'DOUBLE',
|
||||
1043: 'STRING',
|
||||
1082: 'DATE',
|
||||
1114: 'TIMESTAMP',
|
||||
1184: 'TIMESTAMP',
|
||||
};
|
||||
|
||||
return typeMap[typeId] || 'STRING';
|
||||
}
|
||||
|
||||
private delay(ms: number): Promise<void> {
|
||||
return new Promise(resolve => setTimeout(resolve, ms));
|
||||
}
|
||||
}
|
||||
24
libs/data/questdb/src/factory.ts
Normal file
24
libs/data/questdb/src/factory.ts
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
import { QuestDBClient } from './client';
|
||||
import type { QuestDBClientConfig, QuestDBConnectionOptions } from './types';
|
||||
|
||||
/**
|
||||
* Factory function to create a QuestDB client instance
|
||||
*/
|
||||
export function createQuestDBClient(
|
||||
config: QuestDBClientConfig,
|
||||
options?: QuestDBConnectionOptions
|
||||
): QuestDBClient {
|
||||
return new QuestDBClient(config, options);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create and connect a QuestDB client
|
||||
*/
|
||||
export async function createAndConnectQuestDBClient(
|
||||
config: QuestDBClientConfig,
|
||||
options?: QuestDBConnectionOptions
|
||||
): Promise<QuestDBClient> {
|
||||
const client = createQuestDBClient(config, options);
|
||||
await client.connect();
|
||||
return client;
|
||||
}
|
||||
234
libs/data/questdb/src/health.ts
Normal file
234
libs/data/questdb/src/health.ts
Normal file
|
|
@ -0,0 +1,234 @@
|
|||
import { getLogger } from '@stock-bot/logger';
|
||||
import type { HealthStatus, PerformanceMetrics, QueryResult } from './types';
|
||||
|
||||
// Interface to avoid circular dependency
|
||||
interface QuestDBClientInterface {
|
||||
query<T = any>(sql: string, params?: any[]): Promise<QueryResult<T>>;
|
||||
isPgPoolHealthy(): boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* QuestDB Health Monitor
|
||||
*
|
||||
* Monitors connection health, performance metrics, and provides
|
||||
* automatic recovery capabilities for the QuestDB client.
|
||||
*/
|
||||
export class QuestDBHealthMonitor {
|
||||
private readonly logger: ReturnType<typeof getLogger>;
|
||||
private healthCheckInterval: NodeJS.Timeout | null = null;
|
||||
private lastHealthCheck: Date | null = null;
|
||||
private performanceMetrics: PerformanceMetrics = {
|
||||
totalQueries: 0,
|
||||
successfulQueries: 0,
|
||||
failedQueries: 0,
|
||||
averageResponseTime: 0,
|
||||
lastQueryTime: null,
|
||||
connectionUptime: 0,
|
||||
memoryUsage: 0,
|
||||
};
|
||||
constructor(private readonly client: QuestDBClientInterface) {
|
||||
this.logger = getLogger('questdb-health-monitor');
|
||||
}
|
||||
|
||||
/**
|
||||
* Start health monitoring
|
||||
*/
|
||||
public startMonitoring(intervalMs: number = 30000): void {
|
||||
if (this.healthCheckInterval) {
|
||||
this.stopMonitoring();
|
||||
}
|
||||
|
||||
this.logger.info(`Starting health monitoring with ${intervalMs}ms interval`);
|
||||
|
||||
this.healthCheckInterval = setInterval(async () => {
|
||||
try {
|
||||
await this.performHealthCheck();
|
||||
} catch (error) {
|
||||
this.logger.error('Health check failed', error);
|
||||
}
|
||||
}, intervalMs);
|
||||
|
||||
// Perform initial health check
|
||||
this.performHealthCheck().catch(error => {
|
||||
this.logger.error('Initial health check failed', error);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Stop health monitoring
|
||||
*/
|
||||
public stopMonitoring(): void {
|
||||
if (this.healthCheckInterval) {
|
||||
clearInterval(this.healthCheckInterval);
|
||||
this.healthCheckInterval = null;
|
||||
this.logger.info('Health monitoring stopped');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Perform a health check
|
||||
*/
|
||||
public async performHealthCheck(): Promise<HealthStatus> {
|
||||
const startTime = Date.now();
|
||||
|
||||
try {
|
||||
// Test basic connectivity with a simple query
|
||||
await this.client.query('SELECT 1 as health_check');
|
||||
|
||||
const responseTime = Date.now() - startTime;
|
||||
this.lastHealthCheck = new Date();
|
||||
|
||||
const status: HealthStatus = {
|
||||
isHealthy: true,
|
||||
lastCheck: this.lastHealthCheck,
|
||||
responseTime,
|
||||
message: 'Connection healthy',
|
||||
details: {
|
||||
pgPool: this.client.isPgPoolHealthy(),
|
||||
httpEndpoint: true, // Will be implemented when HTTP client is added
|
||||
uptime: this.getUptime(),
|
||||
},
|
||||
};
|
||||
|
||||
this.logger.debug('Health check passed', { responseTime });
|
||||
return status;
|
||||
} catch (error) {
|
||||
const responseTime = Date.now() - startTime;
|
||||
this.lastHealthCheck = new Date();
|
||||
|
||||
const status: HealthStatus = {
|
||||
isHealthy: false,
|
||||
lastCheck: this.lastHealthCheck,
|
||||
responseTime,
|
||||
message: `Health check failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
|
||||
error: error instanceof Error ? error : new Error('Unknown error'),
|
||||
details: {
|
||||
pgPool: false,
|
||||
httpEndpoint: false,
|
||||
uptime: this.getUptime(),
|
||||
},
|
||||
};
|
||||
|
||||
this.logger.error('Health check failed', { error, responseTime });
|
||||
return status;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current health status
|
||||
*/
|
||||
public async getHealthStatus(): Promise<HealthStatus> {
|
||||
if (!this.lastHealthCheck || Date.now() - this.lastHealthCheck.getTime() > 60000) {
|
||||
return await this.performHealthCheck();
|
||||
}
|
||||
|
||||
// Return cached status if recent
|
||||
return {
|
||||
isHealthy: true,
|
||||
lastCheck: this.lastHealthCheck,
|
||||
responseTime: 0,
|
||||
message: 'Using cached health status',
|
||||
details: {
|
||||
pgPool: this.client.isPgPoolHealthy(),
|
||||
httpEndpoint: true,
|
||||
uptime: this.getUptime(),
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Record query performance metrics
|
||||
*/
|
||||
public recordQuery(success: boolean, responseTime: number): void {
|
||||
this.performanceMetrics.totalQueries++;
|
||||
this.performanceMetrics.lastQueryTime = new Date();
|
||||
|
||||
if (success) {
|
||||
this.performanceMetrics.successfulQueries++;
|
||||
} else {
|
||||
this.performanceMetrics.failedQueries++;
|
||||
}
|
||||
|
||||
// Update rolling average response time
|
||||
const totalResponseTime =
|
||||
this.performanceMetrics.averageResponseTime * (this.performanceMetrics.totalQueries - 1) +
|
||||
responseTime;
|
||||
this.performanceMetrics.averageResponseTime =
|
||||
totalResponseTime / this.performanceMetrics.totalQueries;
|
||||
|
||||
// Update memory usage
|
||||
this.performanceMetrics.memoryUsage = process.memoryUsage().heapUsed;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get performance metrics
|
||||
*/
|
||||
public getPerformanceMetrics(): PerformanceMetrics {
|
||||
return { ...this.performanceMetrics };
|
||||
}
|
||||
|
||||
/**
|
||||
* Get connection uptime in seconds
|
||||
*/
|
||||
private getUptime(): number {
|
||||
return Math.floor(process.uptime());
|
||||
}
|
||||
|
||||
/**
|
||||
* Reset performance metrics
|
||||
*/
|
||||
public resetMetrics(): void {
|
||||
this.performanceMetrics = {
|
||||
totalQueries: 0,
|
||||
successfulQueries: 0,
|
||||
failedQueries: 0,
|
||||
averageResponseTime: 0,
|
||||
lastQueryTime: null,
|
||||
connectionUptime: this.getUptime(),
|
||||
memoryUsage: process.memoryUsage().heapUsed,
|
||||
};
|
||||
|
||||
this.logger.debug('Performance metrics reset');
|
||||
}
|
||||
|
||||
/**
|
||||
* Get health summary for monitoring dashboards
|
||||
*/
|
||||
public async getHealthSummary(): Promise<{
|
||||
status: HealthStatus;
|
||||
metrics: PerformanceMetrics;
|
||||
recommendations: string[];
|
||||
}> {
|
||||
const status = await this.getHealthStatus();
|
||||
const metrics = this.getPerformanceMetrics();
|
||||
const recommendations: string[] = [];
|
||||
|
||||
// Generate recommendations based on metrics
|
||||
if (metrics.failedQueries > metrics.successfulQueries * 0.1) {
|
||||
recommendations.push('High error rate detected - check query patterns');
|
||||
}
|
||||
|
||||
if (metrics.averageResponseTime > 1000) {
|
||||
recommendations.push('High response times - consider query optimization');
|
||||
}
|
||||
|
||||
if (metrics.memoryUsage > 100 * 1024 * 1024) {
|
||||
// 100MB
|
||||
recommendations.push('High memory usage - monitor for memory leaks');
|
||||
}
|
||||
|
||||
return {
|
||||
status,
|
||||
metrics,
|
||||
recommendations,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Cleanup resources
|
||||
*/
|
||||
public destroy(): void {
|
||||
this.stopMonitoring();
|
||||
this.logger.debug('Health monitor destroyed');
|
||||
}
|
||||
}
|
||||
32
libs/data/questdb/src/index.ts
Normal file
32
libs/data/questdb/src/index.ts
Normal file
|
|
@ -0,0 +1,32 @@
|
|||
/**
|
||||
* QuestDB Client Library for Stock Bot
|
||||
*
|
||||
* Provides high-performance time-series data access with support for
|
||||
* InfluxDB Line Protocol, SQL queries, and PostgreSQL wire protocol.
|
||||
*/
|
||||
|
||||
export { QuestDBClient } from './client';
|
||||
export { QuestDBHealthMonitor } from './health';
|
||||
export { QuestDBQueryBuilder } from './query-builder';
|
||||
export { QuestDBInfluxWriter } from './influx-writer';
|
||||
export { QuestDBSchemaManager } from './schema';
|
||||
|
||||
// Types
|
||||
export type {
|
||||
QuestDBClientConfig,
|
||||
QuestDBConnectionOptions,
|
||||
QuestDBHealthStatus,
|
||||
QuestDBMetrics,
|
||||
TableNames,
|
||||
OHLCVData,
|
||||
TradeData,
|
||||
QuoteData,
|
||||
IndicatorData,
|
||||
PerformanceData,
|
||||
RiskMetrics,
|
||||
QueryResult,
|
||||
InsertResult,
|
||||
} from './types';
|
||||
|
||||
// Utils
|
||||
export { createQuestDBClient, createAndConnectQuestDBClient } from './factory';
|
||||
430
libs/data/questdb/src/influx-writer.ts
Normal file
430
libs/data/questdb/src/influx-writer.ts
Normal file
|
|
@ -0,0 +1,430 @@
|
|||
import { getLogger } from '@stock-bot/logger';
|
||||
import type { InfluxLineData, InfluxWriteOptions } from './types';
|
||||
|
||||
// Interface to avoid circular dependency
|
||||
interface QuestDBClientInterface {
|
||||
getHttpUrl(): string;
|
||||
}
|
||||
|
||||
/**
|
||||
* QuestDB InfluxDB Line Protocol Writer
|
||||
*
|
||||
* Provides high-performance data ingestion using InfluxDB Line Protocol
|
||||
* which QuestDB supports natively for optimal time-series data insertion.
|
||||
*/
|
||||
export class QuestDBInfluxWriter {
|
||||
private readonly logger: ReturnType<typeof getLogger>;
|
||||
private writeBuffer: string[] = [];
|
||||
private flushTimer: NodeJS.Timeout | null = null;
|
||||
private readonly defaultOptions: Required<InfluxWriteOptions> = {
|
||||
batchSize: 1000,
|
||||
flushInterval: 5000,
|
||||
autoFlush: true,
|
||||
precision: 'ms',
|
||||
retryAttempts: 3,
|
||||
retryDelay: 1000,
|
||||
};
|
||||
constructor(private readonly client: QuestDBClientInterface) {
|
||||
this.logger = getLogger('questdb-influx-writer');
|
||||
}
|
||||
|
||||
/**
|
||||
* Write single data point using InfluxDB Line Protocol
|
||||
*/
|
||||
public async writePoint(
|
||||
measurement: string,
|
||||
tags: Record<string, string>,
|
||||
fields: Record<string, number | string | boolean>,
|
||||
timestamp?: Date,
|
||||
options?: Partial<InfluxWriteOptions>
|
||||
): Promise<void> {
|
||||
const line = this.buildLineProtocol(measurement, tags, fields, timestamp);
|
||||
const opts = { ...this.defaultOptions, ...options };
|
||||
|
||||
if (opts.autoFlush && this.writeBuffer.length === 0) {
|
||||
// Single point write - send immediately
|
||||
await this.sendLines([line], opts);
|
||||
} else {
|
||||
// Add to buffer
|
||||
this.writeBuffer.push(line);
|
||||
|
||||
if (opts.autoFlush) {
|
||||
this.scheduleFlush(opts);
|
||||
}
|
||||
|
||||
// Flush if buffer is full
|
||||
if (this.writeBuffer.length >= opts.batchSize) {
|
||||
await this.flush(opts);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Write multiple data points
|
||||
*/
|
||||
public async writePoints(
|
||||
data: InfluxLineData[],
|
||||
options?: Partial<InfluxWriteOptions>
|
||||
): Promise<void> {
|
||||
const opts = { ...this.defaultOptions, ...options };
|
||||
const lines = data.map(point =>
|
||||
this.buildLineProtocol(point.measurement, point.tags, point.fields, point.timestamp)
|
||||
);
|
||||
|
||||
if (opts.autoFlush) {
|
||||
// Send immediately for batch writes
|
||||
await this.sendLines(lines, opts);
|
||||
} else {
|
||||
// Add to buffer
|
||||
this.writeBuffer.push(...lines);
|
||||
|
||||
// Flush if buffer exceeds batch size
|
||||
while (this.writeBuffer.length >= opts.batchSize) {
|
||||
const batch = this.writeBuffer.splice(0, opts.batchSize);
|
||||
await this.sendLines(batch, opts);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Write OHLCV data optimized for QuestDB
|
||||
*/
|
||||
public async writeOHLCV(
|
||||
symbol: string,
|
||||
exchange: string,
|
||||
data: {
|
||||
timestamp: Date;
|
||||
open: number;
|
||||
high: number;
|
||||
low: number;
|
||||
close: number;
|
||||
volume: number;
|
||||
}[],
|
||||
options?: Partial<InfluxWriteOptions>
|
||||
): Promise<void> {
|
||||
const influxData: InfluxLineData[] = data.map(candle => ({
|
||||
measurement: 'ohlcv_data',
|
||||
tags: {
|
||||
symbol,
|
||||
exchange,
|
||||
data_source: 'market_feed',
|
||||
},
|
||||
fields: {
|
||||
open: candle.open,
|
||||
high: candle.high,
|
||||
low: candle.low,
|
||||
close: candle.close,
|
||||
volume: candle.volume,
|
||||
},
|
||||
timestamp: candle.timestamp,
|
||||
}));
|
||||
|
||||
await this.writePoints(influxData, options);
|
||||
}
|
||||
|
||||
/**
|
||||
* Write market analytics data
|
||||
*/
|
||||
public async writeMarketAnalytics(
|
||||
symbol: string,
|
||||
exchange: string,
|
||||
analytics: {
|
||||
timestamp: Date;
|
||||
rsi?: number;
|
||||
macd?: number;
|
||||
signal?: number;
|
||||
histogram?: number;
|
||||
bollinger_upper?: number;
|
||||
bollinger_lower?: number;
|
||||
volume_sma?: number;
|
||||
},
|
||||
options?: Partial<InfluxWriteOptions>
|
||||
): Promise<void> {
|
||||
const fields: Record<string, number> = {};
|
||||
|
||||
// Only include defined values
|
||||
Object.entries(analytics).forEach(([key, value]) => {
|
||||
if (key !== 'timestamp' && value !== undefined && value !== null) {
|
||||
fields[key] = value as number;
|
||||
}
|
||||
});
|
||||
|
||||
if (Object.keys(fields).length === 0) {
|
||||
this.logger.debug('No analytics fields to write', { symbol, timestamp: analytics.timestamp });
|
||||
return;
|
||||
}
|
||||
|
||||
await this.writePoint(
|
||||
'market_analytics',
|
||||
{ symbol, exchange },
|
||||
fields,
|
||||
analytics.timestamp,
|
||||
options
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Write trade execution data
|
||||
*/
|
||||
public async writeTradeExecution(
|
||||
execution: {
|
||||
symbol: string;
|
||||
side: 'buy' | 'sell';
|
||||
quantity: number;
|
||||
price: number;
|
||||
timestamp: Date;
|
||||
executionTime: number;
|
||||
orderId?: string;
|
||||
strategy?: string;
|
||||
},
|
||||
options?: Partial<InfluxWriteOptions>
|
||||
): Promise<void> {
|
||||
const tags: Record<string, string> = {
|
||||
symbol: execution.symbol,
|
||||
side: execution.side,
|
||||
};
|
||||
|
||||
if (execution.orderId) {
|
||||
tags['order_id'] = execution.orderId;
|
||||
}
|
||||
|
||||
if (execution.strategy) {
|
||||
tags['strategy'] = execution.strategy;
|
||||
}
|
||||
|
||||
await this.writePoint(
|
||||
'trade_executions',
|
||||
tags,
|
||||
{
|
||||
quantity: execution.quantity,
|
||||
price: execution.price,
|
||||
execution_time: execution.executionTime,
|
||||
},
|
||||
execution.timestamp,
|
||||
options
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Write performance metrics
|
||||
*/
|
||||
public async writePerformanceMetrics(
|
||||
metrics: {
|
||||
timestamp: Date;
|
||||
operation: string;
|
||||
responseTime: number;
|
||||
success: boolean;
|
||||
errorCode?: string;
|
||||
},
|
||||
options?: Partial<InfluxWriteOptions>
|
||||
): Promise<void> {
|
||||
const tags: Record<string, string> = {
|
||||
operation: metrics.operation,
|
||||
success: metrics.success.toString(),
|
||||
};
|
||||
|
||||
if (metrics.errorCode) {
|
||||
tags['error_code'] = metrics.errorCode;
|
||||
}
|
||||
|
||||
await this.writePoint(
|
||||
'performance_metrics',
|
||||
tags,
|
||||
{ response_time: metrics.responseTime },
|
||||
metrics.timestamp,
|
||||
options
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Manually flush the write buffer
|
||||
*/
|
||||
public async flush(options?: Partial<InfluxWriteOptions>): Promise<void> {
|
||||
if (this.writeBuffer.length === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
const opts = { ...this.defaultOptions, ...options };
|
||||
const lines = this.writeBuffer.splice(0); // Clear buffer
|
||||
|
||||
if (this.flushTimer) {
|
||||
clearTimeout(this.flushTimer);
|
||||
this.flushTimer = null;
|
||||
}
|
||||
|
||||
await this.sendLines(lines, opts);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current buffer size
|
||||
*/
|
||||
public getBufferSize(): number {
|
||||
return this.writeBuffer.length;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear the buffer without writing
|
||||
*/
|
||||
public clearBuffer(): void {
|
||||
this.writeBuffer.length = 0;
|
||||
if (this.flushTimer) {
|
||||
clearTimeout(this.flushTimer);
|
||||
this.flushTimer = null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Build InfluxDB Line Protocol string
|
||||
*/
|
||||
private buildLineProtocol(
|
||||
measurement: string,
|
||||
tags: Record<string, string>,
|
||||
fields: Record<string, number | string | boolean>,
|
||||
timestamp?: Date
|
||||
): string {
|
||||
// Escape special characters in measurement name
|
||||
const escapedMeasurement = measurement.replace(/[, =]/g, '\\$&');
|
||||
|
||||
// Build tags string
|
||||
const tagString = Object.entries(tags)
|
||||
.filter(([_, value]) => value !== undefined && value !== null)
|
||||
.map(([key, value]) => `${this.escapeTagKey(key)}=${this.escapeTagValue(value)}`)
|
||||
.join(',');
|
||||
|
||||
// Build fields string
|
||||
const fieldString = Object.entries(fields)
|
||||
.filter(([_, value]) => value !== undefined && value !== null)
|
||||
.map(([key, value]) => `${this.escapeFieldKey(key)}=${this.formatFieldValue(value)}`)
|
||||
.join(',');
|
||||
|
||||
// Build timestamp
|
||||
const timestampString = timestamp
|
||||
? Math.floor(timestamp.getTime() * 1000000).toString() // Convert to nanoseconds
|
||||
: '';
|
||||
|
||||
// Combine parts
|
||||
let line = escapedMeasurement;
|
||||
if (tagString) {
|
||||
line += `,${tagString}`;
|
||||
}
|
||||
line += ` ${fieldString}`;
|
||||
if (timestampString) {
|
||||
line += ` ${timestampString}`;
|
||||
}
|
||||
|
||||
return line;
|
||||
}
|
||||
|
||||
/**
|
||||
* Send lines to QuestDB via HTTP endpoint
|
||||
*/
|
||||
private async sendLines(lines: string[], options: Required<InfluxWriteOptions>): Promise<void> {
|
||||
if (lines.length === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
const payload = lines.join('\n');
|
||||
let attempt = 0;
|
||||
|
||||
while (attempt <= options.retryAttempts) {
|
||||
try {
|
||||
// QuestDB InfluxDB Line Protocol endpoint
|
||||
const response = await fetch(`${this.client.getHttpUrl()}/write`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'text/plain',
|
||||
},
|
||||
body: payload,
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`HTTP ${response.status}: ${response.statusText}`);
|
||||
}
|
||||
|
||||
this.logger.debug(`Successfully wrote ${lines.length} lines to QuestDB`);
|
||||
return;
|
||||
} catch (error) {
|
||||
attempt++;
|
||||
this.logger.warn(`Write attempt ${attempt} failed`, {
|
||||
error,
|
||||
linesCount: lines.length,
|
||||
willRetry: attempt <= options.retryAttempts,
|
||||
});
|
||||
|
||||
if (attempt <= options.retryAttempts) {
|
||||
await this.sleep(options.retryDelay * attempt); // Exponential backoff
|
||||
} else {
|
||||
throw new Error(
|
||||
`Failed to write to QuestDB after ${options.retryAttempts} attempts: $error`
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Schedule automatic flush
|
||||
*/
|
||||
private scheduleFlush(options: Required<InfluxWriteOptions>): void {
|
||||
if (this.flushTimer || !options.autoFlush) {
|
||||
return;
|
||||
}
|
||||
|
||||
this.flushTimer = setTimeout(async () => {
|
||||
try {
|
||||
await this.flush(options);
|
||||
} catch (error) {
|
||||
this.logger.error('Scheduled flush failed', error);
|
||||
}
|
||||
}, options.flushInterval);
|
||||
}
|
||||
|
||||
/**
|
||||
* Format field value for InfluxDB Line Protocol
|
||||
*/
|
||||
private formatFieldValue(value: number | string | boolean): string {
|
||||
if (typeof value === 'string') {
|
||||
return `"${value.replace(/"/g, '\\"')}"`;
|
||||
} else if (typeof value === 'boolean') {
|
||||
return value ? 'true' : 'false';
|
||||
} else {
|
||||
return value.toString();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Escape tag key
|
||||
*/
|
||||
private escapeTagKey(key: string): string {
|
||||
return key.replace(/[, =]/g, '\\$&');
|
||||
}
|
||||
|
||||
/**
|
||||
* Escape tag value
|
||||
*/
|
||||
private escapeTagValue(value: string): string {
|
||||
return value.replace(/[, =]/g, '\\$&');
|
||||
}
|
||||
|
||||
/**
|
||||
* Escape field key
|
||||
*/
|
||||
private escapeFieldKey(key: string): string {
|
||||
return key.replace(/[, =]/g, '\\$&');
|
||||
}
|
||||
|
||||
/**
|
||||
* Sleep utility
|
||||
*/
|
||||
private sleep(ms: number): Promise<void> {
|
||||
return new Promise(resolve => setTimeout(resolve, ms));
|
||||
}
|
||||
|
||||
/**
|
||||
* Cleanup resources
|
||||
*/
|
||||
public destroy(): void {
|
||||
this.clearBuffer();
|
||||
this.logger.debug('InfluxDB writer destroyed');
|
||||
}
|
||||
}
|
||||
374
libs/data/questdb/src/query-builder.ts
Normal file
374
libs/data/questdb/src/query-builder.ts
Normal file
|
|
@ -0,0 +1,374 @@
|
|||
import { getLogger } from '@stock-bot/logger';
|
||||
import type {
|
||||
QueryResult,
|
||||
TableNames,
|
||||
TimeRange,
|
||||
} from './types';
|
||||
|
||||
// Interface to avoid circular dependency
|
||||
interface QuestDBClientInterface {
|
||||
query<T = any>(sql: string, params?: any[]): Promise<QueryResult<T>>;
|
||||
}
|
||||
|
||||
/**
|
||||
* QuestDB Query Builder
|
||||
*
|
||||
* Provides a fluent interface for building optimized time-series queries
|
||||
* with support for QuestDB-specific functions and optimizations.
|
||||
*/
|
||||
export class QuestDBQueryBuilder {
|
||||
private readonly logger: ReturnType<typeof getLogger>;
|
||||
private query!: {
|
||||
select: string[];
|
||||
from: string;
|
||||
where: string[];
|
||||
groupBy: string[];
|
||||
orderBy: string[];
|
||||
limit?: number;
|
||||
sampleBy?: string;
|
||||
latestBy?: string[];
|
||||
timeRange?: TimeRange;
|
||||
};
|
||||
constructor(private readonly client: QuestDBClientInterface) {
|
||||
this.logger = getLogger('questdb-query-builder');
|
||||
this.reset();
|
||||
}
|
||||
|
||||
/**
|
||||
* Reset the query builder
|
||||
*/
|
||||
private reset(): QuestDBQueryBuilder {
|
||||
this.query = {
|
||||
select: [],
|
||||
from: '',
|
||||
where: [],
|
||||
groupBy: [],
|
||||
orderBy: [],
|
||||
sampleBy: undefined,
|
||||
latestBy: undefined,
|
||||
timeRange: undefined,
|
||||
};
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Start a new query
|
||||
*/
|
||||
public static create(client: QuestDBClientInterface): QuestDBQueryBuilder {
|
||||
return new QuestDBQueryBuilder(client);
|
||||
}
|
||||
|
||||
/**
|
||||
* Select columns
|
||||
*/
|
||||
public select(...columns: string[]): QuestDBQueryBuilder {
|
||||
this.query.select.push(...columns);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Select with aggregation functions
|
||||
*/
|
||||
public selectAgg(aggregations: Record<string, string>): QuestDBQueryBuilder {
|
||||
Object.entries(aggregations).forEach(([alias, expression]) => {
|
||||
this.query.select.push(`${expression} as ${alias}`);
|
||||
});
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* From table
|
||||
*/
|
||||
public from(table: TableNames | string): QuestDBQueryBuilder {
|
||||
this.query.from = table;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Where condition
|
||||
*/
|
||||
public where(condition: string): QuestDBQueryBuilder {
|
||||
this.query.where.push(condition);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Where symbol equals
|
||||
*/
|
||||
public whereSymbol(symbol: string): QuestDBQueryBuilder {
|
||||
this.query.where.push(`symbol = '${symbol}'`);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Where symbols in list
|
||||
*/
|
||||
public whereSymbolIn(symbols: string[]): QuestDBQueryBuilder {
|
||||
const symbolList = symbols.map(s => `'${s}'`).join(', ');
|
||||
this.query.where.push(`symbol IN (${symbolList})`);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Where exchange equals
|
||||
*/
|
||||
public whereExchange(exchange: string): QuestDBQueryBuilder {
|
||||
this.query.where.push(`exchange = '${exchange}'`);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Time range filter
|
||||
*/
|
||||
public whereTimeRange(startTime: Date, endTime: Date): QuestDBQueryBuilder {
|
||||
this.query.timeRange = { startTime, endTime };
|
||||
this.query.where.push(
|
||||
`timestamp >= '${startTime.toISOString()}' AND timestamp <= '${endTime.toISOString()}'`
|
||||
);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Last N hours
|
||||
*/
|
||||
public whereLastHours(hours: number): QuestDBQueryBuilder {
|
||||
this.query.where.push(`timestamp > dateadd('h', -${hours}, now())`);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Last N days
|
||||
*/
|
||||
public whereLastDays(days: number): QuestDBQueryBuilder {
|
||||
this.query.where.push(`timestamp > dateadd('d', -${days}, now())`);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Group by columns
|
||||
*/
|
||||
public groupBy(...columns: string[]): QuestDBQueryBuilder {
|
||||
this.query.groupBy.push(...columns);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Order by column
|
||||
*/
|
||||
public orderBy(column: string, direction: 'ASC' | 'DESC' = 'ASC'): QuestDBQueryBuilder {
|
||||
this.query.orderBy.push(`${column} ${direction}`);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Order by timestamp descending (most recent first)
|
||||
*/
|
||||
public orderByTimeDesc(): QuestDBQueryBuilder {
|
||||
this.query.orderBy.push('timestamp DESC');
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Limit results
|
||||
*/
|
||||
public limit(count: number): QuestDBQueryBuilder {
|
||||
this.query.limit = count;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sample by time interval (QuestDB specific)
|
||||
*/
|
||||
public sampleBy(interval: string): QuestDBQueryBuilder {
|
||||
this.query.sampleBy = interval;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Latest by columns (QuestDB specific)
|
||||
*/
|
||||
public latestBy(...columns: string[]): QuestDBQueryBuilder {
|
||||
this.query.latestBy = columns;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build and execute the query
|
||||
*/
|
||||
public async execute<T = any>(): Promise<QueryResult<T>> {
|
||||
const sql = this.build();
|
||||
this.logger.debug('Executing query', { sql });
|
||||
|
||||
try {
|
||||
const result = await this.client.query<T>(sql);
|
||||
this.reset(); // Reset for next query
|
||||
return result;
|
||||
} catch (error) {
|
||||
this.logger.error('Query execution failed', { sql, error });
|
||||
this.reset(); // Reset even on error
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the SQL query string
|
||||
*/
|
||||
public build(): string {
|
||||
if (!this.query.from) {
|
||||
throw new Error('FROM clause is required');
|
||||
}
|
||||
|
||||
if (this.query.select.length === 0) {
|
||||
this.query.select.push('*');
|
||||
}
|
||||
|
||||
let sql = `SELECT ${this.query.select.join(', ')} FROM ${this.query.from}`;
|
||||
|
||||
// Add WHERE clause
|
||||
if (this.query.where.length > 0) {
|
||||
sql += ` WHERE ${this.query.where.join(' AND ')}`;
|
||||
}
|
||||
|
||||
// Add LATEST BY (QuestDB specific - must come before GROUP BY)
|
||||
if (this.query.latestBy && this.query.latestBy.length > 0) {
|
||||
sql += ` LATEST BY ${this.query.latestBy.join(', ')}`;
|
||||
}
|
||||
|
||||
// Add SAMPLE BY (QuestDB specific)
|
||||
if (this.query.sampleBy) {
|
||||
sql += ` SAMPLE BY ${this.query.sampleBy}`;
|
||||
}
|
||||
|
||||
// Add GROUP BY
|
||||
if (this.query.groupBy.length > 0) {
|
||||
sql += ` GROUP BY ${this.query.groupBy.join(', ')}`;
|
||||
}
|
||||
|
||||
// Add ORDER BY
|
||||
if (this.query.orderBy.length > 0) {
|
||||
sql += ` ORDER BY ${this.query.orderBy.join(', ')}`;
|
||||
}
|
||||
|
||||
// Add LIMIT
|
||||
if (this.query.limit) {
|
||||
sql += ` LIMIT ${this.query.limit}`;
|
||||
}
|
||||
|
||||
return sql;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the built query without executing
|
||||
*/
|
||||
public toSQL(): string {
|
||||
return this.build();
|
||||
}
|
||||
|
||||
// Predefined query methods for common use cases
|
||||
/**
|
||||
* Get latest OHLCV data for symbols
|
||||
*/
|
||||
public static latestOHLCV(
|
||||
client: QuestDBClientInterface,
|
||||
symbols: string[],
|
||||
exchange?: string
|
||||
): QuestDBQueryBuilder {
|
||||
const builder = QuestDBQueryBuilder.create(client)
|
||||
.select('symbol', 'timestamp', 'open', 'high', 'low', 'close', 'volume')
|
||||
.from('ohlcv_data')
|
||||
.whereSymbolIn(symbols)
|
||||
.latestBy('symbol')
|
||||
.orderByTimeDesc();
|
||||
|
||||
if (exchange) {
|
||||
builder.whereExchange(exchange);
|
||||
}
|
||||
|
||||
return builder;
|
||||
}
|
||||
/**
|
||||
* Get OHLCV data with time sampling
|
||||
*/
|
||||
public static ohlcvTimeSeries(
|
||||
client: QuestDBClientInterface,
|
||||
symbol: string,
|
||||
interval: string,
|
||||
hours: number = 24
|
||||
): QuestDBQueryBuilder {
|
||||
return QuestDBQueryBuilder.create(client)
|
||||
.selectAgg({
|
||||
first_open: 'first(open)',
|
||||
max_high: 'max(high)',
|
||||
min_low: 'min(low)',
|
||||
last_close: 'last(close)',
|
||||
sum_volume: 'sum(volume)',
|
||||
})
|
||||
.from('ohlcv_data')
|
||||
.whereSymbol(symbol)
|
||||
.whereLastHours(hours)
|
||||
.sampleBy(interval)
|
||||
.orderByTimeDesc();
|
||||
}
|
||||
/**
|
||||
* Get market analytics data
|
||||
*/
|
||||
public static marketAnalytics(
|
||||
client: QuestDBClientInterface,
|
||||
symbols: string[],
|
||||
hours: number = 1
|
||||
): QuestDBQueryBuilder {
|
||||
return QuestDBQueryBuilder.create(client)
|
||||
.select(
|
||||
'symbol',
|
||||
'timestamp',
|
||||
'rsi',
|
||||
'macd',
|
||||
'bollinger_upper',
|
||||
'bollinger_lower',
|
||||
'volume_sma'
|
||||
)
|
||||
.from('market_analytics')
|
||||
.whereSymbolIn(symbols)
|
||||
.whereLastHours(hours)
|
||||
.orderBy('symbol')
|
||||
.orderByTimeDesc();
|
||||
}
|
||||
/**
|
||||
* Get performance metrics for a time range
|
||||
*/
|
||||
public static performanceMetrics(
|
||||
client: QuestDBClientInterface,
|
||||
startTime: Date,
|
||||
endTime: Date
|
||||
): QuestDBQueryBuilder {
|
||||
return QuestDBQueryBuilder.create(client)
|
||||
.selectAgg({
|
||||
total_trades: 'count(*)',
|
||||
avg_response_time: 'avg(response_time)',
|
||||
max_response_time: 'max(response_time)',
|
||||
error_rate: 'sum(case when success = false then 1 else 0 end) * 100.0 / count(*)',
|
||||
})
|
||||
.from('performance_metrics')
|
||||
.whereTimeRange(startTime, endTime)
|
||||
.sampleBy('1m');
|
||||
}
|
||||
/**
|
||||
* Get trade execution data
|
||||
*/
|
||||
public static tradeExecutions(
|
||||
client: QuestDBClientInterface,
|
||||
symbol?: string,
|
||||
hours: number = 24
|
||||
): QuestDBQueryBuilder {
|
||||
const builder = QuestDBQueryBuilder.create(client)
|
||||
.select('symbol', 'timestamp', 'side', 'quantity', 'price', 'execution_time')
|
||||
.from('trade_executions')
|
||||
.whereLastHours(hours)
|
||||
.orderByTimeDesc();
|
||||
|
||||
if (symbol) {
|
||||
builder.whereSymbol(symbol);
|
||||
}
|
||||
|
||||
return builder;
|
||||
}
|
||||
}
|
||||
394
libs/data/questdb/src/schema.ts
Normal file
394
libs/data/questdb/src/schema.ts
Normal file
|
|
@ -0,0 +1,394 @@
|
|||
import { getLogger } from '@stock-bot/logger';
|
||||
import type { QueryResult, TableSchema } from './types';
|
||||
|
||||
// Structural stand-in for the QuestDB client, declared locally (rather than
// imported) to avoid a circular dependency between the client and schema modules.
// Any object exposing query() satisfies it.
interface QuestDBClientInterface {
  query<T = any>(sql: string, params?: any[]): Promise<QueryResult<T>>;
}
|
||||
|
||||
/**
 * QuestDB Schema Manager
 *
 * Manages database schemas, table creation, and optimization
 * for time-series data storage in QuestDB.
 *
 * Holds an in-memory registry of predefined table schemas (OHLCV, market
 * analytics, trade executions, performance metrics, portfolio positions,
 * risk metrics) and issues DDL against the injected client.
 */
export class QuestDBSchemaManager {
  // Component-scoped logger.
  private readonly logger: ReturnType<typeof getLogger>;
  // Registry of known table schemas, keyed by table name.
  private readonly schemas: Map<string, TableSchema> = new Map();
  constructor(private readonly client: QuestDBClientInterface) {
    this.logger = getLogger('questdb-schema-manager');
    // Populate the registry with the built-in table definitions.
    this.initializeSchemas();
  }

  /**
   * Initialize predefined schemas
   *
   * Registers the built-in table definitions. Each one carries exactly one
   * designated TIMESTAMP column, as validateSchema() later requires.
   */
  private initializeSchemas(): void {
    // OHLCV Data Table — raw candles, partitioned by day.
    this.schemas.set('ohlcv_data', {
      tableName: 'ohlcv_data',
      columns: [
        { name: 'symbol', type: 'SYMBOL', nullable: false },
        { name: 'exchange', type: 'SYMBOL', nullable: false },
        { name: 'timestamp', type: 'TIMESTAMP', nullable: false, designated: true },
        { name: 'open', type: 'DOUBLE', nullable: false },
        { name: 'high', type: 'DOUBLE', nullable: false },
        { name: 'low', type: 'DOUBLE', nullable: false },
        { name: 'close', type: 'DOUBLE', nullable: false },
        { name: 'volume', type: 'LONG', nullable: false },
        { name: 'data_source', type: 'SYMBOL', nullable: true },
      ],
      partitionBy: 'DAY',
      orderBy: ['symbol', 'timestamp'],
      indices: [
        { columns: ['symbol'], type: 'HASH' },
        { columns: ['exchange'], type: 'HASH' },
      ],
    });

    // Market Analytics Table — derived indicator values (nullable: an
    // indicator may be absent for a given row/timeframe).
    this.schemas.set('market_analytics', {
      tableName: 'market_analytics',
      columns: [
        { name: 'symbol', type: 'SYMBOL', nullable: false },
        { name: 'exchange', type: 'SYMBOL', nullable: false },
        { name: 'timestamp', type: 'TIMESTAMP', nullable: false, designated: true },
        { name: 'rsi', type: 'DOUBLE', nullable: true },
        { name: 'macd', type: 'DOUBLE', nullable: true },
        { name: 'signal', type: 'DOUBLE', nullable: true },
        { name: 'histogram', type: 'DOUBLE', nullable: true },
        { name: 'bollinger_upper', type: 'DOUBLE', nullable: true },
        { name: 'bollinger_lower', type: 'DOUBLE', nullable: true },
        { name: 'volume_sma', type: 'DOUBLE', nullable: true },
        { name: 'timeframe', type: 'SYMBOL', nullable: true },
      ],
      partitionBy: 'DAY',
      orderBy: ['symbol', 'timestamp'],
      indices: [
        { columns: ['symbol'], type: 'HASH' },
        { columns: ['timeframe'], type: 'HASH' },
      ],
    });

    // Trade Executions Table — fills with execution latency in execution_time.
    this.schemas.set('trade_executions', {
      tableName: 'trade_executions',
      columns: [
        { name: 'symbol', type: 'SYMBOL', nullable: false },
        { name: 'timestamp', type: 'TIMESTAMP', nullable: false, designated: true },
        { name: 'side', type: 'SYMBOL', nullable: false },
        { name: 'quantity', type: 'DOUBLE', nullable: false },
        { name: 'price', type: 'DOUBLE', nullable: false },
        { name: 'execution_time', type: 'LONG', nullable: false },
        { name: 'order_id', type: 'SYMBOL', nullable: true },
        { name: 'strategy', type: 'SYMBOL', nullable: true },
        { name: 'commission', type: 'DOUBLE', nullable: true },
      ],
      partitionBy: 'DAY',
      orderBy: ['symbol', 'timestamp'],
      indices: [
        { columns: ['symbol'], type: 'HASH' },
        { columns: ['order_id'], type: 'HASH' },
        { columns: ['strategy'], type: 'HASH' },
      ],
    });

    // Performance Metrics Table — high-volume operational data, hence the
    // finer HOUR partitioning compared to the other tables.
    this.schemas.set('performance_metrics', {
      tableName: 'performance_metrics',
      columns: [
        { name: 'timestamp', type: 'TIMESTAMP', nullable: false, designated: true },
        { name: 'operation', type: 'SYMBOL', nullable: false },
        { name: 'response_time', type: 'LONG', nullable: false },
        { name: 'success', type: 'BOOLEAN', nullable: false },
        { name: 'error_code', type: 'SYMBOL', nullable: true },
        { name: 'component', type: 'SYMBOL', nullable: true },
      ],
      partitionBy: 'HOUR',
      orderBy: ['operation', 'timestamp'],
      indices: [
        { columns: ['operation'], type: 'HASH' },
        { columns: ['success'], type: 'HASH' },
      ],
    });

    // Portfolio Positions Table — per-portfolio, per-symbol position snapshots.
    this.schemas.set('portfolio_positions', {
      tableName: 'portfolio_positions',
      columns: [
        { name: 'portfolio_id', type: 'SYMBOL', nullable: false },
        { name: 'symbol', type: 'SYMBOL', nullable: false },
        { name: 'timestamp', type: 'TIMESTAMP', nullable: false, designated: true },
        { name: 'quantity', type: 'DOUBLE', nullable: false },
        { name: 'avg_cost', type: 'DOUBLE', nullable: false },
        { name: 'market_value', type: 'DOUBLE', nullable: false },
        { name: 'unrealized_pnl', type: 'DOUBLE', nullable: false },
        { name: 'realized_pnl', type: 'DOUBLE', nullable: false },
      ],
      partitionBy: 'DAY',
      orderBy: ['portfolio_id', 'symbol', 'timestamp'],
      indices: [
        { columns: ['portfolio_id'], type: 'HASH' },
        { columns: ['symbol'], type: 'HASH' },
      ],
    });

    // Risk Metrics Table — per-portfolio risk figures; all metrics nullable.
    this.schemas.set('risk_metrics', {
      tableName: 'risk_metrics',
      columns: [
        { name: 'portfolio_id', type: 'SYMBOL', nullable: false },
        { name: 'timestamp', type: 'TIMESTAMP', nullable: false, designated: true },
        { name: 'var_1d', type: 'DOUBLE', nullable: true },
        { name: 'var_5d', type: 'DOUBLE', nullable: true },
        { name: 'expected_shortfall', type: 'DOUBLE', nullable: true },
        { name: 'beta', type: 'DOUBLE', nullable: true },
        { name: 'sharpe_ratio', type: 'DOUBLE', nullable: true },
        { name: 'max_drawdown', type: 'DOUBLE', nullable: true },
        { name: 'volatility', type: 'DOUBLE', nullable: true },
      ],
      partitionBy: 'DAY',
      orderBy: ['portfolio_id', 'timestamp'],
      indices: [{ columns: ['portfolio_id'], type: 'HASH' }],
    });
  }

  /**
   * Create all tables
   *
   * Creates every registered table sequentially; stops and rethrows on the
   * first failure (createTable treats "already exists" as success).
   */
  public async createAllTables(): Promise<void> {
    this.logger.info('Creating all QuestDB tables');

    for (const [tableName, schema] of this.schemas) {
      try {
        await this.createTable(schema);
        this.logger.info(`Table ${tableName} created successfully`);
      } catch (error) {
        this.logger.error(`Failed to create table ${tableName}`, error);
        throw error;
      }
    }
  }

  /**
   * Create a single table
   *
   * Idempotent: an "already exists" error from the server is swallowed.
   * Note the generated SQL also uses IF NOT EXISTS, so this branch is a
   * belt-and-braces fallback.
   *
   * @throws Rethrows any other query error.
   */
  public async createTable(schema: TableSchema): Promise<void> {
    const sql = this.buildCreateTableSQL(schema);

    try {
      await this.client.query(sql);
      this.logger.debug(`Table ${schema.tableName} created`, { sql });
    } catch (error) {
      // Check if table already exists
      if (error instanceof Error && error.message.includes('already exists')) {
        this.logger.debug(`Table ${schema.tableName} already exists`);
        return;
      }
      throw error;
    }
  }

  /**
   * Drop a table
   *
   * Destructive; logged at warn level. IF EXISTS makes it safe on a
   * missing table.
   */
  public async dropTable(tableName: string): Promise<void> {
    const sql = `DROP TABLE IF EXISTS ${tableName}`;

    try {
      await this.client.query(sql);
      this.logger.warn(`Table ${tableName} dropped`);
    } catch (error) {
      this.logger.error(`Failed to drop table ${tableName}`, error);
      throw error;
    }
  }

  /**
   * Check if table exists
   *
   * Returns false on any query error rather than throwing.
   *
   * NOTE(review): tableName is interpolated directly into the SQL string —
   * safe for the internal registry names used here, but unquoted/unescaped
   * if ever called with external input; confirm callers.
   * NOTE(review): relies on an information_schema.tables view being served
   * by QuestDB — verify against the deployed QuestDB version.
   */
  public async tableExists(tableName: string): Promise<boolean> {
    try {
      const result = await this.client.query(`
        SELECT COUNT(*) as count
        FROM information_schema.tables
        WHERE table_name = '${tableName}'
      `);

      return result.rows.length > 0 && result.rows[0].count > 0;
    } catch (error) {
      this.logger.error(`Error checking if table exists: ${tableName}`, error);
      return false;
    }
  }

  /**
   * Get table schema
   *
   * @returns The registered schema, or undefined if the name is unknown.
   */
  public getSchema(tableName: string): TableSchema | undefined {
    return this.schemas.get(tableName);
  }

  /**
   * Add custom schema
   *
   * Registers (or overwrites) a schema in the in-memory registry only; no
   * DDL is executed and no validation is performed here — validateSchema()
   * runs only during initializeDatabase().
   */
  public addSchema(schema: TableSchema): void {
    this.schemas.set(schema.tableName, schema);
    this.logger.debug(`Schema added for table: ${schema.tableName}`);
  }

  /**
   * Get all schema names
   */
  public getSchemaNames(): string[] {
    return Array.from(this.schemas.keys());
  }

  /**
   * Optimize table (rebuild indices, etc.)
   *
   * QuestDB optimizes storage itself; this currently only fetches and logs
   * table statistics.
   *
   * @throws If the table has no registered schema or stats retrieval fails.
   */
  public async optimizeTable(tableName: string): Promise<void> {
    const schema = this.schemas.get(tableName);
    if (!schema) {
      throw new Error(`Schema not found for table: ${tableName}`);
    }

    // QuestDB automatically optimizes, but we can analyze table stats
    try {
      const stats = await this.getTableStats(tableName);
      this.logger.debug(`Table ${tableName} stats`, stats);
    } catch (error) {
      this.logger.error(`Failed to optimize table ${tableName}`, error);
      throw error;
    }
  }

  /**
   * Get table statistics
   *
   * @returns Row count plus min/max designated-timestamp values, or an
   *          empty object when the table has no rows.
   */
  public async getTableStats(tableName: string): Promise<any> {
    try {
      const result = await this.client.query(`
        SELECT
          COUNT(*) as row_count,
          MIN(timestamp) as min_timestamp,
          MAX(timestamp) as max_timestamp
        FROM ${tableName}
      `);

      return result.rows[0] || {};
    } catch (error) {
      this.logger.error(`Failed to get table stats for ${tableName}`, error);
      throw error;
    }
  }

  /**
   * Truncate table (remove all data but keep structure)
   *
   * Destructive; logged at warn level.
   */
  public async truncateTable(tableName: string): Promise<void> {
    try {
      await this.client.query(`TRUNCATE TABLE ${tableName}`);
      this.logger.warn(`Table ${tableName} truncated`);
    } catch (error) {
      this.logger.error(`Failed to truncate table ${tableName}`, error);
      throw error;
    }
  }

  /**
   * Create table partitions for future dates
   *
   * Intentionally a no-op: QuestDB creates partitions automatically from the
   * PARTITION BY clause. Kept for future extensibility; _days is unused.
   */
  public async createPartitions(tableName: string, _days: number = 30): Promise<void> {
    // QuestDB handles partitioning automatically based on the PARTITION BY clause
    // This method is for future extensibility
    this.logger.debug(`Partitioning is automatic for table ${tableName}`);
  }

  /**
   * Build CREATE TABLE SQL statement
   *
   * Produces: CREATE TABLE IF NOT EXISTS <name> (<cols>) timestamp(<ts>)
   * PARTITION BY <unit>.
   *
   * NOTE(review): schema.orderBy and schema.indices are not emitted by this
   * builder — only columns, the designated timestamp, and PARTITION BY make
   * it into the DDL; confirm that is intentional.
   */
  private buildCreateTableSQL(schema: TableSchema): string {
    const columns = schema.columns
      .map(col => {
        let columnDef = `${col.name} ${col.type}`;

        if (!col.nullable) {
          columnDef += ' NOT NULL';
        }

        return columnDef;
      })
      .join(', ');

    let sql = `CREATE TABLE IF NOT EXISTS ${schema.tableName} (${columns})`;

    // Add designated timestamp
    const timestampColumn = schema.columns.find(col => col.designated);
    if (timestampColumn) {
      sql += ` timestamp(${timestampColumn.name})`;
    }

    // Add partition by
    if (schema.partitionBy) {
      sql += ` PARTITION BY ${schema.partitionBy}`;
    }

    return sql;
  }

  /**
   * Validate schema definition
   *
   * Requires a table name, at least one column, and exactly one designated
   * timestamp column.
   *
   * @throws Error describing the first violated rule.
   */
  private validateSchema(schema: TableSchema): void {
    if (!schema.tableName) {
      throw new Error('Table name is required');
    }

    if (!schema.columns || schema.columns.length === 0) {
      throw new Error('At least one column is required');
    }

    const timestampColumns = schema.columns.filter(col => col.designated);
    if (timestampColumns.length > 1) {
      throw new Error('Only one designated timestamp column is allowed');
    }

    if (timestampColumns.length === 0) {
      throw new Error('A designated timestamp column is required for time-series tables');
    }
  }

  /**
   * Get table creation status
   *
   * @returns Map of registered table name -> whether it exists server-side.
   *          Checks run sequentially via tableExists().
   */
  public async getTableCreationStatus(): Promise<Record<string, boolean>> {
    const status: Record<string, boolean> = {};

    for (const tableName of this.schemas.keys()) {
      status[tableName] = await this.tableExists(tableName);
    }

    return status;
  }

  /**
   * Initialize database schema
   *
   * Full bootstrap: validates every registered schema, creates all tables,
   * then logs which tables the server reports as existing.
   *
   * @throws On the first invalid schema or failed table creation.
   */
  public async initializeDatabase(): Promise<void> {
    this.logger.info('Initializing QuestDB schema');

    // Validate all schemas first
    for (const schema of this.schemas.values()) {
      this.validateSchema(schema);
    }

    // Create all tables
    await this.createAllTables();

    // Get creation status
    const status = await this.getTableCreationStatus();
    this.logger.info('Database initialization complete', { tableStatus: status });
  }
}
|
||||
304
libs/data/questdb/src/types.ts
Normal file
304
libs/data/questdb/src/types.ts
Normal file
|
|
@ -0,0 +1,304 @@
|
|||
/**
 * QuestDB Client Configuration and Types
 */

/**
 * QuestDB Client Configuration
 *
 * Connection settings for the three QuestDB wire protocols: HTTP (REST),
 * PostgreSQL wire, and InfluxDB line protocol.
 */
export interface QuestDBClientConfig {
  /** Hostname or IP of the QuestDB server. */
  host: string;
  /** REST/HTTP API port. */
  httpPort: number;
  /** PostgreSQL wire-protocol port. */
  pgPort: number;
  /** InfluxDB line-protocol ingestion port. */
  influxPort: number;
  user?: string;
  password?: string;
  database?: string;
  /** TLS settings; omit to connect in plaintext. */
  tls?: {
    enabled: boolean;
    verifyServerCert: boolean;
  };
  // Timeout values — units not shown here; presumably milliseconds, confirm
  // against the client implementation.
  timeouts?: {
    connection: number;
    request: number;
  };
  retryAttempts?: number;
}

/**
 * QuestDB Connection Options
 *
 * Per-connection behavior tuning, separate from the endpoint config above.
 */
export interface QuestDBConnectionOptions {
  /** Preferred wire protocol for this connection. */
  protocol?: 'http' | 'pg' | 'influx';
  retryAttempts?: number;
  // Delay between retries and health-check period — presumably ms; confirm.
  retryDelay?: number;
  healthCheckInterval?: number;
}
|
||||
|
||||
/**
 * Health Status Types
 */
export type QuestDBHealthStatus = 'healthy' | 'degraded' | 'unhealthy';

/** Result of a single health probe across all three protocols. */
export interface QuestDBHealthCheck {
  status: QuestDBHealthStatus;
  timestamp: Date;
  // Probe round-trip time — presumably ms; confirm against the monitor.
  latency: number;
  /** Per-protocol reachability at the time of the check. */
  protocols: {
    http: boolean;
    pg: boolean;
    influx: boolean;
  };
  errors?: string[];
}

/** Aggregate throughput/latency counters for a client instance. */
export interface QuestDBMetrics {
  queriesPerSecond: number;
  insertsPerSecond: number;
  averageQueryTime: number;
  errorRate: number;
  dataIngestionRate: number;
  storageSize: number;
}

/**
 * Table Names for Time-Series Data
 *
 * Known logical table names; query APIs also accept arbitrary strings
 * (see TimeSeriesQuery.table).
 */
export type TableNames =
  | 'ohlcv'
  | 'trades'
  | 'quotes'
  | 'indicators'
  | 'performance'
  | 'risk_metrics'
  | 'market_events'
  | 'strategy_signals'
  | 'portfolio_snapshots';

/**
 * Time-Series Data Types
 */
/** Common fields shared by every time-series record. */
export interface BaseTimeSeriesData {
  timestamp: Date;
  symbol?: string;
}

/** One candle of market data. */
export interface OHLCVData extends BaseTimeSeriesData {
  open: number;
  high: number;
  low: number;
  close: number;
  volume: number;
  timeframe: string; // '1m', '5m', '1h', '1d', etc.
  /** Originating data provider. */
  source: string;
}

/** A single executed trade as reported by an exchange. */
export interface TradeData extends BaseTimeSeriesData {
  trade_id: string;
  price: number;
  quantity: number;
  side: 'buy' | 'sell';
  exchange: string;
  conditions?: string[];
}

/** Top-of-book quote snapshot. */
export interface QuoteData extends BaseTimeSeriesData {
  bid_price: number;
  bid_size: number;
  ask_price: number;
  ask_size: number;
  exchange: string;
  spread: number;
}

/** A computed technical-indicator value. */
export interface IndicatorData extends BaseTimeSeriesData {
  indicator_name: string;
  value: number;
  /** Indicator parameters (e.g. period lengths); shape varies per indicator. */
  parameters?: Record<string, any>;
  timeframe: string;
}

/** Portfolio valuation snapshot. */
export interface PerformanceData extends BaseTimeSeriesData {
  portfolio_id: string;
  total_value: number;
  cash_balance: number;
  unrealized_pnl: number;
  realized_pnl: number;
  daily_return: number;
  cumulative_return: number;
}

/** A named risk metric with an optional alerting threshold. */
export interface RiskMetrics extends BaseTimeSeriesData {
  portfolio_id?: string;
  strategy_id?: string;
  metric_name: string;
  value: number;
  threshold?: number;
  /** 'breach' indicates the value crossed its threshold. */
  status: 'normal' | 'warning' | 'breach';
}

/**
 * Query Result Types
 */
/** Result of a read query; T is the row shape (defaults to any). */
export interface QueryResult<T = any> {
  rows: T[];
  rowCount: number;
  // Query duration — presumably ms; confirm against the client.
  executionTime: number;
  metadata?: {
    columns: Array<{
      name: string;
      type: string;
    }>;
  };
}

/** Result of a write/insert operation. */
export interface InsertResult {
  rowsInserted: number;
  executionTime: number;
  errors?: string[];
}
|
||||
|
||||
/**
 * Schema Definition Types
 */
/** A single column in a table definition (QuestDB column types). */
export interface ColumnDefinition {
  name: string;
  type:
    | 'SYMBOL'
    | 'STRING'
    | 'DOUBLE'
    | 'FLOAT'
    | 'LONG'
    | 'INT'
    | 'BOOLEAN'
    | 'TIMESTAMP'
    | 'DATE'
    | 'BINARY';
  indexed?: boolean;
  capacity?: number; // For SYMBOL type
}

/** A table definition built from ColumnDefinition entries. */
export interface TableDefinition {
  name: string;
  columns: ColumnDefinition[];
  partitionBy?: 'NONE' | 'DAY' | 'MONTH' | 'YEAR';
  timestamp?: string; // Column name to use as designated timestamp
  /** Enable QuestDB deduplication on insert. */
  dedup?: boolean;
}

/**
 * Connection Pool Types
 */
export interface ConnectionPoolConfig {
  minConnections: number;
  maxConnections: number;
  // Timeouts — presumably ms; confirm against the pool implementation.
  idleTimeout: number;
  acquireTimeout: number;
}

/**
 * Health Monitoring Types
 */
/** Coarse health snapshot exposed by the health monitor. */
export interface HealthStatus {
  isHealthy: boolean;
  lastCheck: Date;
  responseTime: number;
  /** Human-readable summary (e.g. 'Connection not established'). */
  message: string;
  error?: Error;
  details?: {
    pgPool: boolean;
    httpEndpoint: boolean;
    uptime: number;
  };
}

/** Cumulative query counters tracked alongside health. */
export interface PerformanceMetrics {
  totalQueries: number;
  successfulQueries: number;
  failedQueries: number;
  averageResponseTime: number;
  /** Null until the first query completes. */
  lastQueryTime: Date | null;
  connectionUptime: number;
  memoryUsage: number;
}
|
||||
|
||||
/**
 * Query Builder Types
 */
/** Declarative description of a time-series query for the query builder. */
export interface TimeSeriesQuery {
  /** Target table; known names or any table string. */
  table: TableNames | string;
  columns?: string[];
  timeRange?: TimeRange;
  groupBy?: string[];
  /** Map of output alias -> aggregate SQL expression. */
  aggregations?: Record<string, string>;
  /** QuestDB SAMPLE BY bucket (e.g. '1m', '1h'). */
  sampleBy?: string;
  /** QuestDB LATEST BY key columns. */
  latestBy?: string[];
  orderBy?: Array<{ column: string; direction: 'ASC' | 'DESC' }>;
  limit?: number;
}

/** Aggregation-only query fragment. */
export interface AggregationQuery {
  /** Map of output alias -> aggregate SQL expression. */
  aggregations: Record<string, string>;
  groupBy?: string[];
  having?: string[];
}

/** Inclusive start/end of a query time window. */
export interface TimeRange {
  startTime: Date;
  endTime: Date;
}

/**
 * InfluxDB Line Protocol Types
 */
/** One line-protocol record: measurement, tag set, field set, timestamp. */
export interface InfluxLineData {
  measurement: string;
  tags: Record<string, string>;
  fields: Record<string, number | string | boolean>;
  /** Omitted timestamp lets the server assign one. */
  timestamp?: Date;
}

/** Batching/retry behavior for the line-protocol writer. */
export interface InfluxWriteOptions {
  batchSize?: number;
  flushInterval?: number;
  autoFlush?: boolean;
  /** Timestamp precision written on the wire. */
  precision?: 'ns' | 'us' | 'ms' | 's';
  retryAttempts?: number;
  retryDelay?: number;
}

/**
 * Schema Management Types
 *
 * Used by QuestDBSchemaManager; richer than TableDefinition above (adds
 * HOUR partitioning, orderBy, indices, designated-timestamp flag).
 */
export interface TableSchema {
  tableName: string;
  columns: ColumnSchema[];
  partitionBy?: 'NONE' | 'HOUR' | 'DAY' | 'MONTH' | 'YEAR';
  orderBy?: string[];
  indices?: IndexDefinition[];
  dedup?: boolean;
}

// NOTE(review): this type union duplicates ColumnDefinition.type above —
// consider extracting a shared alias to keep the two in sync.
export interface ColumnSchema {
  name: string;
  type:
    | 'SYMBOL'
    | 'STRING'
    | 'DOUBLE'
    | 'FLOAT'
    | 'LONG'
    | 'INT'
    | 'BOOLEAN'
    | 'TIMESTAMP'
    | 'DATE'
    | 'BINARY';
  nullable?: boolean;
  designated?: boolean; // For designated timestamp column
  capacity?: number; // For SYMBOL type
  indexed?: boolean;
}

/** Secondary index request on one or more columns. */
export interface IndexDefinition {
  columns: string[];
  type: 'HASH' | 'BTREE';
  unique?: boolean;
}
|
||||
251
libs/data/questdb/test/integration.test.ts
Normal file
251
libs/data/questdb/test/integration.test.ts
Normal file
|
|
@ -0,0 +1,251 @@
|
|||
/**
|
||||
* QuestDB Client Integration Test
|
||||
*
|
||||
* This test validates that all components work together correctly
|
||||
* without requiring an actual QuestDB instance.
|
||||
*/
|
||||
|
||||
import { afterEach, describe, expect, it } from 'bun:test';
|
||||
import {
|
||||
createQuestDBClient,
|
||||
QuestDBClient,
|
||||
QuestDBHealthMonitor,
|
||||
QuestDBInfluxWriter,
|
||||
QuestDBQueryBuilder,
|
||||
QuestDBSchemaManager,
|
||||
} from '../src';
|
||||
import { questdbTestHelpers } from './setup';
|
||||
|
||||
// Offline integration suite: a client is constructed per test but never
// connected; network-touching paths are spied/mocked. Uses beforeEach/afterEach
// and spyOn — all of which must come from the file's 'bun:test' import.
describe('QuestDB Client Integration', () => {
  let client: QuestDBClient;
  // Fresh client per test so state never leaks between cases.
  beforeEach(() => {
    client = new QuestDBClient({
      host: 'localhost',
      httpPort: 9000,
      pgPort: 8812,
      influxPort: 9009,
      database: 'questdb',
      user: 'admin',
      password: 'quest',
    });
  });
  // Best-effort teardown: only disconnect if a test actually connected.
  afterEach(async () => {
    if (client && client.connected) {
      try {
        await client.disconnect();
      } catch {
        // Ignore cleanup errors in tests
      }
    }
  });

  describe('Client Initialization', () => {
    it('should create client with factory function', () => {
      const factoryClient = createQuestDBClient();
      expect(factoryClient).toBeInstanceOf(QuestDBClient);
    });

    it('should initialize all supporting classes', () => {
      expect(client.getHealthMonitor()).toBeInstanceOf(QuestDBHealthMonitor);
      expect(client.queryBuilder()).toBeInstanceOf(QuestDBQueryBuilder);
      expect(client.getInfluxWriter()).toBeInstanceOf(QuestDBInfluxWriter);
      expect(client.getSchemaManager()).toBeInstanceOf(QuestDBSchemaManager);
    });

    it('should handle connection configuration', () => {
      expect(client.getHttpUrl()).toBe('http://localhost:9000');
      expect(client.getInfluxUrl()).toBe('http://localhost:9009');
      expect(client.connected).toBe(false);
    });
  });

  describe('Query Builder', () => {
    // Builders only generate SQL strings, so no connection is needed.
    it('should build query using query builder', () => {
      const query = client
        .queryBuilder()
        .select('symbol', 'close', 'timestamp')
        .from('ohlcv')
        .whereSymbol('AAPL')
        .whereLastHours(24)
        .orderBy('timestamp', 'DESC')
        .limit(100)
        .build();

      expect(query).toContain('SELECT symbol, close, timestamp');
      expect(query).toContain('FROM ohlcv');
      expect(query).toContain("symbol = 'AAPL'");
      expect(query).toContain('ORDER BY timestamp DESC');
      expect(query).toContain('LIMIT 100');
      expect(questdbTestHelpers.validateQuestDBQuery(query)).toBe(true);
    });

    it('should build time-series specific queries', () => {
      // LATEST BY — QuestDB's newest-row-per-key extension.
      const latestQuery = client
        .queryBuilder()
        .select('*')
        .from('ohlcv')
        .latestBy('symbol')
        .build();

      expect(latestQuery).toContain('LATEST BY symbol');
      expect(questdbTestHelpers.validateQuestDBQuery(latestQuery)).toBe(true);

      // SAMPLE BY — QuestDB's time-bucketed aggregation extension.
      const sampleQuery = client
        .queryBuilder()
        .select('symbol', 'avg(close)')
        .from('ohlcv')
        .sampleBy('1d')
        .build();

      expect(sampleQuery).toContain('SAMPLE BY 1d');
      expect(questdbTestHelpers.validateQuestDBQuery(sampleQuery)).toBe(true);
    });

    it('should build aggregation queries', () => {
      const query = client
        .aggregate('ohlcv')
        .select('symbol', 'avg(close) as avg_price', 'max(high) as max_high')
        .whereSymbolIn(['AAPL', 'GOOGL'])
        .groupBy('symbol')
        .sampleBy('1h')
        .build();

      expect(query).toContain('SELECT symbol, avg(close) as avg_price, max(high) as max_high');
      expect(query).toContain('FROM ohlcv');
      expect(query).toContain("symbol IN ('AAPL', 'GOOGL')");
      expect(query).toContain('SAMPLE BY 1h');
      expect(query).toContain('GROUP BY symbol');
      expect(questdbTestHelpers.validateQuestDBQuery(query)).toBe(true);
    });
  });
  describe('InfluxDB Writer', () => {
    it('should write OHLCV data using InfluxDB line protocol', async () => {
      const ohlcvData = [
        {
          timestamp: new Date('2024-01-01T12:00:00Z'),
          open: 150.0,
          high: 152.0,
          low: 149.5,
          close: 151.5,
          volume: 1000000,
        },
      ];

      // Mock the actual write operation
      const writeSpy = spyOn(client.getInfluxWriter(), 'writeOHLCV');
      writeSpy.mockReturnValue(Promise.resolve());
      // NOTE(review): expect(asyncFn).not.toThrow() only checks the
      // synchronous call — the returned promise is never awaited, so a
      // rejection would escape this assertion. Consider
      // `await expect(...).resolves` instead.
      await expect(async () => {
        await client.writeOHLCV('AAPL', 'NASDAQ', ohlcvData);
      }).not.toThrow();
    });

    it('should handle batch operations', () => {
      const lines = questdbTestHelpers.generateInfluxDBLines(3);
      expect(lines.length).toBe(3);

      lines.forEach(line => {
        expect(line).toContain('ohlcv,symbol=TEST');
        expect(line).toMatch(/\d{19}$/); // Nanosecond timestamp
      });
    });
  });

  describe('Schema Manager', () => {
    it('should provide schema access', () => {
      const schema = client.getSchemaManager().getSchema('ohlcv_data');

      expect(schema).toBeDefined();
      expect(schema?.tableName).toBe('ohlcv_data');

      const symbolColumn = schema?.columns.find(col => col.name === 'symbol');
      expect(symbolColumn).toBeDefined();
      expect(symbolColumn?.type).toBe('SYMBOL');

      expect(schema?.partitionBy).toBe('DAY');
    });
  });

  describe('Health Monitor', () => {
    it('should provide health monitoring capabilities', async () => {
      const healthMonitor = client.getHealthMonitor();
      expect(healthMonitor).toBeInstanceOf(QuestDBHealthMonitor);

      // Mock health status since we're not connected
      const mockHealthStatus = {
        isHealthy: false,
        lastCheck: new Date(),
        responseTime: 100,
        message: 'Connection not established',
        details: {
          pgPool: false,
          httpEndpoint: false,
          uptime: 0,
        },
      };

      const healthSpy = spyOn(healthMonitor, 'getHealthStatus');
      healthSpy.mockReturnValue(Promise.resolve(mockHealthStatus));

      const health = await healthMonitor.getHealthStatus();
      expect(health.isHealthy).toBe(false);
      expect(health.lastCheck).toBeInstanceOf(Date);
      expect(health.message).toBe('Connection not established');
    });
  });
  describe('Time-Series Operations', () => {
    it('should support latest by operations', async () => {
      // Mock the query execution
      const mockResult = {
        rows: [{ symbol: 'AAPL', close: 150.0, timestamp: new Date() }],
        rowCount: 1,
        executionTime: 10,
        metadata: { columns: [] },
      };

      const querySpy = spyOn(client, 'query');
      querySpy.mockReturnValue(Promise.resolve(mockResult));

      const result = await client.latestBy('ohlcv', ['symbol', 'close'], 'symbol');
      expect(result.rows.length).toBe(1);
      expect(result.rows[0].symbol).toBe('AAPL');
    });

    it('should support sample by operations', async () => {
      // Mock the query execution
      const mockResult = {
        rows: [{ symbol: 'AAPL', avg_close: 150.0, timestamp: new Date() }],
        rowCount: 1,
        executionTime: 15,
        metadata: { columns: [] },
      };

      const querySpy = spyOn(client, 'query');
      querySpy.mockReturnValue(Promise.resolve(mockResult));
      const result = await client.sampleBy(
        'ohlcv',
        ['symbol', 'avg(close) as avg_close'],
        '1h',
        'timestamp',
        "symbol = 'AAPL'"
      );

      expect(result.rows.length).toBe(1);
      expect(result.executionTime).toBe(15);
    });
  });

  describe('Connection Management', () => {
    it('should handle connection configuration', () => {
      expect(client.getHttpUrl()).toBe('http://localhost:9000');
      expect(client.getInfluxUrl()).toBe('http://localhost:9009');
      expect(client.connected).toBe(false);
    });

    it('should provide configuration access', () => {
      const config = client.configuration;
      expect(config.host).toBe('localhost');
      expect(config.httpPort).toBe(9000);
      expect(config.user).toBe('admin');
    });
  });
});
|
||||
280
libs/data/questdb/test/setup.ts
Normal file
280
libs/data/questdb/test/setup.ts
Normal file
|
|
@ -0,0 +1,280 @@
|
|||
/**
|
||||
* QuestDB Client Test Setup
|
||||
*
|
||||
* Setup file specific to QuestDB client library tests.
|
||||
* Provides utilities and mocks for testing database operations.
|
||||
*/
|
||||
|
||||
import { beforeAll, beforeEach, mock } from 'bun:test';
|
||||
import { newDb } from 'pg-mem';
|
||||
|
||||
// Mock PostgreSQL database for unit tests
|
||||
let pgMem: any;
|
||||
|
||||
beforeAll(() => {
|
||||
// Create in-memory PostgreSQL database
|
||||
pgMem = newDb();
|
||||
|
||||
// Register QuestDB-specific functions
|
||||
pgMem.public.registerFunction({
|
||||
name: 'now',
|
||||
implementation: () => new Date().toISOString(),
|
||||
});
|
||||
|
||||
pgMem.public.registerFunction({
|
||||
name: 'dateadd',
|
||||
args: [{ type: 'text' }, { type: 'int' }, { type: 'timestamp' }],
|
||||
returns: 'timestamp',
|
||||
implementation: (unit: string, amount: number, date: Date) => {
|
||||
const result = new Date(date);
|
||||
switch (unit) {
|
||||
case 'd':
|
||||
case 'day':
|
||||
result.setDate(result.getDate() + amount);
|
||||
break;
|
||||
case 'h':
|
||||
case 'hour':
|
||||
result.setHours(result.getHours() + amount);
|
||||
break;
|
||||
case 'm':
|
||||
case 'minute':
|
||||
result.setMinutes(result.getMinutes() + amount);
|
||||
break;
|
||||
default:
|
||||
throw new Error(`Unsupported date unit: ${unit}`);
|
||||
}
|
||||
return result;
|
||||
},
|
||||
}); // Mock QuestDB HTTP client
|
||||
// Mock fetch using Bun's built-in mock
|
||||
(global as any).fetch = mock(() => {});
|
||||
|
||||
// Mock the logger module to avoid Pino configuration conflicts
|
||||
mock.module('@stock-bot/logger', () => ({
|
||||
Logger: mock(() => ({
|
||||
info: mock(() => {}),
|
||||
warn: mock(() => {}),
|
||||
error: mock(() => {}),
|
||||
debug: mock(() => {}),
|
||||
fatal: mock(() => {}),
|
||||
trace: mock(() => {}),
|
||||
child: mock(() => ({
|
||||
info: mock(() => {}),
|
||||
warn: mock(() => {}),
|
||||
error: mock(() => {}),
|
||||
debug: mock(() => {}),
|
||||
fatal: mock(() => {}),
|
||||
trace: mock(() => {}),
|
||||
})),
|
||||
})),
|
||||
getLogger: mock(() => ({
|
||||
info: mock(() => {}),
|
||||
warn: mock(() => {}),
|
||||
error: mock(() => {}),
|
||||
debug: mock(() => {}),
|
||||
fatal: mock(() => {}),
|
||||
trace: mock(() => {}),
|
||||
child: mock(() => ({
|
||||
info: mock(() => {}),
|
||||
warn: mock(() => {}),
|
||||
error: mock(() => {}),
|
||||
debug: mock(() => {}),
|
||||
fatal: mock(() => {}),
|
||||
trace: mock(() => {}),
|
||||
})),
|
||||
})),
|
||||
}));
|
||||
|
||||
// Mock Pino and its transports to avoid configuration conflicts
|
||||
mock.module('pino', () => ({
|
||||
default: mock(() => ({
|
||||
info: mock(() => {}),
|
||||
warn: mock(() => {}),
|
||||
error: mock(() => {}),
|
||||
debug: mock(() => {}),
|
||||
fatal: mock(() => {}),
|
||||
trace: mock(() => {}),
|
||||
child: mock(() => ({
|
||||
info: mock(() => {}),
|
||||
warn: mock(() => {}),
|
||||
error: mock(() => {}),
|
||||
debug: mock(() => {}),
|
||||
fatal: mock(() => {}),
|
||||
trace: mock(() => {}),
|
||||
})),
|
||||
})),
|
||||
}));
|
||||
|
||||
mock.module('pino-pretty', () => ({
|
||||
default: mock(() => ({})),
|
||||
}));
|
||||
|
||||
mock.module('pino-loki', () => ({
|
||||
default: mock(() => ({})),
|
||||
}));
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
// Reset database state
|
||||
if (pgMem) {
|
||||
try {
|
||||
pgMem.public.none('DROP TABLE IF EXISTS ohlcv CASCADE');
|
||||
pgMem.public.none('DROP TABLE IF EXISTS trades CASCADE');
|
||||
pgMem.public.none('DROP TABLE IF EXISTS quotes CASCADE');
|
||||
pgMem.public.none('DROP TABLE IF EXISTS indicators CASCADE');
|
||||
pgMem.public.none('DROP TABLE IF EXISTS performance CASCADE');
|
||||
pgMem.public.none('DROP TABLE IF EXISTS risk_metrics CASCADE');
|
||||
} catch {
|
||||
// Tables might not exist, ignore errors
|
||||
}
|
||||
} // Reset fetch mock
|
||||
if ((global as any).fetch) {
|
||||
((global as any).fetch as any).mockClear?.();
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* QuestDB-specific test utilities
|
||||
*/
|
||||
export const questdbTestHelpers = {
|
||||
/**
|
||||
* Get mock PostgreSQL adapter
|
||||
*/
|
||||
getMockPgAdapter: () => pgMem?.adapters?.createPg?.(),
|
||||
|
||||
/**
|
||||
* Execute SQL in mock database
|
||||
*/
|
||||
executeMockSQL: (sql: string, params?: any[]) => {
|
||||
return pgMem?.public?.query(sql, params);
|
||||
},
|
||||
/**
|
||||
* Mock successful QuestDB HTTP response
|
||||
*/ mockQuestDBHttpSuccess: (data: any) => {
|
||||
((global as any).fetch as any).mockResolvedValue?.({
|
||||
ok: true,
|
||||
status: 200,
|
||||
json: async () => data,
|
||||
text: async () => JSON.stringify(data),
|
||||
});
|
||||
},
|
||||
|
||||
/**
|
||||
* Mock QuestDB HTTP error
|
||||
*/ mockQuestDBHttpError: (status: number, message: string) => {
|
||||
((global as any).fetch as any).mockResolvedValue?.({
|
||||
ok: false,
|
||||
status,
|
||||
json: async () => ({ error: message }),
|
||||
text: async () => message,
|
||||
});
|
||||
},
|
||||
|
||||
/**
|
||||
* Mock InfluxDB line protocol response
|
||||
*/ mockInfluxDBSuccess: () => {
|
||||
((global as any).fetch as any).mockResolvedValue?.({
|
||||
ok: true,
|
||||
status: 204,
|
||||
text: async () => '',
|
||||
});
|
||||
},
|
||||
|
||||
/**
|
||||
* Create test OHLCV table
|
||||
*/
|
||||
createTestOHLCVTable: () => {
|
||||
const sql = `
|
||||
CREATE TABLE ohlcv (
|
||||
symbol VARCHAR(10),
|
||||
timestamp TIMESTAMP,
|
||||
open DECIMAL(10,2),
|
||||
high DECIMAL(10,2),
|
||||
low DECIMAL(10,2),
|
||||
close DECIMAL(10,2),
|
||||
volume BIGINT,
|
||||
source VARCHAR(50)
|
||||
)
|
||||
`;
|
||||
return pgMem?.public?.none(sql);
|
||||
},
|
||||
|
||||
/**
|
||||
* Insert test OHLCV data
|
||||
*/
|
||||
insertTestOHLCVData: (data: any[]) => {
|
||||
const sql = `
|
||||
INSERT INTO ohlcv (symbol, timestamp, open, high, low, close, volume, source)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
||||
`;
|
||||
|
||||
return Promise.all(
|
||||
data.map(row =>
|
||||
pgMem?.public?.none(sql, [
|
||||
row.symbol,
|
||||
row.timestamp,
|
||||
row.open,
|
||||
row.high,
|
||||
row.low,
|
||||
row.close,
|
||||
row.volume,
|
||||
row.source || 'test',
|
||||
])
|
||||
)
|
||||
);
|
||||
},
|
||||
|
||||
/**
|
||||
* Generate InfluxDB line protocol test data
|
||||
*/
|
||||
generateInfluxDBLines: (count: number = 5) => {
|
||||
const lines: string[] = [];
|
||||
const baseTime = Date.now() * 1000000; // Convert to nanoseconds
|
||||
|
||||
for (let i = 0; i < count; i++) {
|
||||
const time = baseTime + i * 60000000000; // 1 minute intervals
|
||||
const price = 150 + Math.random() * 10;
|
||||
|
||||
lines.push(
|
||||
`ohlcv,symbol=TEST open=${price},high=${price + 1},low=${price - 1},close=${price + 0.5},volume=1000i ${time}`
|
||||
);
|
||||
}
|
||||
|
||||
return lines;
|
||||
},
|
||||
|
||||
/**
|
||||
* Validate QuestDB query syntax
|
||||
*/
|
||||
validateQuestDBQuery: (query: string): boolean => {
|
||||
// Basic validation for QuestDB-specific syntax
|
||||
const _questdbKeywords = ['SAMPLE BY', 'LATEST BY', 'ASOF JOIN', 'SPLICE JOIN', 'LT JOIN'];
|
||||
|
||||
// Check for valid SQL structure
|
||||
const hasSelect = /SELECT\s+/i.test(query);
|
||||
const hasFrom = /FROM\s+/i.test(query);
|
||||
|
||||
return hasSelect && hasFrom;
|
||||
},
|
||||
|
||||
/**
|
||||
* Mock connection pool
|
||||
*/ createMockPool: () => {
|
||||
const mockQuery = () => Promise.resolve({ rows: [], rowCount: 0 });
|
||||
const mockRelease = () => {};
|
||||
const mockConnect = () =>
|
||||
Promise.resolve({
|
||||
query: mockQuery,
|
||||
release: mockRelease,
|
||||
});
|
||||
const mockEnd = () => Promise.resolve(undefined);
|
||||
|
||||
return {
|
||||
connect: mockConnect,
|
||||
end: mockEnd,
|
||||
totalCount: 0,
|
||||
idleCount: 0,
|
||||
waitingCount: 0,
|
||||
};
|
||||
},
|
||||
};
|
||||
13
libs/data/questdb/tsconfig.json
Normal file
13
libs/data/questdb/tsconfig.json
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
{
|
||||
"extends": "../../../tsconfig.json",
|
||||
"compilerOptions": {
|
||||
"outDir": "./dist",
|
||||
"rootDir": "./src",
|
||||
"composite": true
|
||||
},
|
||||
"include": ["src/**/*"],
|
||||
"references": [
|
||||
{ "path": "../logger" },
|
||||
{ "path": "../types" }
|
||||
]
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue