Add Loki logging configuration to market-data-gateway, plus a data-provider config library, advanced caching, and connection pooling

This commit is contained in:
Bojan Kucera 2025-06-03 11:37:58 -04:00
parent b957fb99aa
commit 1b71fc87ab
72 changed files with 6178 additions and 153 deletions

View file

@ -29,6 +29,15 @@ MAX_DAILY_LOSS=1000
# Logging
LOG_LEVEL=debug
LOG_CONSOLE=true
LOKI_HOST=localhost
LOKI_PORT=3100
LOKI_USERNAME=
LOKI_PASSWORD=
LOKI_RETENTION_DAYS=30
LOKI_LABELS=environment=development,service=stock-bot
LOKI_BATCH_SIZE=100
LOKI_FLUSH_INTERVAL_MS=5000
# Feature Flags
ENABLE_ML_SIGNALS=false

View file

@ -16,7 +16,6 @@
"hono": "^4.6.3",
"@hono/node-server": "^1.8.0",
"ws": "^8.18.0",
"axios": "^1.6.0",
"bull": "^4.12.0",
"ioredis": "^5.4.1",
"zod": "^3.22.0",
@ -29,6 +28,7 @@
"fast-json-stringify": "^5.10.0",
"pino": "^8.17.0",
"dotenv": "^16.3.0",
"@stock-bot/http-client": "*",
"@stock-bot/config": "*",
"@stock-bot/shared-types": "*",
"@stock-bot/event-bus": "*",

View file

@ -0,0 +1,114 @@
// Data Provider Configuration
/**
 * Describes a single upstream market-data provider: how to reach it, how to
 * authenticate, and how aggressively the gateway may call it.
 */
export interface DataProviderConfig {
name: string; // human-readable display name
type: 'rest' | 'websocket' | 'both'; // transport(s) the provider supports
enabled: boolean; // disabled providers are skipped by getEnabledProviders()
endpoints: {
quotes?: string;
candles?: string;
trades?: string;
websocket?: string; // present only for streaming-capable providers
};
// Omitted entirely for providers called unauthenticated (e.g. Yahoo Finance).
authentication?: {
type: 'api_key' | 'bearer' | 'basic';
key?: string;
secret?: string;
token?: string;
};
// Client-side throttling budget; enforcement happens in the gateway, not here.
rateLimits: {
requestsPerSecond: number;
requestsPerMinute: number;
requestsPerHour: number;
};
retryPolicy: {
maxRetries: number;
backoffMultiplier: number; // exponential backoff factor applied per attempt
initialDelayMs: number; // delay before the first retry
};
timeout: number; // per-request timeout in milliseconds
priority: number; // 1-10, higher is better
}
/**
 * Registry of upstream market-data providers, keyed by provider id.
 * API keys are read from the environment once at module load, so the
 * relevant env vars must be set before this module is first imported.
 */
export const dataProviderConfigs: Record<string, DataProviderConfig> = {
'alpha-vantage': {
name: 'Alpha Vantage',
type: 'rest',
enabled: true,
endpoints: {
// Alpha Vantage multiplexes all data kinds through one query URL.
quotes: 'https://www.alphavantage.co/query',
candles: 'https://www.alphavantage.co/query',
},
authentication: {
type: 'api_key',
key: process.env.ALPHA_VANTAGE_API_KEY, // undefined when unset — no validation here
},
// NOTE(review): these limits look optimistic for Alpha Vantage's free
// tier — confirm they match the subscribed plan.
rateLimits: {
requestsPerSecond: 5,
requestsPerMinute: 500,
requestsPerHour: 25000,
},
retryPolicy: {
maxRetries: 3,
backoffMultiplier: 2,
initialDelayMs: 1000,
},
timeout: 10000,
priority: 7,
},
'yahoo-finance': {
name: 'Yahoo Finance',
type: 'rest',
enabled: true,
endpoints: {
quotes: 'https://query1.finance.yahoo.com/v8/finance/chart',
candles: 'https://query1.finance.yahoo.com/v8/finance/chart',
},
// No authentication block: the chart endpoint is called unauthenticated.
rateLimits: {
requestsPerSecond: 10,
requestsPerMinute: 2000,
requestsPerHour: 100000,
},
retryPolicy: {
maxRetries: 3,
backoffMultiplier: 1.5,
initialDelayMs: 500,
},
timeout: 8000,
priority: 8,
},
'polygon': {
name: 'Polygon.io',
type: 'both', // REST plus streaming websocket
enabled: false, // off by default; enable once POLYGON_API_KEY is provisioned
endpoints: {
quotes: 'https://api.polygon.io/v2/last/nbbo',
candles: 'https://api.polygon.io/v2/aggs/ticker',
trades: 'https://api.polygon.io/v3/trades',
websocket: 'wss://socket.polygon.io/stocks',
},
authentication: {
type: 'api_key',
key: process.env.POLYGON_API_KEY,
},
rateLimits: {
requestsPerSecond: 100,
requestsPerMinute: 5000,
requestsPerHour: 100000,
},
retryPolicy: {
maxRetries: 5,
backoffMultiplier: 2,
initialDelayMs: 200,
},
timeout: 5000,
priority: 9, // preferred over the free sources when enabled
},
};
export function getEnabledProviders(): DataProviderConfig[] {
return Object.values(dataProviderConfigs).filter(config => config.enabled);
}
/**
 * Enabled providers ordered highest-priority first. The list from
 * getEnabledProviders() is freshly created, so sorting it in place is safe.
 */
export function getProviderByPriority(): DataProviderConfig[] {
  const ranked = getEnabledProviders();
  ranked.sort((first, second) => second.priority - first.priority);
  return ranked;
}

View file

@ -1,4 +1,4 @@
// Market Data Gateway - Unified Implementation
// Market Data Gateway - Enhanced Implementation
import { Hono } from 'hono';
import { cors } from 'hono/cors';
import { logger } from 'hono/logger';
@ -8,6 +8,12 @@ import { WebSocketServer } from 'ws';
// Types
import { GatewayConfig } from './types/MarketDataGateway';
// Services
import { DataNormalizer, DataNormalizationResult } from './services/DataNormalizer';
import { MarketDataCache } from './services/AdvancedCache';
import { ConnectionPoolManager } from './services/ConnectionPoolManager';
import { dataProviderConfigs, getEnabledProviders } from './config/DataProviderConfig';
// Simple logger interface
interface Logger {
info: (message: string, ...args: any[]) => void;
@ -18,12 +24,26 @@ interface Logger {
// Create application logger
const appLogger: Logger = {
info: (message: string, ...args: any[]) => console.log(`[MDG-UNIFIED] [INFO] ${message}`, ...args),
error: (message: string, ...args: any[]) => console.error(`[MDG-UNIFIED] [ERROR] ${message}`, ...args),
warn: (message: string, ...args: any[]) => console.warn(`[MDG-UNIFIED] [WARN] ${message}`, ...args),
debug: (message: string, ...args: any[]) => console.debug(`[MDG-UNIFIED] [DEBUG] ${message}`, ...args),
info: (message: string, ...args: any[]) => console.log(`[MDG-ENHANCED] [INFO] ${message}`, ...args),
error: (message: string, ...args: any[]) => console.error(`[MDG-ENHANCED] [ERROR] ${message}`, ...args),
warn: (message: string, ...args: any[]) => console.warn(`[MDG-ENHANCED] [WARN] ${message}`, ...args),
debug: (message: string, ...args: any[]) => console.debug(`[MDG-ENHANCED] [DEBUG] ${message}`, ...args),
};
// Initialize services
// Module-level singletons shared by every route handler below.
const dataNormalizer = new DataNormalizer();
const marketDataCache = new MarketDataCache();
// HTTP connection pool for outbound provider calls; these limits are
// gateway-wide defaults, not per-provider settings.
const connectionPool = new ConnectionPoolManager({
maxConnections: 100,
maxConnectionsPerHost: 20,
connectionTimeout: 10000,
requestTimeout: 30000,
retryAttempts: 3,
retryDelay: 1000,
keepAlive: true,
maxIdleTime: 300000, // 5 minutes
});
// Configuration matching the GatewayConfig interface
const config: GatewayConfig = {
server: {
@ -36,7 +56,46 @@ const config: GatewayConfig = {
headers: ['Content-Type', 'Authorization'],
},
},
dataSources: [], // Array of DataSourceConfig, initially empty
dataSources: getEnabledProviders().map(provider => ({
id: provider.name.toLowerCase().replace(/\s+/g, '-'),
name: provider.name,
type: provider.type === 'both' ? 'websocket' : provider.type as any,
enabled: provider.enabled,
priority: provider.priority,
rateLimit: {
requestsPerSecond: provider.rateLimits.requestsPerSecond,
burstLimit: provider.rateLimits.requestsPerMinute,
},
connection: {
url: provider.endpoints.quotes || provider.endpoints.websocket || '',
authentication: provider.authentication ? {
type: provider.authentication.type === 'api_key' ? 'apikey' as const : 'basic' as const,
credentials: {
apiKey: provider.authentication.key || '',
secret: provider.authentication.secret || '',
token: provider.authentication.token || '',
},
} : undefined,
},
subscriptions: {
quotes: true,
trades: true,
orderbook: provider.endpoints.websocket ? true : false,
candles: true,
news: false,
},
symbols: ['AAPL', 'GOOGL', 'MSFT', 'AMZN', 'TSLA'], // Default symbols
retryPolicy: {
maxRetries: provider.retryPolicy.maxRetries,
backoffMultiplier: provider.retryPolicy.backoffMultiplier,
maxBackoffMs: provider.retryPolicy.initialDelayMs * 10,
},
healthCheck: {
intervalMs: 30000,
timeoutMs: provider.timeout,
expectedLatencyMs: 1000,
},
})),
processing: {
pipelines: [],
bufferSize: 10000,
@ -207,32 +266,134 @@ app.post('/api/v1/subscriptions', async (c) => {
}
});
// Market data endpoints
// Market data endpoints with enhanced functionality
app.get('/api/v1/data/tick/:symbol', async (c) => {
const symbol = c.req.param('symbol');
return c.json({
...mockTickData,
symbol: symbol.toUpperCase(),
});
const symbol = c.req.param('symbol').toUpperCase();
const source = c.req.query('source') || 'yahoo-finance';
try {
// Check cache first
const cacheKey = marketDataCache.getQuoteKey(symbol);
const cachedData = marketDataCache.get(cacheKey);
if (cachedData) {
appLogger.debug(`Cache hit for ${symbol}`);
return c.json({
...cachedData,
cached: true,
timestamp: new Date().toISOString(),
});
}
// Fetch from provider
const provider = dataProviderConfigs[source];
if (!provider || !provider.enabled) {
return c.json({ error: 'Data source not available' }, 400);
}
// Mock data for now (replace with actual API calls)
const mockData = {
symbol,
price: 150.25 + (Math.random() - 0.5) * 10,
volume: Math.floor(Math.random() * 100000),
timestamp: new Date().toISOString(),
bid: 150.20,
ask: 150.30,
source,
};
// Normalize the data
const normalizedResult = dataNormalizer.normalizeMarketData(mockData, source);
if (!normalizedResult.success) {
return c.json({ error: normalizedResult.error }, 500);
}
// Cache the result
marketDataCache.setQuote(symbol, normalizedResult.data);
return c.json({
...normalizedResult.data,
cached: false,
processingTimeMs: normalizedResult.processingTimeMs,
});
} catch (error) {
appLogger.error(`Error fetching tick data for ${symbol}:`, error);
return c.json({ error: 'Internal server error' }, 500);
}
});
app.get('/api/v1/data/candles/:symbol', async (c) => {
const symbol = c.req.param('symbol');
const symbol = c.req.param('symbol').toUpperCase();
const timeframe = c.req.query('timeframe') || '1m';
const limit = parseInt(c.req.query('limit') || '100');
const limit = Math.min(parseInt(c.req.query('limit') || '100'), 1000); // Max 1000
const source = c.req.query('source') || 'yahoo-finance';
const candles = Array.from({ length: limit }, (_, i) => ({
...mockCandleData,
symbol: symbol.toUpperCase(),
timeframe,
timestamp: new Date(Date.now() - i * 60000).toISOString(),
}));
return c.json({ candles });
try {
// Generate cache key
const cacheKey = `candles:${symbol}:${timeframe}:${limit}`;
const cachedData = marketDataCache.get(cacheKey);
if (cachedData) {
appLogger.debug(`Cache hit for candles ${symbol}:${timeframe}`);
return c.json({
candles: cachedData,
cached: true,
count: cachedData.length,
});
}
// Mock candle data generation (replace with actual API calls)
const candles = Array.from({ length: limit }, (_, i) => {
const timestamp = new Date(Date.now() - i * 60000);
const basePrice = 150 + (Math.random() - 0.5) * 20;
const variation = (Math.random() - 0.5) * 2;
return {
symbol,
timeframe,
timestamp: timestamp.toISOString(),
open: basePrice + variation,
high: basePrice + variation + Math.random() * 2,
low: basePrice + variation - Math.random() * 2,
close: basePrice + variation + (Math.random() - 0.5),
volume: Math.floor(Math.random() * 10000),
source,
};
}).reverse(); // Oldest first
// Normalize OHLCV data
const normalizedResult = dataNormalizer.normalizeOHLCV(
{ candles: candles.map(c => ({ ...c, timestamp: new Date(c.timestamp) })) },
source
);
if (!normalizedResult.success) {
return c.json({ error: normalizedResult.error }, 500);
}
// Cache the result
marketDataCache.set(cacheKey, normalizedResult.data, marketDataCache['getCandleTTL'](timeframe));
return c.json({
candles: normalizedResult.data,
cached: false,
count: normalizedResult.data?.length || 0,
processingTimeMs: normalizedResult.processingTimeMs,
});
} catch (error) {
appLogger.error(`Error fetching candles for ${symbol}:`, error);
return c.json({ error: 'Internal server error' }, 500);
}
});
// Metrics endpoints
// Enhanced metrics endpoints
app.get('/api/v1/metrics', async (c) => {
const cacheStats = marketDataCache.getStats();
const connectionStats = connectionPool.getStats();
return c.json({
system: {
uptime: process.uptime(),
@ -241,9 +402,91 @@ app.get('/api/v1/metrics', async (c) => {
},
gateway: {
activeConnections: webSocketServer ? webSocketServer.clients.size : 0,
dataSourcesCount: config.dataSources.length,
dataSourcesCount: config.dataSources.filter(ds => ds.enabled).length,
messagesProcessed: 0,
},
cache: cacheStats,
connectionPool: connectionStats,
timestamp: new Date().toISOString(),
});
});
// Data quality assessment endpoint
app.get('/api/v1/data/quality/:symbol', async (c) => {
  const symbol = c.req.param('symbol').toUpperCase();
  const source = c.req.query('source') || 'yahoo-finance';
  try {
    // Build a small mock sample of recent ticks, one per minute going back
    // in time (placeholder until real tick history is wired in).
    const sample = [];
    for (let minutesBack = 0; minutesBack < 10; minutesBack++) {
      sample.push({
        symbol,
        price: 150 + (Math.random() - 0.5) * 10,
        bid: 149.5,
        ask: 150.5,
        volume: Math.floor(Math.random() * 10000),
        timestamp: new Date(Date.now() - minutesBack * 60000),
      });
    }
    const qualityMetrics = dataNormalizer.assessDataQuality(sample, source);
    return c.json({
      symbol,
      source,
      dataPoints: sample.length,
      qualityMetrics,
      timestamp: new Date().toISOString(),
    });
  } catch (error) {
    appLogger.error(`Error assessing data quality for ${symbol}:`, error);
    return c.json({ error: 'Internal server error' }, 500);
  }
});
// Cache management endpoints
app.get('/api/v1/cache/stats', async (c) => {
  // Bounded snapshot: the key listing is capped so the response stays
  // small even when the cache is full.
  const keySample = marketDataCache.keys().slice(0, 100); // Limit to first 100 keys
  return c.json({
    stats: marketDataCache.getStats(),
    keys: keySample,
    timestamp: new Date().toISOString(),
  });
});
app.delete('/api/v1/cache/clear', async (c) => {
  // Wipe every cached entry.
  marketDataCache.clear();
  return c.json({
    message: 'Cache cleared successfully',
    timestamp: new Date().toISOString(),
  });
});
app.delete('/api/v1/cache/key/:key', async (c) => {
  // Remove a single entry; reports whether the key actually existed.
  const cacheKey = c.req.param('key');
  const removed = marketDataCache.delete(cacheKey);
  const message = removed ? 'Key deleted successfully' : 'Key not found';
  return c.json({
    message,
    key: cacheKey,
    deleted: removed,
    timestamp: new Date().toISOString(),
  });
});
// Data providers status endpoint
app.get('/api/v1/providers', async (c) => {
const providers = Object.values(dataProviderConfigs).map(provider => ({
name: provider.name,
enabled: provider.enabled,
type: provider.type,
priority: provider.priority,
rateLimits: provider.rateLimits,
endpoints: Object.keys(provider.endpoints),
}));
return c.json({
providers,
enabled: providers.filter(p => p.enabled).length,
total: providers.length,
timestamp: new Date().toISOString(),
});
});
@ -332,7 +575,7 @@ function setupWebSocketServer(): void {
appLogger.info(`WebSocket server listening on port ${wsPort}`);
}
// Graceful shutdown handler
// Enhanced graceful shutdown handler
async function gracefulShutdown(): Promise<void> {
if (isShuttingDown) return;
isShuttingDown = true;
@ -349,6 +592,14 @@ async function gracefulShutdown(): Promise<void> {
appLogger.info('WebSocket server closed');
}
// Close connection pool
await connectionPool.close();
appLogger.info('Connection pool closed');
// Clean up cache
marketDataCache.destroy();
appLogger.info('Cache destroyed');
appLogger.info('Graceful shutdown completed');
process.exit(0);
} catch (error) {
@ -357,10 +608,19 @@ async function gracefulShutdown(): Promise<void> {
}
}
// Start server function
// Enhanced start server function
async function startServer(): Promise<void> {
try {
appLogger.info('Starting Market Data Gateway...');
appLogger.info('Starting Enhanced Market Data Gateway...');
// Initialize cache event listeners
marketDataCache.on('hit', (key) => appLogger.debug(`Cache hit: ${key}`));
marketDataCache.on('miss', (key) => appLogger.debug(`Cache miss: ${key}`));
marketDataCache.on('evict', (key) => appLogger.debug(`Cache evict: ${key}`));
// Initialize connection pool event listeners
connectionPool.on('connectionCreated', (host) => appLogger.debug(`Connection created for: ${host}`));
connectionPool.on('error', ({ host, error }) => appLogger.warn(`Connection error for ${host}: ${error}`));
// Setup WebSocket server
setupWebSocketServer();
@ -369,9 +629,14 @@ async function startServer(): Promise<void> {
process.on('SIGTERM', gracefulShutdown);
process.on('SIGINT', gracefulShutdown);
// Log service status
appLogger.info(`HTTP server starting on ${config.server.host}:${config.server.port}`);
appLogger.info(`WebSocket server running on port ${config.server.port + 1}`);
appLogger.info('Market Data Gateway started successfully');
appLogger.info(`Data sources configured: ${config.dataSources.length}`);
appLogger.info(`Enabled providers: ${config.dataSources.filter(ds => ds.enabled).length}`);
appLogger.info(`Cache max size: ${marketDataCache['config'].maxSize}`);
appLogger.info(`Connection pool max connections: ${connectionPool['config'].maxConnections}`);
appLogger.info('Enhanced Market Data Gateway started successfully');
} catch (error) {
appLogger.error('Failed to start server:', error);

View file

@ -0,0 +1,361 @@
import { EventEmitter } from 'events';
/** A single cached value plus the bookkeeping needed for TTL and LRU eviction. */
export interface CacheEntry<T> {
  data: T;
  timestamp: number; // epoch ms when the entry was stored
  ttl: number; // time-to-live in ms, measured from `timestamp`
  hits: number; // number of successful reads of this entry
  lastAccessed: number; // epoch ms of the most recent read; drives LRU eviction
}
/** Aggregate counters reported by getStats(). */
export interface CacheStats {
  totalEntries: number;
  memoryUsage: number; // rough byte estimate, see estimateMemoryUsage()
  hitRate: number; // hits / (hits + misses); 0 before any lookup
  totalHits: number;
  totalMisses: number;
  averageAccessTime: number; // mean get() latency in ms (tracked only when enableStats)
}
/** Construction-time tuning knobs for AdvancedCache. */
export interface CacheConfig {
  maxSize: number; // entry cap; exceeding it triggers LRU eviction
  defaultTtl: number; // fallback TTL (ms) for set() calls without an explicit ttl
  cleanupInterval: number; // period (ms) of the background expired-entry sweep
  enableStats: boolean; // when true, access-latency stats are accumulated
  compressionEnabled: boolean; // reserved; not implemented by this class
}
/**
 * In-memory TTL cache with LRU eviction, event emission, and statistics.
 *
 * Emits: 'hit', 'miss', 'expired', 'set', 'delete', 'clear', 'evict',
 * 'cleanup', 'error'. Expired entries are dropped lazily on access and by a
 * periodic background sweep started in the constructor.
 */
export class AdvancedCache<T = any> extends EventEmitter {
  private cache = new Map<string, CacheEntry<T>>();
  private stats = {
    hits: 0,
    misses: 0,
    totalAccessTime: 0,
    accessCount: 0,
  };
  private cleanupTimer: NodeJS.Timeout | null = null;
  constructor(private config: CacheConfig) {
    super();
    this.startCleanupTimer();
  }
  /**
   * Get value from cache. Returns null on a miss or when the entry has
   * expired (expired entries are deleted on the spot).
   */
  get(key: string): T | null {
    const startTime = Date.now();
    const entry = this.cache.get(key);
    if (!entry) {
      this.stats.misses++;
      this.emit('miss', key);
      return null;
    }
    // Check if entry has expired; lazy deletion counts as a miss.
    if (Date.now() > entry.timestamp + entry.ttl) {
      this.cache.delete(key);
      this.stats.misses++;
      this.emit('expired', key, entry);
      return null;
    }
    // Update access statistics
    entry.hits++;
    entry.lastAccessed = Date.now();
    this.stats.hits++;
    if (this.config.enableStats) {
      this.stats.totalAccessTime += Date.now() - startTime;
      this.stats.accessCount++;
    }
    this.emit('hit', key, entry);
    return entry.data;
  }
  /**
   * Set value in cache. When the cache is full and `key` is new, the least
   * recently used entry is evicted first. Note: a ttl of 0 falls back to
   * the configured default (`||` semantics).
   */
  set(key: string, value: T, ttl?: number): void {
    const effectiveTtl = ttl || this.config.defaultTtl;
    // Check cache size limits
    if (this.cache.size >= this.config.maxSize && !this.cache.has(key)) {
      this.evictLeastUsed();
    }
    const entry: CacheEntry<T> = {
      data: value,
      timestamp: Date.now(),
      ttl: effectiveTtl,
      hits: 0,
      lastAccessed: Date.now(),
    };
    this.cache.set(key, entry);
    this.emit('set', key, entry);
  }
  /** Delete value from cache. Returns true if the key existed. */
  delete(key: string): boolean {
    const deleted = this.cache.delete(key);
    if (deleted) {
      this.emit('delete', key);
    }
    return deleted;
  }
  /** Check if key exists and has not expired (expired entries are removed). */
  has(key: string): boolean {
    const entry = this.cache.get(key);
    if (!entry) return false;
    // Check if expired
    if (Date.now() > entry.timestamp + entry.ttl) {
      this.cache.delete(key);
      return false;
    }
    return true;
  }
  /** Clear all cache entries and reset statistics. */
  clear(): void {
    this.cache.clear();
    this.resetStats();
    this.emit('clear');
  }
  /** Get cache statistics (hit rate, estimated memory, latency averages). */
  getStats(): CacheStats {
    const memoryUsage = this.estimateMemoryUsage();
    const hitRate = this.stats.hits + this.stats.misses > 0
      ? this.stats.hits / (this.stats.hits + this.stats.misses)
      : 0;
    const averageAccessTime = this.stats.accessCount > 0
      ? this.stats.totalAccessTime / this.stats.accessCount
      : 0;
    return {
      totalEntries: this.cache.size,
      memoryUsage,
      hitRate,
      totalHits: this.stats.hits,
      totalMisses: this.stats.misses,
      averageAccessTime,
    };
  }
  /** Get all cache keys (may include entries that have expired but not yet swept). */
  keys(): string[] {
    return Array.from(this.cache.keys());
  }
  /** Get cache size (entry count). */
  size(): number {
    return this.cache.size;
  }
  /**
   * Get a cached value, or compute and cache it via `loader` on a miss.
   * Note: a cached value of literal null is indistinguishable from a miss
   * and will be recomputed. Loader failures are re-thrown after emitting
   * an 'error' event.
   */
  async getOrSet<K>(
    key: string,
    loader: () => Promise<K>,
    ttl?: number
  ): Promise<K> {
    const cached = this.get(key) as K;
    if (cached !== null) {
      return cached;
    }
    try {
      const value = await loader();
      this.set(key, value as any, ttl);
      return value;
    } catch (error) {
      this.emit('error', key, error);
      throw error;
    }
  }
  /** Batch get: returns a map of key -> value (null for misses). */
  mget(keys: string[]): Map<string, T | null> {
    const result = new Map<string, T | null>();
    for (const key of keys) {
      result.set(key, this.get(key));
    }
    return result;
  }
  /** Batch set: stores every pair with the same optional ttl. */
  mset(entries: Map<string, T>, ttl?: number): void {
    for (const [key, value] of entries) {
      this.set(key, value, ttl);
    }
  }
  /**
   * Clean up expired entries eagerly. Returns the number removed; an
   * 'expired' event is emitted per entry.
   */
  cleanup(): number {
    const now = Date.now();
    let removedCount = 0;
    for (const [key, entry] of this.cache.entries()) {
      if (now > entry.timestamp + entry.ttl) {
        this.cache.delete(key);
        removedCount++;
        this.emit('expired', key, entry);
      }
    }
    return removedCount;
  }
  /**
   * Evict the least recently used entry.
   * Bug fix: the scan previously started from `Date.now()` with a strict
   * `<` comparison, so entries last touched in the current millisecond
   * could never be selected and the cache silently grew past maxSize.
   * Starting from Infinity guarantees a victim is found whenever the
   * cache is non-empty.
   */
  private evictLeastUsed(): void {
    let oldestKey: string | null = null;
    let oldestTime = Infinity;
    for (const [key, entry] of this.cache.entries()) {
      if (entry.lastAccessed < oldestTime) {
        oldestTime = entry.lastAccessed;
        oldestKey = key;
      }
    }
    if (oldestKey) {
      this.cache.delete(oldestKey);
      this.emit('evict', oldestKey);
    }
  }
  /**
   * Estimate memory usage in bytes. Rough: key + JSON-serialized data at
   * two bytes per character, plus fixed per-entry overhead. JSON.stringify
   * will throw on circular data, so keep cached values serializable.
   */
  private estimateMemoryUsage(): number {
    let totalSize = 0;
    for (const [key, entry] of this.cache.entries()) {
      totalSize += key.length * 2; // UTF-16 encoding
      totalSize += JSON.stringify(entry.data).length * 2;
      totalSize += 64; // Overhead for entry metadata
    }
    return totalSize;
  }
  /** Reset hit/miss/latency statistics. */
  private resetStats(): void {
    this.stats = {
      hits: 0,
      misses: 0,
      totalAccessTime: 0,
      accessCount: 0,
    };
  }
  /**
   * Start (or restart) the periodic expired-entry sweep. The timer is
   * unref'd so a cache that was never destroy()ed does not keep the Node
   * process alive on its own.
   */
  private startCleanupTimer(): void {
    if (this.cleanupTimer) {
      clearInterval(this.cleanupTimer);
    }
    this.cleanupTimer = setInterval(() => {
      const removed = this.cleanup();
      if (removed > 0) {
        this.emit('cleanup', removed);
      }
    }, this.config.cleanupInterval);
    this.cleanupTimer.unref?.();
  }
  /** Stop the cleanup timer, clear all entries, and drop all listeners. */
  destroy(): void {
    if (this.cleanupTimer) {
      clearInterval(this.cleanupTimer);
      this.cleanupTimer = null;
    }
    this.clear();
    this.removeAllListeners();
  }
}
// Specialized cache for market data
/**
 * Cache preconfigured for market-data workloads. Quotes, candles, and order
 * books get dedicated key namespaces and TTLs scaled to how quickly each
 * kind of data goes stale.
 */
export class MarketDataCache extends AdvancedCache {
  constructor() {
    super({
      maxSize: 10000,
      defaultTtl: 60000, // 1 minute
      cleanupInterval: 30000, // 30 seconds
      enableStats: true,
      compressionEnabled: false,
    });
  }
  // Market data specific cache keys
  /** Key for the latest quote of `symbol`. */
  getQuoteKey(symbol: string): string {
    return `quote:${symbol}`;
  }
  /** Key for one candle of `symbol` at `timeframe` opening at `timestamp`. */
  getCandleKey(symbol: string, timeframe: string, timestamp: Date): string {
    return `candle:${symbol}:${timeframe}:${timestamp.getTime()}`;
  }
  /** Key for the order-book snapshot of `symbol`. */
  getOrderBookKey(symbol: string): string {
    return `orderbook:${symbol}`;
  }
  // Market data specific TTLs
  /** Cache a quote for 1 minute. */
  setQuote(symbol: string, data: any): void {
    this.set(this.getQuoteKey(symbol), data, 60000); // 1 minute
  }
  /** Cache a candle with a TTL scaled to its timeframe. */
  setCandle(symbol: string, timeframe: string, timestamp: Date, data: any): void {
    const ttl = this.getCandleTTL(timeframe);
    this.set(this.getCandleKey(symbol, timeframe, timestamp), data, ttl);
  }
  /** Cache an order-book snapshot for 30 seconds. */
  setOrderBook(symbol: string, data: any): void {
    this.set(this.getOrderBookKey(symbol), data, 30000); // 30 seconds
  }
  /**
   * TTL (ms) for candle data, scaled to the timeframe; unknown timeframes
   * default to 5 minutes. Made public (was private) so callers that build
   * their own candle cache keys can reuse the same TTL policy instead of
   * resorting to `cache['getCandleTTL'](...)` bracket-access hacks.
   */
  getCandleTTL(timeframe: string): number {
    const ttlMap: Record<string, number> = {
      '1m': 60000, // 1 minute
      '5m': 300000, // 5 minutes
      '15m': 900000, // 15 minutes
      '1h': 3600000, // 1 hour
      '1d': 86400000, // 24 hours
    };
    return ttlMap[timeframe] || 300000; // Default 5 minutes
  }
}

View file

@ -0,0 +1,346 @@
import { EventEmitter } from 'eventemitter3';
import {
BunHttpClient,
RequestConfig,
HttpResponse,
ConnectionStats,
HttpClientConfig
} from '@stock-bot/http-client';
/** Tuning parameters for the outbound HTTP connection pool. */
export interface ConnectionPoolConfig {
maxConnections: number; // global cap on concurrently active requests
maxConnectionsPerHost: number; // per-host cap on concurrently active requests
connectionTimeout: number; // ms; NOTE(review): not read by ConnectionPoolManager in this file — confirm intended use
requestTimeout: number; // ms; forwarded to each underlying HTTP client
retryAttempts: number; // retry count delegated to the HTTP client
retryDelay: number; // ms between retries, delegated to the HTTP client
keepAlive: boolean; // forwarded to the HTTP client for connection reuse
maxIdleTime: number; // ms; NOTE(review): not read by ConnectionPoolManager in this file — confirm intended use
}
/** A request waiting in the pool's FIFO queue together with its promise handles. */
export interface QueuedRequest {
id: string; // unique id from generateRequestId()
config: RequestConfig; // request passed through to the HTTP client
resolve: (value: any) => void; // settles the promise returned by request()
reject: (error: any) => void; // rejects the promise returned by request()
timestamp: number; // epoch ms when the request was enqueued
retryCount: number; // retries observed so far (retries themselves run inside the client)
}
/**
 * Pools BunHttpClient instances per host and throttles concurrent outbound
 * requests, both per host (`maxConnectionsPerHost`) and globally
 * (`maxConnections`). Requests beyond those limits wait in a FIFO queue
 * drained immediately on enqueue and by a 100 ms background processor.
 *
 * Emits: 'connectionCreated', 'response', 'error', 'retry', 'closed'.
 *
 * NOTE(review): when the request at the head of the queue targets a
 * saturated host, draining stops until capacity frees up, delaying queued
 * requests for other hosts too (head-of-line blocking). Confirm this is
 * acceptable for the expected traffic mix.
 */
export class ConnectionPoolManager extends EventEmitter {
  private clients = new Map<string, BunHttpClient>();
  private activeRequests = new Map<string, number>(); // host -> count
  private requestQueue: QueuedRequest[] = [];
  // Pool-level counters; per-client counters are merged in getStats().
  private stats = {
    totalConnections: 0,
    successfulRequests: 0,
    failedRequests: 0,
    totalResponseTime: 0,
    requestCount: 0,
  };
  private isProcessingQueue = false;
  private queueProcessor?: NodeJS.Timeout;
  constructor(private config: ConnectionPoolConfig) {
    super();
    this.startQueueProcessor();
  }
  /**
   * Get or create a client for a host. New clients are wired to forward
   * response/error telemetry into the pool's own stats and events.
   */
  private getClient(host: string): BunHttpClient {
    if (!this.clients.has(host)) {
      const client = new BunHttpClient({
        // Assumes every pooled host speaks HTTPS; `host` may carry a port.
        baseURL: `https://${host}`,
        timeout: this.config.requestTimeout,
        retries: this.config.retryAttempts,
        retryDelay: this.config.retryDelay,
        keepAlive: this.config.keepAlive,
        headers: {
          'User-Agent': 'StockBot-MarketDataGateway/1.0',
          'Accept': 'application/json',
        },
        // 4xx responses resolve normally; only 5xx count as errors.
        validateStatus: (status: number) => status < 500,
      });
      // Listen for events from the client
      client.on('response', (data) => {
        const responseTime = data.response.timing.duration;
        this.updateStats(true, responseTime);
        this.emit('response', {
          host,
          responseTime,
          status: data.response.status
        });
      });
      client.on('error', (data) => {
        // Derive latency from the request's start metadata when available.
        const responseTime = data.error?.config?.metadata?.startTime
          ? Date.now() - data.error.config.metadata.startTime
          : 0;
        this.updateStats(false, responseTime);
        this.emit('error', {
          host,
          error: data.error.message,
          responseTime
        });
      });
      this.clients.set(host, client);
      this.activeRequests.set(host, 0);
      this.stats.totalConnections++;
      this.emit('connectionCreated', host);
    }
    return this.clients.get(host)!;
  }
  /**
   * Make an HTTP request with connection pooling. Resolves with the
   * response body (`response.data`), not the full response object.
   */
  async request(config: RequestConfig): Promise<any> {
    return new Promise((resolve, reject) => {
      const requestId = this.generateRequestId();
      const queuedRequest: QueuedRequest = {
        id: requestId,
        config,
        resolve,
        reject,
        timestamp: Date.now(),
        retryCount: 0,
      };
      this.requestQueue.push(queuedRequest);
      this.processQueue();
    });
  }
  /**
   * Drain the request queue while capacity allows. When the head request's
   * host (or the global pool) is saturated, it is pushed back and draining
   * stops; the periodic processor retries later.
   */
  private async processQueue(): Promise<void> {
    if (this.isProcessingQueue || this.requestQueue.length === 0) {
      return;
    }
    this.isProcessingQueue = true;
    while (this.requestQueue.length > 0) {
      const request = this.requestQueue.shift()!;
      try {
        const host = this.extractHost(request.config.url || '');
        const currentConnections = this.activeRequests.get(host) || 0;
        // Check per-host connection limit
        if (currentConnections >= this.config.maxConnectionsPerHost) {
          // Put request back in queue
          this.requestQueue.unshift(request);
          break;
        }
        // Check global connection limit
        const totalActive = Array.from(this.activeRequests.values()).reduce((sum, count) => sum + count, 0);
        if (totalActive >= this.config.maxConnections) {
          this.requestQueue.unshift(request);
          break;
        }
        // Execute without awaiting so queued requests run concurrently.
        this.executeRequest(request, host);
      } catch (error) {
        request.reject(error);
      }
    }
    this.isProcessingQueue = false;
  }
  /**
   * Execute a single request against its host's pooled client, tracking the
   * active-connection count for the duration. Freed capacity is picked up
   * by the next processQueue() tick.
   */
  private async executeRequest(request: QueuedRequest, host: string): Promise<void> {
    const client = this.getClient(host);
    // Increment active connections
    this.activeRequests.set(host, (this.activeRequests.get(host) || 0) + 1);
    try {
      // Add metadata to track timing
      if (!request.config.metadata) {
        request.config.metadata = {};
      }
      request.config.metadata.startTime = Date.now();
      // Execute request using our client
      const response = await client.request(request.config);
      request.resolve(response.data);
    } catch (error: any) {
      // Retries are handled inside BunHttpClient; by the time we see the
      // error, all attempts are exhausted.
      request.reject(error);
      // Emit retry event for monitoring
      if (error.retryCount) {
        this.emit('retry', {
          requestId: request.id,
          retryCount: error.retryCount,
          error
        });
      }
    } finally {
      // Decrement active connections
      this.activeRequests.set(host, Math.max(0, (this.activeRequests.get(host) || 0) - 1));
    }
  }
  /** Extract host (including port, if any) from a URL; 'default' when unparseable. */
  private extractHost(url: string): string {
    try {
      const urlObj = new URL(url);
      return urlObj.host;
    } catch {
      return 'default';
    }
  }
  /**
   * Generate a unique request ID (timestamp + 9 random base-36 chars).
   * Uses slice() rather than the deprecated String.prototype.substr().
   */
  private generateRequestId(): string {
    return `req_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
  }
  /** Fold one request outcome into the pool-level counters. */
  private updateStats(success: boolean, responseTime: number): void {
    this.stats.requestCount++;
    this.stats.totalResponseTime += responseTime;
    if (success) {
      this.stats.successfulRequests++;
    } else {
      this.stats.failedRequests++;
    }
  }
  /**
   * Get connection pool statistics, merging pool-level counters with the
   * counters reported by every per-host client.
   */
  getStats(): ConnectionStats {
    const totalActive = Array.from(this.activeRequests.values()).reduce((sum, count) => sum + count, 0);
    const averageResponseTime = this.stats.requestCount > 0
      ? this.stats.totalResponseTime / this.stats.requestCount
      : 0;
    const utilization = this.config.maxConnections > 0
      ? totalActive / this.config.maxConnections
      : 0;
    // Combine our stats with the stats from all clients
    const clientStats = Array.from(this.clients.values()).map(client => client.getStats());
    let successfulRequests = this.stats.successfulRequests;
    let failedRequests = this.stats.failedRequests;
    for (const stats of clientStats) {
      successfulRequests += stats.successfulRequests;
      failedRequests += stats.failedRequests;
    }
    return {
      activeConnections: totalActive,
      totalConnections: this.stats.totalConnections,
      successfulRequests,
      failedRequests,
      averageResponseTime,
      connectionPoolUtilization: utilization,
      requestsPerSecond: 0 // Will be calculated by the http-client
    };
  }
  /**
   * Start the 100 ms background queue drain. The interval is unref'd so a
   * pool that was never close()d does not keep the Node process alive.
   */
  private startQueueProcessor(): void {
    this.queueProcessor = setInterval(() => {
      this.processQueue();
    }, 100); // Process queue every 100ms
    this.queueProcessor.unref?.();
  }
  /**
   * Close all connections and clean up: stop the processor, wait up to 30 s
   * for the queue to drain, reject stragglers, then close every client.
   */
  async close(): Promise<void> {
    // Stop the queue processor
    if (this.queueProcessor) {
      clearInterval(this.queueProcessor);
      this.queueProcessor = undefined;
    }
    // Wait for pending requests to complete (with timeout)
    const timeout = 30000; // 30 seconds
    const startTime = Date.now();
    while (this.requestQueue.length > 0 && Date.now() - startTime < timeout) {
      await new Promise(resolve => setTimeout(resolve, 100));
    }
    // Clear remaining requests
    while (this.requestQueue.length > 0) {
      const request = this.requestQueue.shift()!;
      request.reject(new Error('Connection pool closing'));
    }
    // Close all clients
    const closePromises = Array.from(this.clients.values()).map(client => client.close());
    await Promise.all(closePromises);
    // Clear clients and requests
    this.clients.clear();
    this.activeRequests.clear();
    this.emit('closed');
  }
  /**
   * Health check: healthy when utilization < 90%, queue depth < 100,
   * average latency < 5 s, and every per-host client reports healthy.
   */
  async healthCheck(): Promise<{ healthy: boolean; details: any }> {
    const stats = this.getStats();
    const queueSize = this.requestQueue.length;
    // Check health of all clients
    const clientHealthChecks = await Promise.all(
      Array.from(this.clients.entries()).map(async ([host, client]) => {
        const health = await client.healthCheck();
        return {
          host,
          healthy: health.healthy,
          details: health.details
        };
      })
    );
    const healthy =
      stats.connectionPoolUtilization < 0.9 && // Less than 90% utilization
      queueSize < 100 && // Queue not too large
      stats.averageResponseTime < 5000 && // Average response time under 5 seconds
      clientHealthChecks.every(check => check.healthy); // All clients healthy
    return {
      healthy,
      details: {
        stats,
        queueSize,
        clients: clientHealthChecks,
        connections: Array.from(this.clients.keys()),
      },
    };
  }
}

View file

@ -1,36 +1,164 @@
import type { MarketData, OHLCV } from '@stock-bot/shared-types';
import { dataProviderConfigs } from '@stock-bot/config';
import { dataProviderConfigs, DataProviderConfig } from '../config/DataProviderConfig';
// Define local types for market data
// NOTE(review): these shadow MarketData/OHLCV imported above from
// @stock-bot/shared-types — consider consolidating to one definition.
interface MarketDataType {
symbol: string;
price: number; // last trade price
bid: number;
ask: number;
volume: number;
timestamp: Date;
}
/** One OHLCV bar in the normalizer's canonical shape. */
interface OHLCVType {
symbol: string;
timestamp: Date; // bar open time
open: number;
high: number;
low: number;
close: number;
volume: number;
}
/** Envelope returned by the normalize* methods instead of throwing. */
export interface DataNormalizationResult<T> {
success: boolean;
data?: T; // present only when success is true
error?: string; // present only when success is false
source: string; // provider id the raw data came from
timestamp: Date; // when normalization finished
processingTimeMs: number; // wall-clock duration of the normalization
}
/** Quality scores produced by assessDataQuality(); each dimension is 0-1. */
export interface DataQualityMetrics {
completeness: number; // 0-1
accuracy: number; // 0-1
timeliness: number; // 0-1
consistency: number; // 0-1
overall: number; // 0-1
}
export class DataNormalizer {
private readonly providerConfigs: Record<string, DataProviderConfig>;
constructor() {
this.providerConfigs = dataProviderConfigs;
}
/**
* Normalize market data from different providers to our standard format
*/
normalizeMarketData(rawData: any, source: string): MarketData {
switch (source) {
case 'alpha-vantage':
return this.normalizeAlphaVantage(rawData);
case 'yahoo-finance':
return this.normalizeYahooFinance(rawData);
default:
throw new Error(`Unsupported data source: ${source}`);
}
}
normalizeMarketData(rawData: any, source: string): DataNormalizationResult<MarketDataType> {
const startTime = Date.now();
try {
let normalizedData: MarketDataType;
switch (source.toLowerCase()) {
case 'alpha-vantage':
normalizedData = this.normalizeAlphaVantage(rawData);
break;
case 'yahoo-finance':
normalizedData = this.normalizeYahooFinance(rawData);
break;
case 'polygon':
normalizedData = this.normalizePolygon(rawData);
break;
default:
return {
success: false,
error: `Unsupported data source: ${source}`,
source,
timestamp: new Date(),
processingTimeMs: Date.now() - startTime,
};
}
/**
// Validate the normalized data
if (!this.validateMarketData(normalizedData)) {
return {
success: false,
error: 'Data validation failed',
source,
timestamp: new Date(),
processingTimeMs: Date.now() - startTime,
};
}
return {
success: true,
data: normalizedData,
source,
timestamp: new Date(),
processingTimeMs: Date.now() - startTime,
};
} catch (error) {
return {
success: false,
error: error instanceof Error ? error.message : 'Unknown error',
source,
timestamp: new Date(),
processingTimeMs: Date.now() - startTime,
};
}
} /**
* Normalize OHLCV data from different providers
*/
normalizeOHLCV(rawData: any, source: string): OHLCV[] {
switch (source) {
case 'alpha-vantage':
return this.normalizeAlphaVantageOHLCV(rawData);
case 'yahoo-finance':
return this.normalizeYahooFinanceOHLCV(rawData);
default:
throw new Error(`Unsupported data source: ${source}`);
normalizeOHLCV(rawData: any, source: string): DataNormalizationResult<OHLCVType[]> {
const startTime = Date.now();
try {
let normalizedData: OHLCVType[];
switch (source.toLowerCase()) {
case 'alpha-vantage':
normalizedData = this.normalizeAlphaVantageOHLCV(rawData);
break;
case 'yahoo-finance':
normalizedData = this.normalizeYahooFinanceOHLCV(rawData);
break;
case 'polygon':
normalizedData = this.normalizePolygonOHLCV(rawData);
break;
default:
return {
success: false,
error: `Unsupported data source: ${source}`,
source,
timestamp: new Date(),
processingTimeMs: Date.now() - startTime,
};
}
// Validate each OHLCV entry
const validData = normalizedData.filter(item => this.validateOHLCV(item));
if (validData.length === 0) {
return {
success: false,
error: 'No valid OHLCV data after normalization',
source,
timestamp: new Date(),
processingTimeMs: Date.now() - startTime,
};
}
return {
success: true,
data: validData,
source,
timestamp: new Date(),
processingTimeMs: Date.now() - startTime,
};
} catch (error) {
return {
success: false,
error: error instanceof Error ? error.message : 'Unknown error',
source,
timestamp: new Date(),
processingTimeMs: Date.now() - startTime,
};
}
}
private normalizeAlphaVantage(data: any): MarketData {
private normalizeAlphaVantage(data: any): MarketDataType {
const quote = data['Global Quote'];
return {
symbol: quote['01. symbol'],
@ -41,8 +169,7 @@ export class DataNormalizer {
timestamp: new Date(),
};
}
private normalizeYahooFinance(data: any): MarketData {
private normalizeYahooFinance(data: any): MarketDataType {
return {
symbol: data.symbol,
price: data.regularMarketPrice,
@ -53,7 +180,19 @@ export class DataNormalizer {
};
}
private normalizeAlphaVantageOHLCV(data: any): OHLCV[] {
private normalizePolygon(data: any): MarketDataType {
// Polygon.io format normalization
return {
symbol: data.T || data.symbol,
price: data.c || data.price,
bid: data.b || data.bid,
ask: data.a || data.ask,
volume: data.v || data.volume,
timestamp: new Date(data.t || data.timestamp),
};
}
private normalizeAlphaVantageOHLCV(data: any): OHLCVType[] {
const timeSeries = data['Time Series (1min)'] || data['Time Series (5min)'] || data['Time Series (Daily)'];
const symbol = data['Meta Data']['2. Symbol'];
@ -67,8 +206,7 @@ export class DataNormalizer {
volume: parseInt(values['5. volume']),
})).sort((a, b) => b.timestamp.getTime() - a.timestamp.getTime());
}
private normalizeYahooFinanceOHLCV(data: any): OHLCV[] {
private normalizeYahooFinanceOHLCV(data: any): OHLCVType[] {
const result = data.chart.result[0];
const timestamps = result.timestamp;
const quotes = result.indicators.quote[0];
@ -84,26 +222,48 @@ export class DataNormalizer {
}));
}
/**
private normalizePolygonOHLCV(data: any): OHLCVType[] {
// Polygon.io aggregates format
if (data.results && Array.isArray(data.results)) {
return data.results.map((candle: any) => ({
symbol: data.ticker || candle.T,
timestamp: new Date(candle.t),
open: candle.o,
high: candle.h,
low: candle.l,
close: candle.c,
volume: candle.v,
}));
}
return [];
} /**
* Validate market data quality
*/
validateMarketData(data: MarketData): boolean {
validateMarketData(data: MarketDataType): boolean {
return (
data.symbol &&
typeof data.symbol === 'string' &&
data.symbol.length > 0 &&
typeof data.price === 'number' &&
data.price > 0 &&
typeof data.volume === 'number' &&
data.volume >= 0 &&
data.timestamp instanceof Date
data.timestamp instanceof Date &&
!isNaN(data.timestamp.getTime()) &&
typeof data.bid === 'number' &&
typeof data.ask === 'number' &&
data.ask >= data.bid
) as boolean;
}
/**
* Validate OHLCV data quality
*/
validateOHLCV(data: OHLCV): boolean {
validateOHLCV(data: OHLCVType): boolean {
return (
data.symbol &&
typeof data.symbol === 'string' &&
data.symbol.length > 0 &&
typeof data.open === 'number' && data.open > 0 &&
typeof data.high === 'number' && data.high > 0 &&
typeof data.low === 'number' && data.low > 0 &&
@ -111,7 +271,126 @@ export class DataNormalizer {
data.high >= Math.max(data.open, data.close) &&
data.low <= Math.min(data.open, data.close) &&
typeof data.volume === 'number' && data.volume >= 0 &&
data.timestamp instanceof Date
data.timestamp instanceof Date &&
!isNaN(data.timestamp.getTime())
) as boolean;
}
/**
 * Score a batch of market data on four quality dimensions —
 * completeness, accuracy, timeliness, consistency — plus their mean.
 * An empty batch scores zero across the board.
 */
assessDataQuality(data: MarketDataType[], source: string): DataQualityMetrics {
  if (data.length === 0) {
    return { completeness: 0, accuracy: 0, timeliness: 0, consistency: 0, overall: 0 };
  }

  // Completeness: fraction of entries that pass structural validation.
  const validEntries = data.filter(entry => this.validateMarketData(entry));
  const completeness = validEntries.length / data.length;

  // The remaining dimensions are delegated to dedicated scorers.
  const accuracy = this.assessAccuracy(data);
  const timeliness = this.assessTimeliness(data);
  const consistency = this.assessConsistency(data);

  return {
    completeness,
    accuracy,
    timeliness,
    consistency,
    overall: (completeness + accuracy + timeliness + consistency) / 4,
  };
}
// Per-item plausibility score in [0, 1], averaged over the batch.
// Penalties: implausible price (-0.3), spread above 10% of price (-0.2),
// negative volume (-0.5); each item's score is floored at zero.
private assessAccuracy(data: MarketDataType[]): number {
  if (data.length === 0) return 0;

  const perItemScores = data.map(item => {
    let penalty = 0;
    // Prices outside a plausible absolute range are suspect.
    if (item.price <= 0 || item.price > 100000) penalty += 0.3;
    // A bid/ask spread wider than 10% of price is suspicious.
    if ((item.ask - item.bid) / item.price > 0.1) penalty += 0.2;
    // Negative volume is clearly invalid.
    if (item.volume < 0) penalty += 0.5;
    return Math.max(0, 1.0 - penalty);
  });

  return perItemScores.reduce((sum, score) => sum + score, 0) / data.length;
}
// Average freshness score for the batch. Tiers by data age:
// <=5 min -> 1.0, <=15 min -> 0.8, <=60 min -> 0.5, older -> 0.1.
private assessTimeliness(data: MarketDataType[]): number {
  if (data.length === 0) return 0;

  const nowMs = Date.now();
  const total = data.reduce((sum, item) => {
    const ageMinutes = (nowMs - item.timestamp.getTime()) / (1000 * 60);
    let score: number;
    if (ageMinutes > 60) score = 0.1;      // very old data
    else if (ageMinutes > 15) score = 0.5; // moderately old
    else if (ageMinutes > 5) score = 0.8;  // slightly old
    else score = 1.0;                      // fresh
    return sum + score;
  }, 0);

  return total / data.length;
}
// Score consecutive price movements (after sorting by timestamp); large
// jumps are penalized. Fewer than two points is trivially consistent.
private assessConsistency(data: MarketDataType[]): number {
  if (data.length < 2) return 1.0;

  // Work on a chronological copy; never mutate the caller's array.
  const ordered = [...data].sort(
    (a, b) => a.timestamp.getTime() - b.timestamp.getTime()
  );

  let total = 0;
  for (let i = 1; i < ordered.length; i++) {
    const prev = ordered[i - 1];
    const relChange = Math.abs(ordered[i].price - prev.price) / prev.price;
    // >50% move between samples is suspicious; >10% is merely notable.
    let score = 1.0;
    if (relChange > 0.5) score -= 0.7;
    else if (relChange > 0.1) score -= 0.3;
    total += Math.max(0, score);
  }

  return total / (ordered.length - 1);
}
/**
 * Return a cleaned copy of a market data point: symbol upper-cased and
 * trimmed, numeric fields coerced (NaN -> 0) and clamped at zero, volume
 * floored to an integer, and the timestamp re-wrapped in a fresh Date.
 */
sanitizeMarketData(data: MarketDataType): MarketDataType {
  // Coerce to number, fall back to 0 on NaN, and clamp at zero.
  const nonNegative = (value: unknown): number => Math.max(0, Number(value) || 0);
  return {
    symbol: data.symbol.toUpperCase().trim(),
    price: nonNegative(data.price),
    bid: nonNegative(data.bid),
    ask: nonNegative(data.ask),
    volume: Math.floor(nonNegative(data.volume)),
    timestamp: new Date(data.timestamp),
  };
}
}

View file

@ -1,7 +1,43 @@
import { EventEmitter } from 'eventemitter3';
import { Logger } from 'pino';
// Minimal logger contract used in place of pino to avoid the dependency.
interface Logger {
  info(msg: string, ...args: any[]): void;
  error(msg: string, ...args: any[]): void;
  warn(msg: string, ...args: any[]): void;
  debug(msg: string, ...args: any[]): void;
  child(options: any): Logger;
}
// Console-backed logger factory; every line is prefixed with "[name] LEVEL:".
const createLogger = (name: string): Logger => {
  const emit =
    (sink: (...args: any[]) => void, level: string) =>
    (msg: string, ...args: any[]) =>
      sink(`[${name}] ${level}:`, msg, ...args);
  return {
    info: emit(console.log, 'INFO'),
    error: emit(console.error, 'ERROR'),
    warn: emit(console.warn, 'WARN'),
    debug: emit(console.debug, 'DEBUG'),
    // Children extend the name, mirroring pino's child-logger behavior.
    child: (options: any) => createLogger(`${name}.${options.component || 'child'}`),
  };
};
import WebSocket from 'ws';
import axios, { AxiosInstance } from 'axios';
// Minimal fetch-based HTTP client used in place of axios.
interface HttpClient {
  get(url: string): Promise<{ data: any }>;
  post(url: string, data?: any): Promise<{ data: any }>;
}
// Build a client bound to a base URL plus optional default headers.
const createHttpClient = (baseURL: string, headers?: Record<string, string>): HttpClient => {
  // Wrap a fetch Response into the axios-like `{ data }` shape.
  const asJson = async (response: any) => ({ data: await response.json() });
  return {
    get: (url: string) => fetch(`${baseURL}${url}`, { headers }).then(asJson),
    post: (url: string, data?: any) =>
      fetch(`${baseURL}${url}`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json', ...headers },
        // Falsy payloads are sent without a body, matching the original client.
        body: data ? JSON.stringify(data) : undefined,
      }).then(asJson),
  };
};
import {
DataSourceConfig,
DataSourceMetrics,
@ -13,7 +49,7 @@ import {
interface DataSourceConnection {
config: DataSourceConfig;
connection?: WebSocket | AxiosInstance;
connection?: WebSocket | HttpClient;
status: 'disconnected' | 'connecting' | 'connected' | 'error';
lastConnectedAt?: Date;
lastErrorAt?: Date;
@ -112,9 +148,8 @@ export class DataSourceManager extends EventEmitter {
await this.connectDataSource(config.id);
}
}
public async removeDataSource(sourceId: string): Promise<void> {
this.logger.info({ sourceId }, 'Removing data source');
this.logger.info(`Removing data source: ${sourceId}`);
await this.disconnectDataSource(sourceId);
this.dataSources.delete(sourceId);
@ -132,7 +167,7 @@ export class DataSourceManager extends EventEmitter {
throw new Error(`Data source ${sourceId} not found`);
}
this.logger.info({ sourceId, updates }, 'Updating data source');
this.logger.info(`Updating data source: ${sourceId}`, updates);
// Update configuration
dataSource.config = { ...dataSource.config, ...updates };

View file

@ -4,12 +4,14 @@ import type { MarketDataEvent, SignalEvent, TradingEvent } from '@stock-bot/shar
export class EventPublisher {
private dragonfly: Redis;
private readonly STREAM_NAME = 'trading-events'; constructor() {
private readonly STREAM_NAME = 'trading-events';
constructor() {
this.dragonfly = new Redis({
host: databaseConfig.dragonfly.host,
port: databaseConfig.dragonfly.port,
password: databaseConfig.dragonfly.password,
maxRetriesPerRequest: 3,
maxRetriesPerRequest: databaseConfig.dragonfly.maxRetriesPerRequest,
});
this.dragonfly.on('connect', () => {

View file

@ -1,5 +1,21 @@
import { EventEmitter } from 'eventemitter3';
import { Logger } from 'pino';
// Local logger interface so this module does not depend on pino's types.
interface Logger {
  info(msg: string, ...args: any[]): void;
  error(msg: string, ...args: any[]): void;
  warn(msg: string, ...args: any[]): void;
  debug(msg: string, ...args: any[]): void;
  child(options: any): Logger;
}
// Lightweight console logger; each level routes to the matching console method
// and prefixes the line with "[name] LEVEL:".
const createLogger = (name: string): Logger => ({
  info: (msg: string, ...rest: any[]) => console.log(`[${name}] INFO:`, msg, ...rest),
  error: (msg: string, ...rest: any[]) => console.error(`[${name}] ERROR:`, msg, ...rest),
  warn: (msg: string, ...rest: any[]) => console.warn(`[${name}] WARN:`, msg, ...rest),
  debug: (msg: string, ...rest: any[]) => console.debug(`[${name}] DEBUG:`, msg, ...rest),
  // Derive a namespaced child logger, defaulting the suffix to "child".
  child: (options: any): Logger => createLogger(`${name}.${options.component || 'child'}`),
});
import {
GatewayConfig,
DataSourceConfig,

1009
bun.lock

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,46 @@
version: '3.8'
services:
# Loki - Log aggregation
loki:
image: grafana/loki:2.9.2
container_name: trading-bot-loki
ports:
- "3100:3100"
volumes:
- loki_data:/loki
- ./monitoring/loki:/etc/loki
command: -config.file=/etc/loki/loki-config.yaml
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://localhost:3100/ready"]
interval: 30s
timeout: 10s
retries: 3
networks:
- trading-bot-network
# Grafana - Visualization for logs and metrics
grafana:
image: grafana/grafana:10.2.0
container_name: trading-bot-grafana
ports:
- "3000:3000"
environment:
- GF_SECURITY_ADMIN_PASSWORD=admin
- GF_SECURITY_ADMIN_USER=admin
- GF_PATHS_PROVISIONING=/etc/grafana/provisioning
volumes:
- grafana_data:/var/lib/grafana
- ./monitoring/grafana/provisioning:/etc/grafana/provisioning
depends_on:
- loki
networks:
- trading-bot-network
volumes:
loki_data:
grafana_data:
networks:
trading-bot-network:
external: true

View file

@ -0,0 +1,46 @@
version: '3.8'
services:
# Loki - Log aggregation
loki:
image: grafana/loki:2.9.2
container_name: trading-bot-loki
ports:
- "3100:3100"
volumes:
- loki_data:/loki
- ./monitoring/loki:/etc/loki
command: -config.file=/etc/loki/loki-config.yaml
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://localhost:3100/ready"]
interval: 30s
timeout: 10s
retries: 3
networks:
- trading-bot-network
# Grafana - Visualization for logs and metrics
grafana:
image: grafana/grafana:10.2.0
container_name: trading-bot-grafana
ports:
- "3000:3000"
environment:
- GF_SECURITY_ADMIN_PASSWORD=admin
- GF_SECURITY_ADMIN_USER=admin
- GF_PATHS_PROVISIONING=/etc/grafana/provisioning
volumes:
- grafana_data:/var/lib/grafana
- ./monitoring/grafana/provisioning:/etc/grafana/provisioning
depends_on:
- loki
networks:
- trading-bot-network
volumes:
loki_data:
grafana_data:
networks:
trading-bot-network:
external: true

View file

@ -1,3 +1,5 @@
version: '3.8'
services:
# Dragonfly - Redis replacement for caching and events
dragonfly:
@ -108,7 +110,7 @@ services:
image: dpage/pgadmin4:latest
container_name: trading-bot-pgadmin
environment:
PGADMIN_DEFAULT_EMAIL: boki@stare.gg
PGADMIN_DEFAULT_EMAIL: admin@tradingbot.local
PGADMIN_DEFAULT_PASSWORD: admin123
PGADMIN_CONFIG_SERVER_MODE: 'False'
PGADMIN_DISABLE_POSTFIX: 'true'
@ -131,7 +133,7 @@ services:
ME_CONFIG_MONGODB_ADMINPASSWORD: trading_mongo_dev
ME_CONFIG_MONGODB_SERVER: mongodb
ME_CONFIG_MONGODB_PORT: 27017
ME_CONFIG_BASICAUTH_USERNAME: boki
ME_CONFIG_BASICAUTH_USERNAME: admin
ME_CONFIG_BASICAUTH_PASSWORD: admin123
ports:
- "8081:8081"
@ -141,7 +143,7 @@ services:
networks:
- trading-bot-network
# Prometheus - Metrics collection (optional)
# Prometheus - Metrics collection
prometheus:
image: prom/prometheus:latest
container_name: trading-bot-prometheus
@ -160,21 +162,42 @@ services:
networks:
- trading-bot-network
# Grafana - Metrics visualization (optional)
# Loki - Log aggregation
loki:
image: grafana/loki:2.9.2
container_name: trading-bot-loki
ports:
- "3100:3100"
volumes:
- loki_data:/loki
- ./monitoring/loki:/etc/loki
command: -config.file=/etc/loki/loki-config.yaml
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://localhost:3100/ready"]
interval: 30s
timeout: 10s
retries: 3
restart: unless-stopped
networks:
- trading-bot-network
# Grafana - Visualization for logs and metrics
grafana:
image: grafana/grafana:latest
image: grafana/grafana:10.2.0
container_name: trading-bot-grafana
ports:
- "3000:3000"
environment:
- GF_SECURITY_ADMIN_PASSWORD=admin123
- GF_SECURITY_ADMIN_USER=boki
- GF_SECURITY_ADMIN_PASSWORD=admin
- GF_SECURITY_ADMIN_USER=admin
- GF_PATHS_PROVISIONING=/etc/grafana/provisioning
- GF_USERS_ALLOW_SIGN_UP=false
volumes:
- grafana_data:/var/lib/grafana
- ./monitoring/grafana/provisioning:/etc/grafana/provisioning
depends_on:
- prometheus
- loki
restart: unless-stopped
networks:
- trading-bot-network
@ -187,6 +210,7 @@ volumes:
pgadmin_data:
prometheus_data:
grafana_data:
loki_data:
networks:
trading-bot-network:

244
docker-compose.yml.backup Normal file
View file

@ -0,0 +1,244 @@
services:
# Dragonfly - Redis replacement for caching and events
dragonfly:
image: docker.dragonflydb.io/dragonflydb/dragonfly:latest
container_name: trading-bot-dragonfly
ports:
- "6379:6379"
command:
- dragonfly
- --logtostderr
- --cache_mode=true
- --maxmemory=2gb
- --proactor_threads=8
- --bind=0.0.0.0
volumes:
- dragonfly_data:/data
restart: unless-stopped
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 30s
timeout: 10s
retries: 3
networks:
- trading-bot-network
# PostgreSQL - Operational data (orders, positions, strategies)
postgres:
image: postgres:16-alpine
container_name: trading-bot-postgres
environment:
POSTGRES_DB: trading_bot
POSTGRES_USER: trading_user
POSTGRES_PASSWORD: trading_pass_dev
POSTGRES_INITDB_ARGS: "--encoding=UTF-8"
ports:
- "5432:5432"
volumes:
- postgres_data:/var/lib/postgresql/data
- ./database/postgres/init:/docker-entrypoint-initdb.d
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "pg_isready -U trading_user -d trading_bot"]
interval: 30s
timeout: 10s
retries: 3
networks:
- trading-bot-network
# QuestDB - Time-series data (OHLCV, indicators, performance)
questdb:
image: questdb/questdb:latest
container_name: trading-bot-questdb
ports:
- "9000:9000" # Web console
- "8812:8812" # PostgreSQL wire protocol
- "9009:9009" # InfluxDB line protocol
volumes:
- questdb_data:/var/lib/questdb
environment:
- QDB_TELEMETRY_ENABLED=false
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/status"]
interval: 30s
timeout: 10s
retries: 3
networks:
- trading-bot-network
# MongoDB - Document storage (sentiment, raw docs, unstructured data)
mongodb:
image: mongo:7-jammy
container_name: trading-bot-mongodb
environment:
MONGO_INITDB_ROOT_USERNAME: trading_admin
MONGO_INITDB_ROOT_PASSWORD: trading_mongo_dev
MONGO_INITDB_DATABASE: trading_documents
ports:
- "27017:27017"
volumes:
- mongodb_data:/data/db
- ./database/mongodb/init:/docker-entrypoint-initdb.d
restart: unless-stopped
healthcheck:
test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"]
interval: 30s
timeout: 10s
retries: 3
networks:
- trading-bot-network
# Redis Insight - GUI for Dragonfly debugging
redis-insight:
image: redislabs/redisinsight:latest
container_name: trading-bot-redis-insight
ports:
- "8001:8001"
environment:
- REDIS_HOSTS=local:dragonfly:6379
depends_on:
- dragonfly
restart: unless-stopped
networks:
- trading-bot-network
# PgAdmin - PostgreSQL GUI
pgadmin:
image: dpage/pgadmin4:latest
container_name: trading-bot-pgadmin
environment:
PGADMIN_DEFAULT_EMAIL: boki@stare.gg
PGADMIN_DEFAULT_PASSWORD: admin123
PGADMIN_CONFIG_SERVER_MODE: 'False'
PGADMIN_DISABLE_POSTFIX: 'true'
ports:
- "8080:80"
volumes:
- pgadmin_data:/var/lib/pgadmin
depends_on:
- postgres
restart: unless-stopped
networks:
- trading-bot-network
# Mongo Express - MongoDB GUI
mongo-express:
image: mongo-express:latest
container_name: trading-bot-mongo-express
environment:
ME_CONFIG_MONGODB_ADMINUSERNAME: trading_admin
ME_CONFIG_MONGODB_ADMINPASSWORD: trading_mongo_dev
ME_CONFIG_MONGODB_SERVER: mongodb
ME_CONFIG_MONGODB_PORT: 27017
ME_CONFIG_BASICAUTH_USERNAME: boki
ME_CONFIG_BASICAUTH_PASSWORD: admin123
ports:
- "8081:8081"
depends_on:
- mongodb
restart: unless-stopped
networks:
- trading-bot-network
# Prometheus - Metrics collection (optional)
prometheus:
image: prom/prometheus:latest
container_name: trading-bot-prometheus
ports:
- "9090:9090"
volumes:
- ./monitoring/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus_data:/prometheus
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--web.console.libraries=/etc/prometheus/console_libraries'
- '--web.console.templates=/etc/prometheus/consoles'
- '--web.enable-lifecycle'
restart: unless-stopped
networks:
- trading-bot-network
# Loki - Log aggregation
loki:
image: grafana/loki:2.9.2
container_name: trading-bot-loki
ports:
- "3100:3100"
volumes:
- loki_data:/loki
- ./monitoring/loki:/etc/loki
command: -config.file=/etc/loki/loki-config.yaml
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://localhost:3100/ready"]
interval: 30s
timeout: 10s
retries: 3
restart: unless-stopped
networks:
- trading-bot-network
# Grafana - Visualization for logs and metrics
grafana:
image: grafana/grafana:10.2.0
container_name: trading-bot-grafana
ports:
- "3000:3000"
environment:
- GF_SECURITY_ADMIN_PASSWORD=admin
- GF_SECURITY_ADMIN_USER=admin
- GF_PATHS_PROVISIONING=/etc/grafana/provisioning
- GF_USERS_ALLOW_SIGN_UP=false
volumes:
- grafana_data:/var/lib/grafana
- ./monitoring/grafana/provisioning:/etc/grafana/provisioning
depends_on:
- prometheus
- loki
restart: unless-stopped
networks:
- trading-bot-network
volumes:
postgres_data:
questdb_data:
dragonfly_data:
mongodb_data:
pgadmin_data:
prometheus_data:
grafana_data:
loki_data:
- "3100:3100"
volumes:
- loki_data:/loki
- ./monitoring/loki:/etc/loki
command: -config.file=/etc/loki/loki-config.yaml
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://localhost:3100/ready"]
interval: 30s
timeout: 10s
retries: 3
networks:
- trading-bot-network
# Grafana - Visualization for logs and metrics
grafana:
image: grafana/grafana:10.2.0
container_name: trading-bot-grafana
ports:
- "3000:3000"
environment:
- GF_SECURITY_ADMIN_PASSWORD=admin
- GF_SECURITY_ADMIN_USER=admin
- GF_PATHS_PROVISIONING=/etc/grafana/provisioning
volumes:
- grafana_data:/var/lib/grafana
- ./monitoring/grafana/provisioning:/etc/grafana/provisioning
depends_on:
- loki
networks:
- trading-bot-network
networks:
trading-bot-network:
driver: bridge

217
docker-compose.yml.new Normal file
View file

@ -0,0 +1,217 @@
version: '3.8'
services:
# Dragonfly - Redis replacement for caching and events
dragonfly:
image: docker.dragonflydb.io/dragonflydb/dragonfly:latest
container_name: trading-bot-dragonfly
ports:
- "6379:6379"
command:
- dragonfly
- --logtostderr
- --cache_mode=true
- --maxmemory=2gb
- --proactor_threads=8
- --bind=0.0.0.0
volumes:
- dragonfly_data:/data
restart: unless-stopped
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 30s
timeout: 10s
retries: 3
networks:
- trading-bot-network
# PostgreSQL - Operational data (orders, positions, strategies)
postgres:
image: postgres:16-alpine
container_name: trading-bot-postgres
environment:
POSTGRES_DB: trading_bot
POSTGRES_USER: trading_user
POSTGRES_PASSWORD: trading_pass_dev
POSTGRES_INITDB_ARGS: "--encoding=UTF-8"
ports:
- "5432:5432"
volumes:
- postgres_data:/var/lib/postgresql/data
- ./database/postgres/init:/docker-entrypoint-initdb.d
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "pg_isready -U trading_user -d trading_bot"]
interval: 30s
timeout: 10s
retries: 3
networks:
- trading-bot-network
# QuestDB - Time-series data (OHLCV, indicators, performance)
questdb:
image: questdb/questdb:latest
container_name: trading-bot-questdb
ports:
- "9000:9000" # Web console
- "8812:8812" # PostgreSQL wire protocol
- "9009:9009" # InfluxDB line protocol
volumes:
- questdb_data:/var/lib/questdb
environment:
- QDB_TELEMETRY_ENABLED=false
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/status"]
interval: 30s
timeout: 10s
retries: 3
networks:
- trading-bot-network
# MongoDB - Document storage (sentiment, raw docs, unstructured data)
mongodb:
image: mongo:7-jammy
container_name: trading-bot-mongodb
environment:
MONGO_INITDB_ROOT_USERNAME: trading_admin
MONGO_INITDB_ROOT_PASSWORD: trading_mongo_dev
MONGO_INITDB_DATABASE: trading_documents
ports:
- "27017:27017"
volumes:
- mongodb_data:/data/db
- ./database/mongodb/init:/docker-entrypoint-initdb.d
restart: unless-stopped
healthcheck:
test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"]
interval: 30s
timeout: 10s
retries: 3
networks:
- trading-bot-network
# Redis Insight - GUI for Dragonfly debugging
redis-insight:
image: redislabs/redisinsight:latest
container_name: trading-bot-redis-insight
ports:
- "8001:8001"
environment:
- REDIS_HOSTS=local:dragonfly:6379
depends_on:
- dragonfly
restart: unless-stopped
networks:
- trading-bot-network
# PgAdmin - PostgreSQL GUI
pgadmin:
image: dpage/pgadmin4:latest
container_name: trading-bot-pgadmin
environment:
PGADMIN_DEFAULT_EMAIL: admin@tradingbot.local
PGADMIN_DEFAULT_PASSWORD: admin123
PGADMIN_CONFIG_SERVER_MODE: 'False'
PGADMIN_DISABLE_POSTFIX: 'true'
ports:
- "8080:80"
volumes:
- pgadmin_data:/var/lib/pgadmin
depends_on:
- postgres
restart: unless-stopped
networks:
- trading-bot-network
# Mongo Express - MongoDB GUI
mongo-express:
image: mongo-express:latest
container_name: trading-bot-mongo-express
environment:
ME_CONFIG_MONGODB_ADMINUSERNAME: trading_admin
ME_CONFIG_MONGODB_ADMINPASSWORD: trading_mongo_dev
ME_CONFIG_MONGODB_SERVER: mongodb
ME_CONFIG_MONGODB_PORT: 27017
ME_CONFIG_BASICAUTH_USERNAME: admin
ME_CONFIG_BASICAUTH_PASSWORD: admin123
ports:
- "8081:8081"
depends_on:
- mongodb
restart: unless-stopped
networks:
- trading-bot-network
# Prometheus - Metrics collection
prometheus:
image: prom/prometheus:latest
container_name: trading-bot-prometheus
ports:
- "9090:9090"
volumes:
- ./monitoring/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus_data:/prometheus
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--web.console.libraries=/etc/prometheus/console_libraries'
- '--web.console.templates=/etc/prometheus/consoles'
- '--web.enable-lifecycle'
restart: unless-stopped
networks:
- trading-bot-network
# Loki - Log aggregation
loki:
image: grafana/loki:2.9.2
container_name: trading-bot-loki
ports:
- "3100:3100"
volumes:
- loki_data:/loki
- ./monitoring/loki:/etc/loki
command: -config.file=/etc/loki/loki-config.yaml
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://localhost:3100/ready"]
interval: 30s
timeout: 10s
retries: 3
restart: unless-stopped
networks:
- trading-bot-network
# Grafana - Visualization for logs and metrics
grafana:
image: grafana/grafana:10.2.0
container_name: trading-bot-grafana
ports:
- "3000:3000"
environment:
- GF_SECURITY_ADMIN_PASSWORD=admin
- GF_SECURITY_ADMIN_USER=admin
- GF_PATHS_PROVISIONING=/etc/grafana/provisioning
- GF_USERS_ALLOW_SIGN_UP=false
volumes:
- grafana_data:/var/lib/grafana
- ./monitoring/grafana/provisioning:/etc/grafana/provisioning
depends_on:
- prometheus
- loki
restart: unless-stopped
networks:
- trading-bot-network
volumes:
postgres_data:
questdb_data:
dragonfly_data:
mongodb_data:
pgadmin_data:
prometheus_data:
grafana_data:
loki_data:
networks:
trading-bot-network:
driver: bridge

171
docs/loki-logging.md Normal file
View file

@ -0,0 +1,171 @@
# Loki Logging for Stock Bot
This document outlines how to use the Loki logging system integrated with the Stock Bot platform.
## Overview
Loki is a horizontally-scalable, highly-available, multi-tenant log aggregation system inspired by Prometheus. It is designed to be very cost effective and easy to operate. Unlike other logging systems, Loki is built around the idea of only indexing metadata about your logs (labels), not the full text. This makes Loki more resource-efficient than traditional log storage systems.
For Stock Bot, Loki provides:
1. Centralized logging for all services
2. Log aggregation and filtering by service, level, and custom labels
3. Integration with Grafana for visualization
4. Query capabilities for log analysis
5. Alert capabilities for critical issues
## Getting Started
### Starting the Logging Stack
```cmd
# Start the monitoring stack (includes Loki and Grafana)
scripts\docker.ps1 monitoring
```
You can also start Loki directly using Docker Compose:
```cmd
# Start Loki service only
docker-compose up -d loki
# Start Loki and Grafana
docker-compose up -d loki grafana
```
### Viewing Logs
Once started:
1. Access Grafana at http://localhost:3000 (login with admin/admin)
2. Navigate to the "Stock Bot Logs" dashboard
3. View and query your logs
## Using the Logger in Your Services
The Stock Bot logger has been enhanced to automatically send logs to Loki. Here's how to use it:
```typescript
import { Logger, LogLevel } from '@stock-bot/utils';
// Create a logger for your service
const logger = new Logger('your-service-name', LogLevel.INFO);
// Log at different levels
logger.debug('Detailed information for debugging');
logger.info('General information about operations');
logger.warn('Potential issues that don\'t affect operation');
logger.error('Critical errors that require attention');
// Log with structured data (will be searchable in Loki)
logger.info('Processing trade', {
symbol: 'MSFT',
price: 410.75,
quantity: 50
});
```
## Configuration Options
Logger configuration is managed through the `@stock-bot/config` package and can be set in your `.env` file:
```bash
# Logging configuration
LOG_LEVEL=debug # debug, info, warn, error
LOG_CONSOLE=true # Log to console in addition to Loki
LOKI_HOST=localhost # Loki server hostname
LOKI_PORT=3100 # Loki server port
LOKI_RETENTION_DAYS=30 # Days to retain logs
LOKI_LABELS=environment=development,service=stock-bot # Default labels
LOKI_BATCH_SIZE=100 # Number of logs to batch before sending
LOKI_FLUSH_INTERVAL_MS=5000 # Max time to wait before sending logs
```
## Useful Loki Queries
Inside Grafana, you can use these LogQL queries to analyze your logs:
1. **All logs from a specific service**:
```
{service="market-data-gateway"}
```
2. **All error logs across all services**:
```
{level="error"}
```
3. **Logs containing specific text**:
```
{service="market-data-gateway"} |= "trade"
```
4. **Count of error logs by service over time**:
```
sum by(service) (count_over_time({level="error"}[5m]))
```
## Testing the Logging Integration
A test script is provided to verify the logging integration:
```bash
# Run from project root
bun run tools/test-loki-logging.ts
```
## Architecture
Our logging implementation follows this architecture:
```
┌─────────────────┐     ┌─────────────────┐
│ Trading Services│────►│ @stock-bot/utils│
└─────────────────┘     │     Logger      │
                        └────────┬────────┘
                                 │
                                 ▼
┌────────────────────────────────────────┐
│                  Loki                  │
└────────────────┬───────────────────────┘
                 │
                 ▼
┌────────────────────────────────────────┐
│                Grafana                 │
└────────────────────────────────────────┘
## Adding New Dashboards
To create new Grafana dashboards for log visualization:
1. Build your dashboard in the Grafana UI
2. Export it to JSON
3. Add it to `monitoring/grafana/provisioning/dashboards/json/`
4. Restart the monitoring stack
## Troubleshooting
If logs aren't appearing in Grafana:
1. Run the status check script to verify Loki and Grafana are working:
```cmd
tools\check-loki-status.bat
```
2. Check that Loki and Grafana containers are running:
```cmd
docker ps | findstr "loki grafana"
```
3. Verify .env configuration for Loki host and port:
```cmd
type .env | findstr "LOKI_"
```
4. Ensure your service has the latest @stock-bot/utils package
5. Check for errors in the Loki container logs:
```cmd
docker logs trading-bot-loki
```

53
libs/config/.env.example Normal file
View file

@ -0,0 +1,53 @@
# Base environment variables for Stock Bot
# Environment
NODE_ENV=development
# Logging
LOG_LEVEL=debug
# Database configuration
DRAGONFLY_HOST=localhost
DRAGONFLY_PORT=6379
DRAGONFLY_PASSWORD=
DRAGONFLY_MAX_RETRIES_PER_REQUEST=3
TIMESCALE_HOST=localhost
TIMESCALE_PORT=5432
TIMESCALE_DB=stockbot
TIMESCALE_USER=postgres
TIMESCALE_PASSWORD=postgres
# Data providers
DEFAULT_DATA_PROVIDER=alpaca
ALPACA_API_KEY=your_alpaca_key_here
ALPACA_API_SECRET=your_alpaca_secret_here
POLYGON_API_KEY=your_polygon_key_here
# Risk parameters
RISK_MAX_DRAWDOWN=0.05
RISK_MAX_POSITION_SIZE=0.1
RISK_MAX_LEVERAGE=1.5
RISK_STOP_LOSS_DEFAULT=0.02
RISK_TAKE_PROFIT_DEFAULT=0.05
# Market Data Gateway
SERVICE_PORT=4000
WEBSOCKET_ENABLED=true
WEBSOCKET_PATH=/ws/market-data
WEBSOCKET_HEARTBEAT_INTERVAL=30000
THROTTLING_MAX_REQUESTS=300
THROTTLING_MAX_CONNECTIONS=5
CACHING_ENABLED=true
CACHING_TTL_SECONDS=60
# Risk Guardian
RISK_CHECKS_PRE_TRADE=true
RISK_CHECKS_PORTFOLIO=true
RISK_CHECKS_LEVERAGE=true
RISK_CHECKS_CONCENTRATION=true
ALERTING_ENABLED=true
ALERTING_CRITICAL_THRESHOLD=0.8
ALERTING_WARNING_THRESHOLD=0.6
WATCHDOG_ENABLED=true
WATCHDOG_CHECK_INTERVAL=60

View file

@ -0,0 +1,28 @@
# Production environment variables for Stock Bot
# Environment
NODE_ENV=production
# Logging
LOG_LEVEL=info
# Database configuration (use environment-specific values)
DRAGONFLY_HOST=dragonfly.production
DRAGONFLY_PORT=6379
DRAGONFLY_MAX_RETRIES_PER_REQUEST=5
TIMESCALE_HOST=timescale.production
TIMESCALE_PORT=5432
TIMESCALE_DB=stockbot_prod
# Risk parameters (more conservative for production)
RISK_MAX_DRAWDOWN=0.03
RISK_MAX_POSITION_SIZE=0.05
RISK_MAX_LEVERAGE=1.0
# Service settings
WEBSOCKET_HEARTBEAT_INTERVAL=15000
THROTTLING_MAX_REQUESTS=500
THROTTLING_MAX_CONNECTIONS=20
CACHING_ENABLED=true
CACHING_TTL_SECONDS=30

103
libs/config/README.md Normal file
View file

@ -0,0 +1,103 @@
# @stock-bot/config
A configuration management library for the Stock Bot trading platform.
## Overview
This library provides a centralized way to manage configurations across all Stock Bot microservices and components. It includes:
- Environment-based configuration loading
- Strong TypeScript typing and validation using Zod
- Default configurations for services
- Environment variable parsing helpers
- Service-specific configuration modules
## Usage
### Basic Usage
```typescript
import { databaseConfig, dataProviderConfigs, riskConfig } from '@stock-bot/config';
// Access database configuration
const dragonflyHost = databaseConfig.dragonfly.host;
// Access data provider configuration
const alpacaApiKey = dataProviderConfigs.providers.find(p => p.name === 'alpaca')?.apiKey;
// Access risk configuration
const maxPositionSize = riskConfig.maxPositionSize;
```
### Service-Specific Configuration
```typescript
import { marketDataGatewayConfig, riskGuardianConfig } from '@stock-bot/config';
// Access Market Data Gateway configuration
const websocketPath = marketDataGatewayConfig.websocket.path;
// Access Risk Guardian configuration
const preTradeValidation = riskGuardianConfig.riskChecks.preTradeValidation;
```
### Environment Variables
The library automatically loads environment variables from `.env` files. You can create environment-specific files:
- `.env` - Base environment variables
- `.env.development` - Development-specific variables
- `.env.production` - Production-specific variables
- `.env.local` - Local overrides (not to be committed to git)
## Configuration Modules
### Core Configuration
- `Environment` - Enum for different environments
- `loadEnvVariables()` - Load environment variables from .env files
- `getEnvironment()` - Get the current environment
- `validateConfig()` - Validate configuration with Zod schema
### Database Configuration
- `databaseConfig` - Database connection settings (Dragonfly, TimescaleDB)
### Data Provider Configuration
- `dataProviderConfigs` - Settings for market data providers
### Risk Configuration
- `riskConfig` - Risk management parameters (max drawdown, position size, etc.)
### Service-Specific Configuration
- `marketDataGatewayConfig` - Configs for the Market Data Gateway service
- `riskGuardianConfig` - Configs for the Risk Guardian service
## Extending
To add a new service configuration:
1. Create a new file in `src/services/`
2. Define a Zod schema for validation
3. Create loading and default configuration functions
4. Export from `src/services/index.ts`
5. The new configuration will be automatically available from the main package
## Development
```bash
# Install dependencies
bun install
# Run tests
bun test
# Type check
bun run type-check
# Lint
bun run lint
```

37
libs/config/package.json Normal file
View file

@ -0,0 +1,37 @@
{
"name": "@stock-bot/config",
"version": "1.0.0",
"description": "Configuration management library for Stock Bot platform",
"main": "src/index.ts",
"type": "module",
"scripts": {
"build": "tsc",
"test": "bun test",
"lint": "eslint src/**/*.ts",
"type-check": "tsc --noEmit"
},
"dependencies": {
"dotenv": "^16.3.1",
"zod": "^3.22.4"
},
"devDependencies": {
"@types/node": "^20.11.0",
"typescript": "^5.3.0",
"eslint": "^8.56.0",
"@typescript-eslint/eslint-plugin": "^6.19.0",
"@typescript-eslint/parser": "^6.19.0",
"bun-types": "^1.2.15"
},
"keywords": [
"configuration",
"settings",
"env",
"stock-bot"
],
"exports": {
".": {
"import": "./src/index.ts",
"require": "./dist/index.js"
}
}
}

24
libs/config/setup.bat Normal file
View file

@ -0,0 +1,24 @@
@echo off
rem Setup script for the @stock-bot/config library (lives in libs\config).
rem Paths are derived from this script's own location (%~dp0) instead of a
rem hard-coded g:\repos\stock-bot, so the script works from any checkout.
echo Building @stock-bot/config library...
rem Repo root is two directories above libs\config.
cd /d "%~dp0..\.."
echo Installing dependencies...
bun install
echo Running type check...
cd /d "%~dp0"
bun run type-check
echo Running tests...
bun test
echo Setting up example configuration...
copy .env.example .env
echo Running example to display configuration...
bun run src/example.ts
echo.
echo Configuration library setup complete!
echo.
echo You can now import @stock-bot/config in your services.

View file

@ -0,0 +1,113 @@
/**
* Tests for the configuration library
*/
import { describe, expect, test, beforeAll, afterAll } from 'bun:test';
import {
getEnvironment,
Environment,
validateConfig,
ConfigurationError,
loadEnvVariables,
getEnvVar,
getNumericEnvVar,
getBooleanEnvVar
} from './core';
import { databaseConfigSchema } from './types';
describe('Core configuration', () => {
  // Snapshot the environment so mutations below can be fully undone.
  const originalEnv = { ...process.env };

  // Seed the variables the individual tests read.
  beforeAll(() => {
    process.env.NODE_ENV = 'testing';
    process.env.TEST_STRING = 'test-value';
    process.env.TEST_NUMBER = '42';
    process.env.TEST_BOOL_TRUE = 'true';
    process.env.TEST_BOOL_FALSE = 'false';
  });

  // Restore the pristine environment when the suite finishes.
  afterAll(() => {
    process.env = { ...originalEnv };
  });

  test('getEnvironment returns correct environment', () => {
    expect(getEnvironment()).toBe(Environment.Testing);
    // Each recognized NODE_ENV value maps to its enum member.
    process.env.NODE_ENV = 'development';
    expect(getEnvironment()).toBe(Environment.Development);
    process.env.NODE_ENV = 'production';
    expect(getEnvironment()).toBe(Environment.Production);
    process.env.NODE_ENV = 'staging';
    expect(getEnvironment()).toBe(Environment.Staging);
    // Unrecognized values fall back to Development.
    process.env.NODE_ENV = 'unknown';
    expect(getEnvironment()).toBe(Environment.Development);
  });

  test('getEnvVar retrieves environment variables', () => {
    expect(getEnvVar('TEST_STRING')).toBe('test-value');
    expect(getEnvVar('NON_EXISTENT')).toBeUndefined();
    expect(getEnvVar('NON_EXISTENT', false)).toBeUndefined();
    // A required-but-absent variable must raise a ConfigurationError.
    expect(() => getEnvVar('NON_EXISTENT', true)).toThrow(ConfigurationError);
  });

  test('getNumericEnvVar converts to number', () => {
    expect(getNumericEnvVar('TEST_NUMBER')).toBe(42);
    expect(getNumericEnvVar('NON_EXISTENT', 100)).toBe(100);
    // Non-numeric content must raise instead of silently yielding NaN.
    process.env.INVALID_NUMBER = 'not-a-number';
    expect(() => getNumericEnvVar('INVALID_NUMBER')).toThrow(ConfigurationError);
  });

  test('getBooleanEnvVar converts to boolean', () => {
    expect(getBooleanEnvVar('TEST_BOOL_TRUE')).toBe(true);
    expect(getBooleanEnvVar('TEST_BOOL_FALSE')).toBe(false);
    expect(getBooleanEnvVar('NON_EXISTENT', true)).toBe(true);
  });

  test('validateConfig validates against schema', () => {
    // Valid config — satisfies databaseConfigSchema outright.
    const validConfig = {
      dragonfly: {
        host: 'localhost',
        port: 6379,
        maxRetriesPerRequest: 3
      },
      timescaleDB: {
        host: 'localhost',
        port: 5432,
        database: 'stockbot',
        user: 'postgres'
      }
    };
    expect(() => validateConfig(validConfig, databaseConfigSchema)).not.toThrow();
    // Invalid config: the schema declares `port` with a .default(), so merely
    // OMITTING it would still validate (zod fills the default in). Use a
    // value of the wrong type instead, which genuinely fails validation.
    const invalidConfig = {
      dragonfly: {
        host: 'localhost',
        port: 'not-a-number', // wrong type — must be rejected
        maxRetriesPerRequest: 3
      },
      timescaleDB: {
        host: 'localhost',
        port: 5432,
        database: 'stockbot',
        user: 'postgres'
      }
    };
    expect(() => validateConfig(invalidConfig, databaseConfigSchema)).toThrow(ConfigurationError);
  });
});

162
libs/config/src/core.ts Normal file
View file

@ -0,0 +1,162 @@
/**
* Core configuration module for the Stock Bot platform
*/
import { config as dotenvConfig } from 'dotenv';
import path from 'path';
import { z } from 'zod';
import { Environment } from './types';
/**
 * Error thrown when configuration loading or validation fails.
 *
 * The `name` property is overridden so logs and test matchers can
 * distinguish configuration failures from generic `Error`s.
 */
export class ConfigurationError extends Error {
  constructor(message: string) {
    super(message);
    this.name = 'ConfigurationError';
  }
}
/**
 * Loads environment variables from .env files for the active environment.
 *
 * Files consulted, in order: `.env`, `.env.<environment>`, `.env.local`.
 * NOTE(review): dotenv does not overwrite variables already present in
 * process.env by default, so the FIRST file that defines a key wins —
 * confirm this matches the intended "local overrides" semantics.
 *
 * @param envOverride Optional environment name; falls back to NODE_ENV,
 *                    then 'development'.
 */
export function loadEnvVariables(envOverride?: string): void {
  const activeEnv = envOverride || process.env.NODE_ENV || 'development';
  ['.env', `.env.${activeEnv}`, '.env.local']
    .map((name) => path.resolve(process.cwd(), name))
    .forEach((envPath) => dotenvConfig({ path: envPath }));
}
/**
 * Resolves the current Environment from NODE_ENV (case-insensitive).
 * Unknown or unset values fall back to Environment.Development.
 */
export function getEnvironment(): Environment {
  const byName: Record<string, Environment> = {
    development: Environment.Development,
    testing: Environment.Testing,
    staging: Environment.Staging,
    production: Environment.Production,
  };
  const raw = (process.env.NODE_ENV ?? 'development').toLowerCase();
  return byName[raw] ?? Environment.Development;
}
/**
 * Validates an arbitrary value against a Zod schema.
 *
 * @param config Candidate configuration (typically a merged plain object).
 * @param schema Zod schema describing the expected shape.
 * @returns The parsed (and defaulted) configuration.
 * @throws ConfigurationError with one "<path>: <message>" line per issue
 *         when schema validation fails, or a generic message for any
 *         non-Zod failure raised during parsing.
 */
export function validateConfig<T>(config: unknown, schema: z.ZodSchema<T>): T {
  try {
    return schema.parse(config);
  } catch (err) {
    if (!(err instanceof z.ZodError)) {
      throw new ConfigurationError('Invalid configuration');
    }
    const details = err.issues
      .map((issue) => `${issue.path.join('.')}: ${issue.message}`)
      .join('\n');
    throw new ConfigurationError(`Configuration validation failed:\n${details}`);
  }
}
/**
 * Reads a string environment variable.
 *
 * @param key      Variable name.
 * @param required When true, a missing or empty value raises instead of
 *                 being returned. (An empty string counts as missing for
 *                 the required check, but is returned as-is otherwise.)
 * @returns The raw value, or undefined when unset and not required.
 * @throws ConfigurationError when required and absent/empty.
 */
export function getEnvVar(key: string, required: boolean = false): string | undefined {
  const value = process.env[key];
  const absent = value === undefined || value === '';
  if (required && absent) {
    throw new ConfigurationError(`Required environment variable ${key} is missing`);
  }
  return value;
}
/**
 * Reads a numeric environment variable.
 *
 * @param key          Variable name.
 * @param defaultValue Returned when the variable is unset or empty; when no
 *                     default is given, an unset variable raises.
 * @returns The value converted via Number().
 * @throws ConfigurationError when missing without a default, or when the
 *         value does not convert to a number.
 */
export function getNumericEnvVar(key: string, defaultValue?: number): number {
  const raw = process.env[key];
  if (raw === undefined || raw === '') {
    if (defaultValue === undefined) {
      throw new ConfigurationError(`Required numeric environment variable ${key} is missing`);
    }
    return defaultValue;
  }
  const parsed = Number(raw);
  if (Number.isNaN(parsed)) {
    throw new ConfigurationError(`Environment variable ${key} is not a valid number`);
  }
  return parsed;
}
/**
 * Reads a boolean environment variable.
 *
 * Truthy spellings are 'true' (any case) and '1'; everything else is false.
 *
 * @param key          Variable name.
 * @param defaultValue Returned when the variable is unset or empty; when no
 *                     default is given, an unset variable raises.
 * @throws ConfigurationError when missing without a default.
 */
export function getBooleanEnvVar(key: string, defaultValue?: boolean): boolean {
  const raw = process.env[key];
  if (raw === undefined || raw === '') {
    if (defaultValue === undefined) {
      throw new ConfigurationError(`Required boolean environment variable ${key} is missing`);
    }
    return defaultValue;
  }
  const normalized = raw.toLowerCase();
  return normalized === 'true' || normalized === '1';
}
/**
 * Creates a typed dynamic configuration loader for a specific service.
 *
 * The returned function (re-)loads .env files, merges an optional JSON
 * override from the environment on top of `defaultConfig`, and validates
 * the result against `schema`.
 *
 * The override variable is derived from the service name. Hyphenated names
 * ('market-data-gateway') previously produced 'MARKET-DATA-GATEWAY_CONFIG',
 * a variable name POSIX shells cannot export; the underscore form is now
 * preferred, with the legacy hyphenated form still honored for backward
 * compatibility.
 *
 * @param serviceName   Service identifier, e.g. 'risk-guardian'.
 * @param schema        Zod schema the merged config must satisfy.
 * @param defaultConfig Base values applied before the JSON override.
 * @returns A zero-argument loader producing a validated config.
 * @throws ConfigurationError (from the loader) on malformed JSON or
 *         schema violations.
 */
export function createConfigLoader<T>(
  serviceName: string,
  schema: z.ZodSchema<T>,
  defaultConfig: Partial<T> = {}
): () => T {
  const upper = serviceName.toUpperCase();
  const preferredVar = `${upper.replace(/-/g, '_')}_CONFIG`;
  const legacyVar = `${upper}_CONFIG`;
  return (): T => {
    try {
      loadEnvVariables();
      let config = { ...defaultConfig } as unknown as T;
      // Prefer the shell-safe underscore variable; fall back to the legacy name.
      const activeVar = process.env[preferredVar] !== undefined ? preferredVar : legacyVar;
      const configJson = process.env[activeVar];
      if (configJson) {
        try {
          const parsedConfig = JSON.parse(configJson);
          config = { ...config, ...parsedConfig };
        } catch (error) {
          throw new ConfigurationError(`Invalid JSON in ${activeVar} environment variable`);
        }
      }
      // Validate and return the merged config.
      return validateConfig(config, schema);
    } catch (error) {
      if (error instanceof ConfigurationError) {
        throw error;
      }
      throw new ConfigurationError(`Failed to load configuration for service ${serviceName}: ${error}`);
    }
  };
}

View file

@ -0,0 +1,73 @@
/**
* Data provider configurations for market data
*/
import { getEnvVar, validateConfig } from './core';
import { dataProvidersConfigSchema, DataProvidersConfig, DataProviderConfig } from './types';
/**
 * Built-in provider definitions used as the base before environment
 * overrides are applied in loadDataProviderConfigs().
 *
 * Credentials are intentionally blank here; they are filled in from
 * <NAME>_API_KEY / <NAME>_API_SECRET environment variables.
 */
const defaultDataProviders: DataProviderConfig[] = [
  {
    name: 'alpaca',
    type: 'rest',
    baseUrl: 'https://data.alpaca.markets/v1beta1',
    apiKey: '',
    apiSecret: '',
    rateLimits: {
      maxRequestsPerMinute: 200 // NOTE(review): presumably Alpaca's plan limit — confirm
    }
  },
  {
    name: 'polygon',
    type: 'rest',
    baseUrl: 'https://api.polygon.io/v2',
    apiKey: '',
    rateLimits: {
      maxRequestsPerMinute: 5 // NOTE(review): looks like Polygon's free tier — confirm
    }
  },
  {
    // Streaming variant; reads credentials from ALPACA_WEBSOCKET_* env vars
    // (env prefix is derived from the name in loadDataProviderConfigs).
    name: 'alpaca-websocket',
    type: 'websocket',
    wsUrl: 'wss://stream.data.alpaca.markets/v2/iex',
    apiKey: '',
    apiSecret: ''
  }
];
/**
 * Load data provider configurations from environment variables.
 *
 * For each default provider, credentials are overridden from
 * `<NAME>_API_KEY` / `<NAME>_API_SECRET`, where <NAME> is the provider name
 * upper-cased with every hyphen turned into an underscore.
 *
 * @returns Validated providers plus the DEFAULT_DATA_PROVIDER selection
 *          (falling back to 'alpaca').
 * @throws ConfigurationError via validateConfig on schema violations.
 */
export function loadDataProviderConfigs(): DataProvidersConfig {
  const providers = defaultDataProviders.map(provider => {
    // Replace EVERY hyphen: String.replace with a string pattern only swaps
    // the first occurrence, which would mis-derive the env prefix for any
    // multi-hyphen provider name.
    const nameUpper = provider.name.toUpperCase().replace(/-/g, '_');
    const updatedProvider: DataProviderConfig = {
      ...provider,
      apiKey: getEnvVar(`${nameUpper}_API_KEY`) || provider.apiKey || '',
    };
    // Only providers that declare a secret slot get one resolved.
    if (provider.apiSecret !== undefined) {
      updatedProvider.apiSecret = getEnvVar(`${nameUpper}_API_SECRET`) || provider.apiSecret || '';
    }
    return updatedProvider;
  });
  // Which provider consumers should use when none is specified.
  const defaultProvider = getEnvVar('DEFAULT_DATA_PROVIDER') || 'alpaca';
  const config: DataProvidersConfig = {
    providers,
    defaultProvider
  };
  return validateConfig(config, dataProvidersConfigSchema);
}

/**
 * Singleton data provider configurations, resolved eagerly at import time.
 */
export const dataProviderConfigs = loadDataProviderConfigs();

View file

@ -0,0 +1,52 @@
/**
* Database configuration for Stock Bot services
*/
import { z } from 'zod';
import { getEnvVar, getNumericEnvVar, validateConfig } from './core';
import { databaseConfigSchema, DatabaseConfig } from './types';
/**
 * Default database configuration — local development values, matching
 * .env.example. Used as fallbacks by loadDatabaseConfig().
 */
const defaultDatabaseConfig: DatabaseConfig = {
  // Dragonfly (Redis-compatible wire protocol; default port 6379)
  dragonfly: {
    host: 'localhost',
    port: 6379,
    maxRetriesPerRequest: 3
  },
  // TimescaleDB (Postgres; default port 5432)
  timescaleDB: {
    host: 'localhost',
    port: 5432,
    database: 'stockbot',
    user: 'postgres'
  }
};
/**
 * Load database configuration from environment variables, falling back to
 * the local-development defaults above for any variable that is unset.
 *
 * @returns The schema-validated database configuration.
 * @throws ConfigurationError via validateConfig on schema violations.
 */
export function loadDatabaseConfig(): DatabaseConfig {
  const { dragonfly: dfDefaults, timescaleDB: tsDefaults } = defaultDatabaseConfig;
  const candidate: DatabaseConfig = {
    dragonfly: {
      host: getEnvVar('DRAGONFLY_HOST') || dfDefaults.host,
      port: getNumericEnvVar('DRAGONFLY_PORT', dfDefaults.port),
      password: getEnvVar('DRAGONFLY_PASSWORD'),
      maxRetriesPerRequest: getNumericEnvVar('DRAGONFLY_MAX_RETRIES_PER_REQUEST', dfDefaults.maxRetriesPerRequest)
    },
    timescaleDB: {
      host: getEnvVar('TIMESCALE_HOST') || tsDefaults.host,
      port: getNumericEnvVar('TIMESCALE_PORT', tsDefaults.port),
      database: getEnvVar('TIMESCALE_DB') || tsDefaults.database,
      user: getEnvVar('TIMESCALE_USER') || tsDefaults.user,
      password: getEnvVar('TIMESCALE_PASSWORD')
    }
  };
  return validateConfig(candidate, databaseConfigSchema);
}

/**
 * Singleton database configuration, resolved eagerly at import time.
 */
export const databaseConfig = loadDatabaseConfig();

View file

@ -0,0 +1,73 @@
/**
* Example usage of the @stock-bot/config library
*/
import {
databaseConfig,
dataProviderConfigs,
riskConfig,
Environment,
getEnvironment,
marketDataGatewayConfig,
riskGuardianConfig,
ConfigurationError,
validateConfig
} from './index';
/**
 * Display current configuration values.
 *
 * Prints every config section (database, data providers, risk, and both
 * service configs) to stdout. Purely informational — no side effects
 * beyond console output; reads only the eagerly-loaded singletons.
 */
export function printCurrentConfig(): void {
  console.log('\n=== Stock Bot Configuration ===');
  console.log('\nEnvironment:', getEnvironment());
  console.log('\n--- Database Config ---');
  console.log('Dragonfly Host:', databaseConfig.dragonfly.host);
  console.log('Dragonfly Port:', databaseConfig.dragonfly.port);
  console.log('TimescaleDB Host:', databaseConfig.timescaleDB.host);
  console.log('TimescaleDB Database:', databaseConfig.timescaleDB.database);
  console.log('\n--- Data Provider Config ---');
  console.log('Default Provider:', dataProviderConfigs.defaultProvider);
  console.log('Providers:');
  dataProviderConfigs.providers.forEach(provider => {
    console.log(` - ${provider.name} (${provider.type})`);
    // REST providers carry baseUrl; websocket providers carry wsUrl.
    if (provider.baseUrl) console.log(` URL: ${provider.baseUrl}`);
    if (provider.wsUrl) console.log(` WebSocket: ${provider.wsUrl}`);
  });
  // Risk values are stored as fractions; scale by 100 for display.
  console.log('\n--- Risk Config ---');
  console.log('Max Drawdown:', riskConfig.maxDrawdown * 100, '%');
  console.log('Max Position Size:', riskConfig.maxPositionSize * 100, '%');
  console.log('Max Leverage:', riskConfig.maxLeverage, 'x');
  console.log('Default Stop Loss:', riskConfig.stopLossDefault * 100, '%');
  console.log('Default Take Profit:', riskConfig.takeProfitDefault * 100, '%');
  console.log('\n--- Market Data Gateway Config ---');
  console.log('Service Port:', marketDataGatewayConfig.service.port);
  console.log('WebSocket Enabled:', marketDataGatewayConfig.websocket.enabled);
  console.log('WebSocket Path:', marketDataGatewayConfig.websocket.path);
  console.log('Caching Enabled:', marketDataGatewayConfig.caching.enabled);
  console.log('Caching TTL:', marketDataGatewayConfig.caching.ttlSeconds, 'seconds');
  console.log('\n--- Risk Guardian Config ---');
  console.log('Service Port:', riskGuardianConfig.service.port);
  console.log('Pre-Trade Validation:', riskGuardianConfig.riskChecks.preTradeValidation);
  console.log('Portfolio Validation:', riskGuardianConfig.riskChecks.portfolioValidation);
  console.log('Alerting Enabled:', riskGuardianConfig.alerting.enabled);
  console.log('Critical Threshold:', riskGuardianConfig.alerting.criticalThreshold * 100, '%');
}
// Execute example if this file is run directly.
// NOTE(review): the package is "type": "module"; in a pure-ESM runtime
// `require` is not defined, so the bare `require.main === module` guard
// could throw a ReferenceError at import time. Guarding with `typeof`
// makes importing this file safe everywhere; under Bun, consider
// `import.meta.main` instead — confirm the target runtime.
const runDirectly =
  typeof require !== 'undefined' &&
  typeof module !== 'undefined' &&
  require.main === module;
if (runDirectly) {
  try {
    printCurrentConfig();
  } catch (error) {
    if (error instanceof ConfigurationError) {
      console.error('Configuration Error:', error.message);
    } else {
      console.error('Error:', error);
    }
    process.exit(1);
  }
}

24
libs/config/src/index.ts Normal file
View file

@ -0,0 +1,24 @@
/**
 * @stock-bot/config
 *
 * Configuration management library for the Stock Bot platform.
 * This barrel re-exports every configuration module so consumers can
 * `import { ... } from '@stock-bot/config'` without deep paths.
 */
// Core helpers (env loading, validation, typed env getters) and shared types
export * from './core';
export * from './types';
// Database connection settings (Dragonfly, TimescaleDB)
export * from './database';
// Market data provider settings
export * from './data-providers';
// Risk management parameters
export * from './risk';
// Logging / Loki settings
export * from './logging';
// Service-specific configurations (market-data-gateway, risk-guardian)
export * from './services';

View file

@ -0,0 +1,75 @@
/**
* Loki logging configuration for Stock Bot platform
*/
import { z } from 'zod';
import { getEnvVar, getNumericEnvVar, getBooleanEnvVar } from './core';
/**
 * Loki transport configuration schema. Defaults mirror .env.example
 * (localhost:3100, 30-day retention, 100-entry batches flushed every 5 s).
 */
export const lokiConfigSchema = z.object({
  host: z.string().default('localhost'),
  port: z.number().default(3100),
  username: z.string().optional(), // optional basic-auth credentials
  password: z.string().optional(),
  retentionDays: z.number().default(30),
  labels: z.record(z.string()).default({}), // static labels attached to every log line
  batchSize: z.number().default(100), // entries buffered before a push
  flushIntervalMs: z.number().default(5000) // max wait before a partial batch is sent
});
export type LokiConfig = z.infer<typeof lokiConfigSchema>;
/**
 * Top-level logging configuration schema: log level, console sink toggle,
 * and the nested Loki transport settings.
 */
export const loggingConfigSchema = z.object({
  level: z.enum(['debug', 'info', 'warn', 'error']).default('info'),
  console: z.boolean().default(true),
  loki: lokiConfigSchema
});
export type LoggingConfig = z.infer<typeof loggingConfigSchema>;
/**
 * Parse labels from an environment variable string.
 * Format: key1=value1,key2=value2
 *
 * Only the FIRST '=' in each pair separates key from value, so values may
 * themselves contain '=' (e.g. base64 tokens) — the previous split('=')
 * silently truncated such values. Entries without a non-empty key and
 * value are skipped.
 */
function parseLabels(labelsStr?: string): Record<string, string> {
  if (!labelsStr) return {};
  const labels: Record<string, string> = {};
  for (const labelPair of labelsStr.split(',')) {
    const trimmed = labelPair.trim();
    const sep = trimmed.indexOf('=');
    if (sep <= 0) continue; // no '=' at all, or empty key — skip malformed entry
    const key = trimmed.slice(0, sep);
    const value = trimmed.slice(sep + 1);
    if (value) {
      labels[key] = value;
    }
  }
  return labels;
}
/**
 * Load logging configuration from environment variables.
 *
 * LOG_LEVEL is checked against the known levels and anything unrecognized
 * falls back to 'info' — the previous bare `as` cast let an invalid value
 * (e.g. LOG_LEVEL=trace) masquerade as a valid level at the type level.
 */
export function loadLoggingConfig(): LoggingConfig {
  const rawLevel = getEnvVar('LOG_LEVEL');
  const level: LoggingConfig['level'] =
    rawLevel === 'debug' || rawLevel === 'info' || rawLevel === 'warn' || rawLevel === 'error'
      ? rawLevel
      : 'info';
  return {
    level,
    console: getBooleanEnvVar('LOG_CONSOLE', true),
    loki: {
      host: getEnvVar('LOKI_HOST') || 'localhost',
      port: getNumericEnvVar('LOKI_PORT', 3100),
      username: getEnvVar('LOKI_USERNAME'),
      password: getEnvVar('LOKI_PASSWORD'),
      retentionDays: getNumericEnvVar('LOKI_RETENTION_DAYS', 30),
      labels: parseLabels(getEnvVar('LOKI_LABELS')),
      batchSize: getNumericEnvVar('LOKI_BATCH_SIZE', 100),
      flushIntervalMs: getNumericEnvVar('LOKI_FLUSH_INTERVAL_MS', 5000)
    }
  };
}
/**
 * Singleton logging configuration, resolved eagerly at import time.
 */
export const loggingConfig = loadLoggingConfig();

36
libs/config/src/risk.ts Normal file
View file

@ -0,0 +1,36 @@
/**
* Risk management configuration for trading operations
*/
import { getNumericEnvVar, validateConfig } from './core';
import { riskConfigSchema, RiskConfig } from './types';
/**
 * Default risk configuration, used when the RISK_* env vars are absent.
 * Values are fractions (0.05 ≙ 5%) — example.ts renders them as percentages.
 */
const defaultRiskConfig: RiskConfig = {
  maxDrawdown: 0.05,
  maxPositionSize: 0.1,
  maxLeverage: 1,
  stopLossDefault: 0.02,
  takeProfitDefault: 0.05
};
/**
 * Load risk configuration from environment variables, falling back to the
 * defaults above for any RISK_* variable that is unset.
 *
 * @returns The schema-validated risk configuration.
 * @throws ConfigurationError via validateConfig on schema violations.
 */
export function loadRiskConfig(): RiskConfig {
  const candidate: RiskConfig = {
    maxDrawdown: getNumericEnvVar('RISK_MAX_DRAWDOWN', defaultRiskConfig.maxDrawdown),
    maxPositionSize: getNumericEnvVar('RISK_MAX_POSITION_SIZE', defaultRiskConfig.maxPositionSize),
    maxLeverage: getNumericEnvVar('RISK_MAX_LEVERAGE', defaultRiskConfig.maxLeverage),
    stopLossDefault: getNumericEnvVar('RISK_STOP_LOSS_DEFAULT', defaultRiskConfig.stopLossDefault),
    takeProfitDefault: getNumericEnvVar('RISK_TAKE_PROFIT_DEFAULT', defaultRiskConfig.takeProfitDefault)
  };
  return validateConfig(candidate, riskConfigSchema);
}

/**
 * Singleton risk configuration, resolved eagerly at import time.
 */
export const riskConfig = loadRiskConfig();

View file

@ -0,0 +1,5 @@
/**
 * Barrel for service-specific configuration modules.
 * Each service module exports its schema, loader functions, and an
 * eagerly-resolved singleton config.
 */
export * from './market-data-gateway';
export * from './risk-guardian';

View file

@ -0,0 +1,106 @@
/**
* Market Data Gateway service configuration
*/
import { z } from 'zod';
// NOTE(review): this module lives in src/services/ (it is re-exported from
// ./services/index.ts), so sibling-style './core' / './types' specifiers do
// not resolve — use '../' like risk-guardian.ts does. The second, duplicate
// './core' import (getEnvironment) is merged into the main core import.
import { getEnvVar, getNumericEnvVar, getBooleanEnvVar, createConfigLoader, getEnvironment } from '../core';
import { Environment, BaseConfig } from '../types';
/**
 * Market Data Gateway specific configuration schema.
 * Defaults mirror the hard-coded `defaultConfig` below and .env.example.
 */
export const marketDataGatewayConfigSchema = z.object({
  environment: z.nativeEnum(Environment),
  logLevel: z.enum(['debug', 'info', 'warn', 'error']).default('info'),
  // Service identity and listen port
  service: z.object({
    name: z.string().default('market-data-gateway'),
    version: z.string().default('1.0.0'),
    port: z.number().default(4000)
  }),
  // WebSocket streaming endpoint settings
  websocket: z.object({
    enabled: z.boolean().default(true),
    path: z.string().default('/ws/market-data'),
    heartbeatInterval: z.number().default(30000) // ms (30 s)
  }),
  // Per-client request throttling
  throttling: z.object({
    maxRequestsPerMinute: z.number().default(300),
    maxConnectionsPerIP: z.number().default(5)
  }),
  // Response caching
  caching: z.object({
    enabled: z.boolean().default(true),
    ttlSeconds: z.number().default(60)
  })
});
/**
 * Market Data Gateway configuration type (inferred from the schema).
 */
export type MarketDataGatewayConfig = z.infer<typeof marketDataGatewayConfigSchema>;
/**
 * Default Market Data Gateway configuration, applied when the
 * corresponding environment variables are absent. Values mirror the
 * schema defaults above.
 */
const defaultConfig: Partial<MarketDataGatewayConfig> = {
  environment: getEnvironment(),
  logLevel: 'info',
  service: {
    name: 'market-data-gateway',
    version: '1.0.0',
    port: 4000
  },
  websocket: {
    enabled: true,
    path: '/ws/market-data',
    heartbeatInterval: 30000 // 30 seconds
  },
  throttling: {
    maxRequestsPerMinute: 300,
    maxConnectionsPerIP: 5
  },
  caching: {
    enabled: true,
    ttlSeconds: 60
  }
};
/**
 * Load Market Data Gateway configuration, layering environment variables
 * over the hard-coded defaults; environment values win when present.
 *
 * NOTE(review): LOG_LEVEL is cast to the level union without validation —
 * an unrecognized value passes through the types unchecked. Confirm
 * whether invalid levels should instead fall back to the default.
 */
export function loadMarketDataGatewayConfig(): MarketDataGatewayConfig {
  const { service, websocket, throttling, caching } = defaultConfig;
  return {
    environment: getEnvironment(),
    logLevel: (getEnvVar('LOG_LEVEL') || defaultConfig.logLevel) as 'debug' | 'info' | 'warn' | 'error',
    service: {
      name: getEnvVar('SERVICE_NAME') || service!.name,
      version: getEnvVar('SERVICE_VERSION') || service!.version,
      port: getNumericEnvVar('SERVICE_PORT', service!.port)
    },
    websocket: {
      enabled: getBooleanEnvVar('WEBSOCKET_ENABLED', websocket!.enabled),
      path: getEnvVar('WEBSOCKET_PATH') || websocket!.path,
      heartbeatInterval: getNumericEnvVar('WEBSOCKET_HEARTBEAT_INTERVAL', websocket!.heartbeatInterval)
    },
    throttling: {
      maxRequestsPerMinute: getNumericEnvVar('THROTTLING_MAX_REQUESTS', throttling!.maxRequestsPerMinute),
      maxConnectionsPerIP: getNumericEnvVar('THROTTLING_MAX_CONNECTIONS', throttling!.maxConnectionsPerIP)
    },
    caching: {
      enabled: getBooleanEnvVar('CACHING_ENABLED', caching!.enabled),
      ttlSeconds: getNumericEnvVar('CACHING_TTL_SECONDS', caching!.ttlSeconds)
    }
  };
}
/**
 * Dynamic configuration loader for the Market Data Gateway: each call
 * re-reads env files and honors a JSON override env var derived from the
 * service name (see createConfigLoader in core).
 */
export const createMarketDataGatewayConfig = createConfigLoader<MarketDataGatewayConfig>(
  'market-data-gateway',
  marketDataGatewayConfigSchema,
  defaultConfig
);
/**
 * Singleton Market Data Gateway configuration, resolved eagerly at import time.
 */
export const marketDataGatewayConfig = loadMarketDataGatewayConfig();

View file

@ -0,0 +1,112 @@
/**
* Risk Guardian service configuration
*/
import { z } from 'zod';
import { getEnvVar, getNumericEnvVar, getBooleanEnvVar, createConfigLoader } from '../core';
import { Environment, BaseConfig } from '../types';
import { getEnvironment } from '../core';
/**
 * Risk Guardian specific configuration schema.
 * Defaults mirror the hard-coded `defaultConfig` below and .env.example.
 */
export const riskGuardianConfigSchema = z.object({
  environment: z.nativeEnum(Environment),
  logLevel: z.enum(['debug', 'info', 'warn', 'error']).default('info'),
  // Service identity and listen port
  service: z.object({
    name: z.string().default('risk-guardian'),
    version: z.string().default('1.0.0'),
    port: z.number().default(4001)
  }),
  // Which categories of risk validation are active
  riskChecks: z.object({
    preTradeValidation: z.boolean().default(true),
    portfolioValidation: z.boolean().default(true),
    leverageValidation: z.boolean().default(true),
    concentrationValidation: z.boolean().default(true)
  }),
  // Alert thresholds expressed as fractions (0.8 ≙ 80%)
  alerting: z.object({
    enabled: z.boolean().default(true),
    criticalThreshold: z.number().default(0.8),
    warningThreshold: z.number().default(0.6)
  }),
  // Periodic self-check loop
  watchdog: z.object({
    enabled: z.boolean().default(true),
    checkIntervalSeconds: z.number().default(60)
  })
});
/**
 * Risk Guardian configuration type (inferred from the schema).
 */
export type RiskGuardianConfig = z.infer<typeof riskGuardianConfigSchema>;
/**
 * Default Risk Guardian configuration, applied when the corresponding
 * environment variables are absent. Values mirror the schema defaults above.
 */
const defaultConfig: Partial<RiskGuardianConfig> = {
  environment: getEnvironment(),
  logLevel: 'info',
  service: {
    name: 'risk-guardian',
    version: '1.0.0',
    port: 4001
  },
  // All risk-check categories are on by default.
  riskChecks: {
    preTradeValidation: true,
    portfolioValidation: true,
    leverageValidation: true,
    concentrationValidation: true
  },
  // Thresholds are fractions (0.8 ≙ 80%).
  alerting: {
    enabled: true,
    criticalThreshold: 0.8,
    warningThreshold: 0.6
  },
  watchdog: {
    enabled: true,
    checkIntervalSeconds: 60
  }
};
/**
 * Load Risk Guardian configuration, layering environment variables over
 * the hard-coded defaults; environment values win when present.
 *
 * NOTE(review): LOG_LEVEL is cast to the level union without validation —
 * an unrecognized value passes through the types unchecked. Confirm
 * whether invalid levels should instead fall back to the default.
 */
export function loadRiskGuardianConfig(): RiskGuardianConfig {
  const { service, riskChecks, alerting, watchdog } = defaultConfig;
  return {
    environment: getEnvironment(),
    logLevel: (getEnvVar('LOG_LEVEL') || defaultConfig.logLevel) as 'debug' | 'info' | 'warn' | 'error',
    service: {
      name: getEnvVar('SERVICE_NAME') || service!.name,
      version: getEnvVar('SERVICE_VERSION') || service!.version,
      port: getNumericEnvVar('SERVICE_PORT', service!.port)
    },
    riskChecks: {
      preTradeValidation: getBooleanEnvVar('RISK_CHECKS_PRE_TRADE', riskChecks!.preTradeValidation),
      portfolioValidation: getBooleanEnvVar('RISK_CHECKS_PORTFOLIO', riskChecks!.portfolioValidation),
      leverageValidation: getBooleanEnvVar('RISK_CHECKS_LEVERAGE', riskChecks!.leverageValidation),
      concentrationValidation: getBooleanEnvVar('RISK_CHECKS_CONCENTRATION', riskChecks!.concentrationValidation)
    },
    alerting: {
      enabled: getBooleanEnvVar('ALERTING_ENABLED', alerting!.enabled),
      criticalThreshold: getNumericEnvVar('ALERTING_CRITICAL_THRESHOLD', alerting!.criticalThreshold),
      warningThreshold: getNumericEnvVar('ALERTING_WARNING_THRESHOLD', alerting!.warningThreshold)
    },
    watchdog: {
      enabled: getBooleanEnvVar('WATCHDOG_ENABLED', watchdog!.enabled),
      checkIntervalSeconds: getNumericEnvVar('WATCHDOG_CHECK_INTERVAL', watchdog!.checkIntervalSeconds)
    }
  };
}
/**
 * Dynamic configuration loader for the Risk Guardian: each call re-reads
 * env files and honors a JSON override env var derived from the service
 * name (see createConfigLoader in core).
 */
export const createRiskGuardianConfig = createConfigLoader<RiskGuardianConfig>(
  'risk-guardian',
  riskGuardianConfigSchema,
  defaultConfig
);
/**
 * Singleton Risk Guardian configuration, resolved eagerly at import time.
 */
export const riskGuardianConfig = loadRiskGuardianConfig();

87
libs/config/src/types.ts Normal file
View file

@ -0,0 +1,87 @@
/**
* Configuration type definitions for the Stock Bot platform
*/
import { z } from 'zod';
/**
 * Environment enum for different deployment environments.
 * String-valued so members compare and serialize as their NODE_ENV
 * spellings (see getEnvironment in core).
 */
export enum Environment {
  Development = 'development',
  Testing = 'testing',
  Staging = 'staging',
  Production = 'production'
}
/**
 * Common configuration interface shared by all service configs.
 * Service-specific schemas (market-data-gateway, risk-guardian) extend
 * this shape with their own sections.
 */
export interface BaseConfig {
  environment: Environment;
  // Minimum severity the service's logger should emit
  logLevel: 'debug' | 'info' | 'warn' | 'error';
  // Service identity and listen port
  service: {
    name: string;
    version: string;
    port: number;
  };
}
/**
 * Database configuration schema: Dragonfly (Redis-compatible, default port
 * 6379) and TimescaleDB (Postgres, default port 5432). Defaults match the
 * local development values in .env.example.
 */
export const databaseConfigSchema = z.object({
  dragonfly: z.object({
    host: z.string().default('localhost'),
    port: z.number().default(6379),
    password: z.string().optional(),
    maxRetriesPerRequest: z.number().default(3)
  }),
  timescaleDB: z.object({
    host: z.string().default('localhost'),
    port: z.number().default(5432),
    database: z.string().default('stockbot'),
    user: z.string().default('postgres'),
    password: z.string().optional()
  })
});
/**
 * Schema for a single market data provider. REST providers carry `baseUrl`;
 * websocket providers carry `wsUrl`. Credentials are optional because some
 * providers need only an API key (see data-providers.ts).
 */
export const dataProviderSchema = z.object({
  name: z.string(),
  type: z.enum(['rest', 'websocket', 'file']),
  baseUrl: z.string().url().optional(),
  wsUrl: z.string().url().optional(),
  apiKey: z.string().optional(),
  apiSecret: z.string().optional(),
  refreshInterval: z.number().optional(), // NOTE(review): units not stated — confirm (ms?)
  rateLimits: z.object({
    maxRequestsPerMinute: z.number().optional(),
    maxRequestsPerSecond: z.number().optional()
  }).optional()
});
// The full provider set plus which provider to use when none is specified.
export const dataProvidersConfigSchema = z.object({
  providers: z.array(dataProviderSchema),
  defaultProvider: z.string()
});
/**
 * Risk management configuration schema. Values are fractions
 * (0.05 ≙ 5%) — example.ts renders them as percentages.
 */
export const riskConfigSchema = z.object({
  maxDrawdown: z.number().default(0.05),
  maxPositionSize: z.number().default(0.1),
  maxLeverage: z.number().default(1),
  stopLossDefault: z.number().default(0.02),
  takeProfitDefault: z.number().default(0.05)
});
/**
 * Concrete types inferred from the schemas above — single source of truth
 * is the schema; these aliases exist for consumer convenience.
 */
export type DatabaseConfig = z.infer<typeof databaseConfigSchema>;
export type DataProviderConfig = z.infer<typeof dataProviderSchema>;
export type DataProvidersConfig = z.infer<typeof dataProvidersConfigSchema>;
export type RiskConfig = z.infer<typeof riskConfigSchema>;

10
libs/config/tsconfig.json Normal file
View file

@ -0,0 +1,10 @@
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"outDir": "./dist",
"rootDir": "./src",
"declaration": true
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist", "**/*.test.ts"]
}

187
libs/http-client/README.md Normal file
View file

@ -0,0 +1,187 @@
# @stock-bot/http-client
High-performance HTTP client for Stock Bot microservices built on Bun's native fetch API.
## Features
- **Ultra-fast performance** - Built on Bun's native fetch implementation
- **Connection pooling** - Efficiently manages connections to prevent overwhelming servers
- **Automatic retries** - Handles transient network errors with configurable retry strategies
- **Timeout management** - Prevents requests from hanging indefinitely
- **Streaming support** - Efficient handling of large responses
- **TypeScript support** - Full type safety for all operations
- **Metrics & monitoring** - Built-in performance statistics
## Installation
```bash
bun add @stock-bot/http-client
```
## Basic Usage
```typescript
import { BunHttpClient } from '@stock-bot/http-client';
// Create a client
const client = new BunHttpClient({
baseURL: 'https://api.example.com',
timeout: 5000,
retries: 3
});
// Make requests
async function fetchData() {
try {
// GET request
const response = await client.get('/users');
console.log(response.data);
// POST request with data
const createResponse = await client.post('/users', {
name: 'John Doe',
email: 'john@example.com'
});
console.log(createResponse.data);
} catch (error) {
console.error('Request failed:', error.message);
}
}
// Close when done
await client.close();
```
## Advanced Configuration
```typescript
const client = new BunHttpClient({
baseURL: 'https://api.example.com',
timeout: 10000,
retries: 3,
retryDelay: 1000,
maxConcurrency: 20,
keepAlive: true,
headers: {
'User-Agent': 'StockBot/1.0',
'Authorization': 'Bearer token'
},
validateStatus: (status) => status >= 200 && status < 300
});
```
## Connection Pooling
The HTTP client automatically manages connection pooling with smart limits:
```typescript
// Get connection statistics
const stats = client.getStats();
console.log(`Active connections: ${stats.activeConnections}`);
console.log(`Success rate: ${stats.successfulRequests / (stats.successfulRequests + stats.failedRequests)}`);
console.log(`Average response time: ${stats.averageResponseTime}ms`);
// Health check
const health = await client.healthCheck();
if (health.healthy) {
console.log('HTTP client is healthy');
} else {
console.log('HTTP client is degraded:', health.details);
}
```
## Event Handling
```typescript
// Listen for specific events
client.on('response', ({ host, response }) => {
console.log(`Response from ${host}: ${response.status}`);
});
client.on('error', ({ host, error }) => {
console.log(`Error from ${host}: ${error.message}`);
});
client.on('retryAttempt', (data) => {
  console.log(`Retrying request (${data.attempt}/${data.requestConfig.retries}): ${data.error.message}`);
});
```
## API Reference
### BunHttpClient
Main HTTP client class with connection pooling and retry support.
#### Methods
- `request(config)`: Make a request with full configuration options
- `get(url, config?)`: Make a GET request
- `post(url, data?, config?)`: Make a POST request with data
- `put(url, data?, config?)`: Make a PUT request with data
- `patch(url, data?, config?)`: Make a PATCH request with data
- `delete(url, config?)`: Make a DELETE request
- `head(url, config?)`: Make a HEAD request
- `options(url, config?)`: Make an OPTIONS request
- `getStats()`: Get connection statistics
- `healthCheck()`: Check health of the client
- `close()`: Close all connections
- `setBaseURL(url)`: Update the base URL
- `setDefaultHeaders(headers)`: Update default headers
- `setTimeout(timeout)`: Update default timeout
- `create(config)`: Create a new instance with different config
### Request Configuration
```typescript
interface RequestConfig {
url: string;
method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH' | 'HEAD' | 'OPTIONS';
headers?: Record<string, string>;
body?: any;
timeout?: number;
retries?: number;
validateStatus?: (status: number) => boolean;
metadata?: Record<string, any>;
}
```
### Response Object
```typescript
interface HttpResponse<T = any> {
data: T;
status: number;
statusText: string;
headers: Record<string, string>;
config: RequestConfig;
timing: {
start: number;
end: number;
duration: number;
};
}
```
## Error Handling
```typescript
try {
const response = await client.get('/resource-that-might-fail');
processData(response.data);
} catch (error) {
if (error instanceof TimeoutError) {
console.log('Request timed out');
} else if (error instanceof RetryExhaustedError) {
console.log(`Request failed after ${error.config.retries} retries`);
} else if (error instanceof HttpClientError) {
console.log(`HTTP error: ${error.status} - ${error.message}`);
} else {
console.log('Unexpected error', error);
}
}
```
## License
MIT

View file

@ -0,0 +1,91 @@
// Example usage of the @stock-bot/http-client library
import { BunHttpClient } from '../src';
/**
 * Demonstrates the BunHttpClient: event monitoring, a failing request,
 * parallel requests, stats and a health check. Network I/O only — run as a
 * script against the Polygon API.
 */
async function main() {
  // Create a client instance
  const client = new BunHttpClient({
    baseURL: 'https://api.polygon.io',
    timeout: 10000,
    retries: 2,
    retryDelay: 500,
    headers: {
      'X-API-Key': process.env.POLYGON_API_KEY || 'demo'
    }
  });

  // Add event listeners for monitoring
  client.on('response', ({host, response}) => {
    console.log(`📦 Response from ${host}: ${response.status} (${response.timing.duration.toFixed(2)}ms)`);
  });
  client.on('error', ({host, error}) => {
    console.error(`❌ Error from ${host}: ${error.message}`);
  });
  // The retry handler's payload key is `requestConfig`, not `config` —
  // destructuring `config` here yielded undefined at runtime.
  client.on('retryAttempt', ({attempt, requestConfig, delay}) => {
    console.warn(`⚠️ Retry ${attempt}/${requestConfig.retries} for ${requestConfig.url} in ${delay}ms`);
  });

  try {
    console.log('Fetching market data...');

    // Make a GET request
    const tickerResponse = await client.get('/v3/reference/tickers', {
      headers: {
        'Accept': 'application/json'
      }
    });
    console.log(`Found ${tickerResponse.data.results.length} tickers`);
    console.log(`First ticker: ${JSON.stringify(tickerResponse.data.results[0], null, 2)}`);

    // Make a request that will fail
    try {
      await client.get('/non-existent-endpoint');
    } catch (error) {
      // `error` is `unknown` under strict TS — narrow before reading .message.
      const message = error instanceof Error ? error.message : String(error);
      console.log('Expected error caught:', message);
    }

    // Multiple parallel requests
    console.log('Making parallel requests...');
    const [aaplData, msftData, amznData] = await Promise.all([
      client.get('/v2/aggs/ticker/AAPL/range/1/day/2023-01-01/2023-01-15'),
      client.get('/v2/aggs/ticker/MSFT/range/1/day/2023-01-01/2023-01-15'),
      client.get('/v2/aggs/ticker/AMZN/range/1/day/2023-01-01/2023-01-15')
    ]);
    console.log('Parallel requests completed:');
    console.log(`- AAPL: ${aaplData.status}, data points: ${aaplData.data.results?.length || 0}`);
    console.log(`- MSFT: ${msftData.status}, data points: ${msftData.data.results?.length || 0}`);
    console.log(`- AMZN: ${amznData.status}, data points: ${amznData.data.results?.length || 0}`);

    // Get client statistics
    const stats = client.getStats();
    console.log('\nClient Stats:');
    console.log(`- Active connections: ${stats.activeConnections}`);
    console.log(`- Total connections: ${stats.totalConnections}`);
    console.log(`- Successful requests: ${stats.successfulRequests}`);
    console.log(`- Failed requests: ${stats.failedRequests}`);
    console.log(`- Average response time: ${stats.averageResponseTime.toFixed(2)}ms`);
    console.log(`- Requests per second: ${stats.requestsPerSecond.toFixed(2)}`);

    // Health check
    const health = await client.healthCheck();
    console.log(`\nClient health: ${health.healthy ? 'HEALTHY' : 'DEGRADED'}`);
    console.log(`Health details: ${JSON.stringify(health.details, null, 2)}`);
  } catch (error) {
    console.error('Error in example:', error);
  } finally {
    // Always close the client when done to clean up resources
    await client.close();
    console.log('HTTP client closed');
  }
}

// Run the example
main().catch(err => {
  console.error('Example failed:', err);
  process.exit(1);
});

View file

@ -0,0 +1,34 @@
{
"name": "@stock-bot/http-client",
"version": "1.0.0",
"description": "High-performance HTTP client for Stock Bot using Bun's native fetch",
"main": "src/index.ts",
"type": "module",
"scripts": {
"build": "tsc",
"test": "bun test",
"lint": "eslint src/**/*.ts",
"type-check": "tsc --noEmit"
},
"dependencies": {
"eventemitter3": "^5.0.1"
},
"devDependencies": {
"@types/node": "^20.11.0",
"typescript": "^5.3.0",
"eslint": "^8.56.0",
"@typescript-eslint/eslint-plugin": "^6.19.0",
"@typescript-eslint/parser": "^6.19.0",
"bun-types": "^1.2.15"
},
"keywords": [
"http-client",
"fetch",
"bun",
"performance",
"connection-pooling"
],
"exports": {
".": "./src/index.ts"
}
}

View file

@ -0,0 +1,182 @@
import { describe, test, expect, mock, beforeEach, afterEach } from "bun:test";
import { BunHttpClient, HttpClientError, TimeoutError } from "../src";
// Mock GlobalFetch to avoid making real network requests

// 200 response with a JSON body — the happy path for most tests.
const mockFetchSuccess = mock(() =>
  Promise.resolve(new Response(
    JSON.stringify({ result: "success" }),
    { status: 200, headers: { "content-type": "application/json" } }
  ))
);

// 404 response — exercises the validateStatus / HttpClientError path.
const mockFetchFailure = mock(() =>
  Promise.resolve(new Response(
    JSON.stringify({ error: "Not found" }),
    { status: 404, headers: { "content-type": "application/json" } }
  ))
);

// Rejects after 10ms with an AbortError-shaped error, which is how fetch
// signals a timeout; the client should surface this as a TimeoutError.
const mockFetchTimeout = mock(() => {
  return new Promise((_, reject) => {
    setTimeout(() => {
      const error = new Error("Timeout");
      error.name = "AbortError";
      reject(error);
    }, 10);
  });
});
// Test suite for BunHttpClient. Each test swaps global.fetch for one of the
// mocks above; afterEach restores the real fetch and closes the client so no
// pool timers leak between tests.
describe("BunHttpClient", () => {
  let client: BunHttpClient;
  const originalFetch = global.fetch;

  beforeEach(() => {
    // Create a fresh client for each test
    client = new BunHttpClient({
      baseURL: "https://api.example.com",
      timeout: 1000,
      retries: 1
    });
  });

  afterEach(async () => {
    // Cleanup after each test
    await client.close();
    global.fetch = originalFetch;
  });

  test("should make successful GET requests", async () => {
    global.fetch = mockFetchSuccess;
    const response = await client.get("/users");
    expect(response.status).toBe(200);
    expect(response.data).toEqual({ result: "success" });
    expect(mockFetchSuccess).toHaveBeenCalledTimes(1);
  });

  test("should handle failed requests", async () => {
    global.fetch = mockFetchFailure;
    try {
      await client.get("/missing");
      // Reaching here means no error was thrown — force a failure.
      expect("Should have thrown").toBe("But didn't");
    } catch (error) {
      expect(error).toBeInstanceOf(HttpClientError);
      expect(error.status).toBe(404);
    }
    expect(mockFetchFailure).toHaveBeenCalledTimes(1);
  });

  test("should handle request timeouts", async () => {
    global.fetch = mockFetchTimeout;
    try {
      await client.get("/slow");
      expect("Should have thrown").toBe("But didn't");
    } catch (error) {
      expect(error).toBeInstanceOf(TimeoutError);
    }
  });

  test("should build full URLs properly", async () => {
    global.fetch = mockFetchSuccess;
    await client.get("/users/123");
    // Relative path must be resolved against the configured baseURL.
    expect(mockFetchSuccess).toHaveBeenCalledWith(
      "https://api.example.com/users/123",
      expect.objectContaining({
        method: "GET"
      })
    );
  });

  test("should make POST requests with body", async () => {
    global.fetch = mockFetchSuccess;
    const data = { name: "John", email: "john@example.com" };
    await client.post("/users", data);
    // Plain objects are JSON-serialized before being sent.
    expect(mockFetchSuccess).toHaveBeenCalledWith(
      "https://api.example.com/users",
      expect.objectContaining({
        method: "POST",
        body: JSON.stringify(data)
      })
    );
  });

  test("should provide convenience methods for all HTTP verbs", async () => {
    global.fetch = mockFetchSuccess;
    await client.get("/users");
    await client.post("/users", { name: "Test" });
    await client.put("/users/1", { name: "Updated" });
    await client.patch("/users/1", { status: "active" });
    await client.delete("/users/1");
    await client.head("/users");
    await client.options("/users");
    expect(mockFetchSuccess).toHaveBeenCalledTimes(7);
  });

  test("should merge config options correctly", async () => {
    global.fetch = mockFetchSuccess;
    await client.get("/users", {
      headers: { "X-Custom": "Value" },
      timeout: 5000
    });
    // Per-request headers must be merged into the defaults, not replace them.
    expect(mockFetchSuccess).toHaveBeenCalledWith(
      "https://api.example.com/users",
      expect.objectContaining({
        headers: expect.objectContaining({
          "X-Custom": "Value"
        })
      })
    );
  });

  test("should handle absolute URLs", async () => {
    global.fetch = mockFetchSuccess;
    // Absolute URLs bypass baseURL resolution entirely.
    await client.get("https://other-api.com/endpoint");
    expect(mockFetchSuccess).toHaveBeenCalledWith(
      "https://other-api.com/endpoint",
      expect.anything()
    );
  });

  test("should update configuration", async () => {
    global.fetch = mockFetchSuccess;
    client.setBaseURL("https://new-api.com");
    client.setDefaultHeaders({ "Authorization": "Bearer token" });
    client.setTimeout(2000);
    await client.get("/resource");
    expect(mockFetchSuccess).toHaveBeenCalledWith(
      "https://new-api.com/resource",
      expect.objectContaining({
        headers: expect.objectContaining({
          "Authorization": "Bearer token"
        })
      })
    );
  });

  test("should get connection stats", async () => {
    global.fetch = mockFetchSuccess;
    await client.get("/users");
    const stats = client.getStats();
    expect(stats).toHaveProperty("successfulRequests", 1);
    expect(stats).toHaveProperty("activeConnections");
    expect(stats).toHaveProperty("averageResponseTime");
  });
});

View file

@ -0,0 +1,199 @@
import { EventEmitter } from 'eventemitter3';
// HttpClientError and TimeoutError are constructed / instanceof-checked at
// runtime, so they must be value imports — `import type` is erased at compile
// time and would leave them undefined at runtime.
import { HttpClientError, TimeoutError } from './types';
import type {
  HttpClientConfig,
  RequestConfig,
  HttpResponse,
  ConnectionStats
} from './types';
import { ConnectionPool } from './ConnectionPool';
import { RetryHandler } from './RetryHandler';
/**
 * High-performance HTTP client built on Bun's native fetch.
 *
 * Combines a connection pool (global and per-host concurrency limits) with a
 * retry handler (exponential backoff with jitter). Pool and retry events are
 * re-emitted on the client itself: 'response', 'error', 'retryAttempt',
 * 'retrySuccess', 'retryExhausted'.
 */
export class BunHttpClient extends EventEmitter {
  private connectionPool: ConnectionPool;
  private retryHandler: RetryHandler;
  private defaultConfig: Required<HttpClientConfig>;

  constructor(config: HttpClientConfig = {}) {
    super();

    this.defaultConfig = {
      baseURL: '',
      timeout: 30000,
      headers: {},
      retries: 3,
      retryDelay: 1000,
      maxConcurrency: 10,
      keepAlive: true,
      validateStatus: (status: number) => status < 400,
      ...config
    };

    this.connectionPool = new ConnectionPool({
      maxConnections: this.defaultConfig.maxConcurrency,
      // Split the global budget so one slow host cannot monopolize the pool.
      maxConnectionsPerHost: Math.ceil(this.defaultConfig.maxConcurrency / 4),
      keepAlive: this.defaultConfig.keepAlive,
      maxIdleTime: 60000,
      connectionTimeout: this.defaultConfig.timeout
    });

    this.retryHandler = new RetryHandler({
      maxRetries: this.defaultConfig.retries,
      baseDelay: this.defaultConfig.retryDelay,
      maxDelay: 30000,
      exponentialBackoff: true
    });

    // Forward events from connection pool and retry handler so consumers
    // only need to subscribe on the client.
    this.connectionPool.on('response', (data) => this.emit('response', data));
    this.connectionPool.on('error', (data) => this.emit('error', data));
    this.retryHandler.on('retryAttempt', (data) => this.emit('retryAttempt', data));
    this.retryHandler.on('retrySuccess', (data) => this.emit('retrySuccess', data));
    this.retryHandler.on('retryExhausted', (data) => this.emit('retryExhausted', data));
  }

  /**
   * Execute a request (with retries) and return the parsed response.
   *
   * @throws TimeoutError when the request is aborted by its timeout.
   * @throws HttpClientError for HTTP and network failures.
   */
  async request<T = any>(config: RequestConfig): Promise<HttpResponse<T>> {
    const fullConfig = this.mergeConfig(config);
    return this.retryHandler.execute(async () => {
      const startTime = performance.now();
      try {
        // Add timing metadata
        fullConfig.metadata = {
          ...fullConfig.metadata,
          startTime
        };
        const response = await this.connectionPool.request(fullConfig);
        return response as HttpResponse<T>;
      } catch (error: any) {
        // fetch signals timeouts via AbortError; surface a typed error.
        if (error.name === 'AbortError') {
          throw new TimeoutError(fullConfig, fullConfig.timeout ?? this.defaultConfig.timeout);
        }
        // Re-throw as HttpClientError if not already
        if (!(error instanceof HttpClientError)) {
          throw new HttpClientError(
            error.message || 'Request failed',
            error.code,
            error.status,
            error.response,
            fullConfig
          );
        }
        throw error;
      }
    }, fullConfig);
  }

  // Convenience methods — each delegates to request() with the verb fixed.
  async get<T = any>(url: string, config?: Partial<RequestConfig>): Promise<HttpResponse<T>> {
    return this.request<T>({ ...config, url, method: 'GET' });
  }

  async post<T = any>(url: string, data?: any, config?: Partial<RequestConfig>): Promise<HttpResponse<T>> {
    return this.request<T>({ ...config, url, method: 'POST', body: data });
  }

  async put<T = any>(url: string, data?: any, config?: Partial<RequestConfig>): Promise<HttpResponse<T>> {
    return this.request<T>({ ...config, url, method: 'PUT', body: data });
  }

  async patch<T = any>(url: string, data?: any, config?: Partial<RequestConfig>): Promise<HttpResponse<T>> {
    return this.request<T>({ ...config, url, method: 'PATCH', body: data });
  }

  async delete<T = any>(url: string, config?: Partial<RequestConfig>): Promise<HttpResponse<T>> {
    return this.request<T>({ ...config, url, method: 'DELETE' });
  }

  async head<T = any>(url: string, config?: Partial<RequestConfig>): Promise<HttpResponse<T>> {
    return this.request<T>({ ...config, url, method: 'HEAD' });
  }

  async options<T = any>(url: string, config?: Partial<RequestConfig>): Promise<HttpResponse<T>> {
    return this.request<T>({ ...config, url, method: 'OPTIONS' });
  }

  /**
   * Merge per-request options over client defaults.
   *
   * The caller's options win, but `url` and `headers` are set AFTER the
   * spread: previously `...config` came last, so the raw relative URL
   * clobbered the baseURL-resolved one and the caller's headers replaced
   * (rather than merged with) the defaults.
   */
  private mergeConfig(config: RequestConfig): RequestConfig {
    return {
      timeout: this.defaultConfig.timeout,
      retries: this.defaultConfig.retries,
      validateStatus: this.defaultConfig.validateStatus,
      ...config,
      url: this.buildUrl(config.url),
      headers: { ...this.defaultConfig.headers, ...config.headers }
    };
  }

  /** Resolve a relative path against baseURL; absolute URLs pass through. */
  private buildUrl(url: string): string {
    if (url.startsWith('http://') || url.startsWith('https://')) {
      return url;
    }
    if (this.defaultConfig.baseURL) {
      const baseURL = this.defaultConfig.baseURL.replace(/\/$/, '');
      const path = url.replace(/^\//, '');
      return `${baseURL}/${path}`;
    }
    return url;
  }

  // Configuration methods
  setBaseURL(baseURL: string): void {
    this.defaultConfig.baseURL = baseURL;
  }

  setDefaultHeaders(headers: Record<string, string>): void {
    this.defaultConfig.headers = { ...this.defaultConfig.headers, ...headers };
  }

  setTimeout(timeout: number): void {
    this.defaultConfig.timeout = timeout;
  }

  // NOTE: only affects requests merged after this call; the already-created
  // connection pool keeps its original limits.
  setMaxConcurrency(maxConcurrency: number): void {
    this.defaultConfig.maxConcurrency = maxConcurrency;
  }

  // Statistics and monitoring
  getStats(): ConnectionStats {
    return this.connectionPool.getStats();
  }

  async healthCheck(): Promise<{ healthy: boolean; details: any }> {
    return this.connectionPool.healthCheck();
  }

  /** Close the pool and drop all listeners. The client is unusable after. */
  async close(): Promise<void> {
    await this.connectionPool.close();
    this.removeAllListeners();
  }

  /** Create a new client inheriting this client's defaults plus overrides. */
  create(config: HttpClientConfig): BunHttpClient {
    const mergedConfig = { ...this.defaultConfig, ...config };
    return new BunHttpClient(mergedConfig);
  }

  // Interceptor-like functionality through events
  onRequest(handler: (config: RequestConfig) => RequestConfig | Promise<RequestConfig>): void {
    this.on('beforeRequest', handler);
  }

  onResponse(handler: (response: HttpResponse) => HttpResponse | Promise<HttpResponse>): void {
    this.on('afterResponse', handler);
  }

  onError(handler: (error: any) => void): void {
    this.on('requestError', handler);
  }
}

View file

@ -0,0 +1,331 @@
import { EventEmitter } from 'eventemitter3';
import type {
ConnectionPoolConfig,
ConnectionStats,
QueuedRequest,
RequestConfig
} from './types';
/**
 * Queued connection pool enforcing global and per-host concurrency limits on
 * top of fetch. Requests are enqueued, executed when capacity allows, and
 * reported back through their queued promise. Emits 'response', 'error' and
 * 'closed'.
 */
export class ConnectionPool extends EventEmitter {
  private activeConnections = new Map<string, number>();
  private requestQueue: QueuedRequest[] = [];
  private stats = {
    totalConnections: 0,
    activeRequests: 0,
    successfulRequests: 0,
    failedRequests: 0,
    totalResponseTime: 0,
    requestCount: 0,
    startTime: Date.now()
  };
  private isProcessingQueue = false;
  private queueProcessor?: NodeJS.Timeout;

  constructor(private config: ConnectionPoolConfig) {
    super();
    this.startQueueProcessor();
  }

  /** Enqueue a request; resolves/rejects when it is eventually executed. */
  async request(requestConfig: RequestConfig): Promise<any> {
    return new Promise((resolve, reject) => {
      const host = this.extractHost(requestConfig.url);
      const queuedRequest: QueuedRequest = {
        id: this.generateRequestId(),
        config: requestConfig,
        resolve,
        reject,
        timestamp: Date.now(),
        retryCount: 0,
        host
      };
      this.requestQueue.push(queuedRequest);
      this.processQueue();
    });
  }

  /**
   * Drain the queue while capacity allows. A request blocked by a limit is
   * pushed back to the front; it is retried when a request completes or on
   * the next processor tick.
   */
  private async processQueue(): Promise<void> {
    if (this.isProcessingQueue || this.requestQueue.length === 0) {
      return;
    }
    this.isProcessingQueue = true;
    while (this.requestQueue.length > 0) {
      const request = this.requestQueue.shift()!;
      try {
        const currentConnections = this.activeConnections.get(request.host) || 0;
        // Check per-host connection limits
        if (currentConnections >= this.config.maxConnectionsPerHost) {
          this.requestQueue.unshift(request);
          break;
        }
        // Check global connection limit
        const totalActive = Array.from(this.activeConnections.values())
          .reduce((sum, count) => sum + count, 0);
        if (totalActive >= this.config.maxConnections) {
          this.requestQueue.unshift(request);
          break;
        }
        // Fire-and-forget: concurrency is bounded by the checks above and
        // completion is reported through the queued promise.
        this.executeRequest(request);
      } catch (error) {
        request.reject(error);
      }
    }
    this.isProcessingQueue = false;
  }

  /** Perform one fetch, settle the queued promise, and update accounting. */
  private async executeRequest(request: QueuedRequest): Promise<void> {
    const { host, config } = request;

    // Increment active connections
    this.activeConnections.set(host, (this.activeConnections.get(host) || 0) + 1);
    this.stats.activeRequests++;
    // Count every started request; this counter was never incremented
    // before, so getStats() always reported totalConnections = 0.
    this.stats.totalConnections++;

    const startTime = performance.now();
    let timeoutId: ReturnType<typeof setTimeout> | undefined;

    try {
      // Build the full URL
      const url = this.buildUrl(config.url, config);

      // Create abort controller for timeout
      const controller = new AbortController();
      if (config.timeout) {
        timeoutId = setTimeout(() => {
          controller.abort();
        }, config.timeout);
      }

      // Make the fetch request
      const response = await fetch(url, {
        method: config.method || 'GET',
        headers: this.buildHeaders(config.headers),
        body: this.buildBody(config.body),
        signal: controller.signal,
        // Bun-specific optimizations
        keepalive: this.config.keepAlive,
      });

      // Cancel the timeout as soon as headers arrive, so a timer firing
      // mid-parse cannot abort the body read.
      if (timeoutId !== undefined) {
        clearTimeout(timeoutId);
        timeoutId = undefined;
      }

      // Check if response is considered successful
      const isSuccess = config.validateStatus
        ? config.validateStatus(response.status)
        : response.status < 400;
      if (!isSuccess) {
        // Attach the status so retry conditions and HttpClientError wrapping
        // upstream can see it — a bare Error message hid it before.
        const httpError: any = new Error(`HTTP ${response.status}: ${response.statusText}`);
        httpError.status = response.status;
        httpError.response = response;
        throw httpError;
      }

      // Parse response data
      const data = await this.parseResponse(response);
      const endTime = performance.now();
      const duration = endTime - startTime;

      // Update stats
      this.updateStats(true, duration);

      // Build response object
      const httpResponse = {
        data,
        status: response.status,
        statusText: response.statusText,
        headers: this.parseHeaders(response.headers),
        config,
        timing: {
          start: startTime,
          end: endTime,
          duration
        }
      };
      this.emit('response', { host, response: httpResponse });
      request.resolve(httpResponse);
    } catch (error: any) {
      const endTime = performance.now();
      const duration = endTime - startTime;
      this.updateStats(false, duration);
      this.emit('error', { host, error, config });
      request.reject(error);
    } finally {
      // Error paths previously leaked the timeout timer — always clear it.
      if (timeoutId !== undefined) {
        clearTimeout(timeoutId);
      }
      // Decrement active connections
      this.activeConnections.set(host, Math.max(0, (this.activeConnections.get(host) || 0) - 1));
      this.stats.activeRequests = Math.max(0, this.stats.activeRequests - 1);
      // Freed capacity may unblock deferred requests immediately instead of
      // waiting for the next 10ms processor tick.
      this.processQueue();
    }
  }

  /** Absolute URLs pass through; bare paths are normalized to start with '/'. */
  private buildUrl(url: string, config: RequestConfig): string {
    // If URL is already absolute, return as-is
    if (url.startsWith('http://') || url.startsWith('https://')) {
      return url;
    }
    // If no base URL in config, assume it's a relative URL that needs a protocol
    if (!url.startsWith('/')) {
      url = '/' + url;
    }
    return url;
  }

  /** Defaults merged under caller-supplied headers (caller wins). */
  private buildHeaders(headers?: Record<string, string>): HeadersInit {
    return {
      'User-Agent': 'StockBot-HttpClient/1.0',
      'Accept': 'application/json',
      'Content-Type': 'application/json',
      ...headers
    };
  }

  /** Pass through strings/binary/FormData; JSON-encode everything else. */
  private buildBody(body: any): BodyInit | undefined {
    if (!body) return undefined;
    if (typeof body === 'string') return body;
    if (body instanceof FormData || body instanceof Blob) return body;
    if (body instanceof ArrayBuffer || body instanceof Uint8Array) return body;
    return JSON.stringify(body);
  }

  /** Decode by content-type: JSON → object, text/* → string, else raw bytes. */
  private async parseResponse(response: Response): Promise<any> {
    const contentType = response.headers.get('content-type') || '';
    if (contentType.includes('application/json')) {
      return await response.json();
    }
    if (contentType.includes('text/')) {
      return await response.text();
    }
    return await response.arrayBuffer();
  }

  private parseHeaders(headers: Headers): Record<string, string> {
    const result: Record<string, string> = {};
    headers.forEach((value, key) => {
      result[key] = value;
    });
    return result;
  }

  /** Host key used for per-host limits; relative URLs share 'default'. */
  private extractHost(url: string): string {
    try {
      if (url.startsWith('http://') || url.startsWith('https://')) {
        const urlObj = new URL(url);
        return urlObj.host;
      }
      return 'default';
    } catch {
      return 'default';
    }
  }

  private generateRequestId(): string {
    // slice(2, 11) is the modern equivalent of the deprecated substr(2, 9)
    return `req_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
  }

  private updateStats(success: boolean, responseTime: number): void {
    this.stats.requestCount++;
    this.stats.totalResponseTime += responseTime;
    if (success) {
      this.stats.successfulRequests++;
    } else {
      this.stats.failedRequests++;
    }
  }

  private startQueueProcessor(): void {
    this.queueProcessor = setInterval(() => {
      if (this.requestQueue.length > 0) {
        this.processQueue();
      }
    }, 10); // Process queue every 10ms for better responsiveness
  }

  getStats(): ConnectionStats {
    const totalActive = Array.from(this.activeConnections.values())
      .reduce((sum, count) => sum + count, 0);
    const averageResponseTime = this.stats.requestCount > 0
      ? this.stats.totalResponseTime / this.stats.requestCount
      : 0;
    const utilization = this.config.maxConnections > 0
      ? totalActive / this.config.maxConnections
      : 0;
    const elapsedTimeSeconds = (Date.now() - this.stats.startTime) / 1000;
    const requestsPerSecond = elapsedTimeSeconds > 0
      ? this.stats.requestCount / elapsedTimeSeconds
      : 0;
    return {
      activeConnections: totalActive,
      totalConnections: this.stats.totalConnections,
      successfulRequests: this.stats.successfulRequests,
      failedRequests: this.stats.failedRequests,
      averageResponseTime,
      connectionPoolUtilization: utilization,
      requestsPerSecond
    };
  }

  /** Stop the processor, drain (up to 30s), reject leftovers, announce. */
  async close(): Promise<void> {
    // Stop queue processor
    if (this.queueProcessor) {
      clearInterval(this.queueProcessor);
      this.queueProcessor = undefined;
    }
    // Wait for pending requests to complete (with timeout)
    const timeout = 30000; // 30 seconds
    const startTime = Date.now();
    while (this.requestQueue.length > 0 && Date.now() - startTime < timeout) {
      await new Promise(resolve => setTimeout(resolve, 100));
    }
    // Reject remaining requests
    while (this.requestQueue.length > 0) {
      const request = this.requestQueue.shift()!;
      request.reject(new Error('Connection pool closing'));
    }
    // Clear connections
    this.activeConnections.clear();
    // Announce shutdown BEFORE dropping listeners — the previous order
    // removed all listeners first, so 'closed' was never observable.
    this.emit('closed');
    this.removeAllListeners();
  }

  async healthCheck(): Promise<{ healthy: boolean; details: any }> {
    const stats = this.getStats();
    const queueSize = this.requestQueue.length;
    const healthy =
      stats.connectionPoolUtilization < 0.9 && // Less than 90% utilization
      queueSize < 100 && // Queue not too large
      stats.averageResponseTime < 5000; // Average response time under 5 seconds
    return {
      healthy,
      details: {
        stats,
        queueSize,
        activeHosts: Array.from(this.activeConnections.keys()),
        config: this.config
      },
    };
  }
}

View file

@ -0,0 +1,131 @@
import { EventEmitter } from 'eventemitter3';
// TimeoutError (instanceof check) and RetryExhaustedError (constructed) are
// used at runtime, so they must be value imports — `import type` is erased at
// compile time and would leave them undefined at runtime.
import { TimeoutError, RetryExhaustedError } from './types';
import type {
  RetryConfig,
  RequestConfig,
  HttpResponse,
  HttpClientError
} from './types';
/**
 * Retries an async operation on transient failures using exponential backoff
 * with jitter. Emits 'retryAttempt', 'retrySuccess' and 'retryExhausted'
 * (payload key for the request is `requestConfig`).
 */
export class RetryHandler extends EventEmitter {
  private config: Required<RetryConfig>;

  constructor(config: Partial<RetryConfig> = {}) {
    super();
    this.config = {
      maxRetries: 3,
      baseDelay: 1000,
      maxDelay: 30000,
      exponentialBackoff: true,
      retryCondition: this.defaultRetryCondition,
      ...config
    };
  }

  /**
   * Run `operation`, retrying transient failures.
   *
   * Non-retryable errors (e.g. a 404) are rethrown unchanged so callers keep
   * the original error type for `instanceof` dispatch as documented in the
   * README — previously they were wrapped in RetryExhaustedError even on the
   * first attempt. Only a genuinely exhausted retry budget is wrapped.
   */
  async execute<T>(
    operation: () => Promise<HttpResponse<T>>,
    requestConfig: RequestConfig
  ): Promise<HttpResponse<T>> {
    let lastError: any;
    let attempt = 0;
    while (attempt <= this.config.maxRetries) {
      try {
        const result = await operation();
        if (attempt > 0) {
          this.emit('retrySuccess', {
            requestConfig,
            attempt,
            result
          });
        }
        return result;
      } catch (error) {
        lastError = error;
        attempt++;
        // Permanent failures: retrying cannot help, and wrapping would hide
        // the error type from callers.
        if (!this.config.retryCondition(error)) {
          throw error;
        }
        if (attempt > this.config.maxRetries) {
          break;
        }
        // Calculate delay
        const delay = this.calculateDelay(attempt);
        this.emit('retryAttempt', {
          requestConfig,
          attempt,
          error,
          delay
        });
        // Wait before retry
        await this.delay(delay);
      }
    }
    // All retries exhausted
    const finalError = new RetryExhaustedError(requestConfig, attempt, lastError);
    this.emit('retryExhausted', {
      requestConfig,
      attempts: attempt,
      finalError
    });
    throw finalError;
  }

  /** Fixed delay, or exponential (base * 2^(attempt-1)) with 10% jitter, capped. */
  private calculateDelay(attempt: number): number {
    if (!this.config.exponentialBackoff) {
      return this.config.baseDelay;
    }
    // Exponential backoff with jitter
    const exponentialDelay = this.config.baseDelay * Math.pow(2, attempt - 1);
    const jitter = Math.random() * 0.1 * exponentialDelay; // 10% jitter
    const totalDelay = Math.min(exponentialDelay + jitter, this.config.maxDelay);
    return Math.floor(totalDelay);
  }

  private delay(ms: number): Promise<void> {
    return new Promise(resolve => setTimeout(resolve, ms));
  }

  /** Retry on transient network errors, retryable HTTP statuses, and timeouts. */
  private defaultRetryCondition(error: any): boolean {
    // Network errors
    if (error.code === 'ECONNRESET' ||
        error.code === 'ECONNREFUSED' ||
        error.code === 'ETIMEDOUT' ||
        error.code === 'ENOTFOUND') {
      return true;
    }
    // HTTP status codes that should be retried
    const retryableStatuses = [408, 429, 500, 502, 503, 504];
    if (error.status && retryableStatuses.includes(error.status)) {
      return true;
    }
    // Timeout errors
    if (error instanceof TimeoutError) {
      return true;
    }
    return false;
  }

  updateConfig(config: Partial<RetryConfig>): void {
    this.config = { ...this.config, ...config };
  }

  getConfig(): RetryConfig {
    return { ...this.config };
  }
}

View file

@ -0,0 +1,25 @@
// Main exports
export { BunHttpClient } from './BunHttpClient';
export { ConnectionPool } from './ConnectionPool';
export { RetryHandler } from './RetryHandler';
// Type exports
export type {
HttpClientConfig,
RequestConfig,
HttpResponse,
ConnectionPoolConfig,
ConnectionStats,
QueuedRequest,
RetryConfig
} from './types';
// Error exports
export {
HttpClientError,
TimeoutError,
RetryExhaustedError
} from './types';
// Default export for convenience
export { BunHttpClient as default } from './BunHttpClient';

View file

@ -0,0 +1,104 @@
// Type definitions for the HTTP client
/** Client-level defaults applied to every request unless overridden. */
export interface HttpClientConfig {
  baseURL?: string;
  timeout?: number;
  headers?: Record<string, string>;
  retries?: number;
  retryDelay?: number;
  maxConcurrency?: number;
  keepAlive?: boolean;
  /** Returns true when a status code should be treated as success. */
  validateStatus?: (status: number) => boolean;
}

/** Per-request options; overrides client defaults where set. */
export interface RequestConfig {
  url: string;
  method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH' | 'HEAD' | 'OPTIONS';
  headers?: Record<string, string>;
  body?: any;
  timeout?: number;
  retries?: number;
  /** Free-form data carried alongside the request (e.g. timing info). */
  metadata?: Record<string, any>;
  validateStatus?: (status: number) => boolean;
}

/** Parsed response plus the config that produced it and timing data. */
export interface HttpResponse<T = any> {
  data: T;
  status: number;
  statusText: string;
  headers: Record<string, string>;
  config: RequestConfig;
  timing: {
    start: number;
    end: number;
    duration: number;
  };
}

export interface ConnectionPoolConfig {
  maxConnections: number;
  maxConnectionsPerHost: number;
  keepAlive: boolean;
  maxIdleTime: number;
  connectionTimeout: number;
}

export interface ConnectionStats {
  activeConnections: number;
  totalConnections: number;
  successfulRequests: number;
  failedRequests: number;
  averageResponseTime: number;
  connectionPoolUtilization: number;
  requestsPerSecond: number;
}

/** A pending request with its promise callbacks, as held by the pool. */
export interface QueuedRequest {
  id: string;
  config: RequestConfig;
  resolve: (value: HttpResponse) => void;
  reject: (error: any) => void;
  timestamp: number;
  retryCount: number;
  host: string;
}

export interface RetryConfig {
  maxRetries: number;
  baseDelay: number;
  maxDelay: number;
  exponentialBackoff: boolean;
  /** Returns true when the given error should be retried. */
  retryCondition?: (error: any) => boolean;
}

/**
 * Base error type for all client failures. Fields are readonly — they are
 * set once at construction and only ever read by callers.
 */
export class HttpClientError extends Error {
  constructor(
    message: string,
    public readonly code?: string,
    public readonly status?: number,
    public readonly response?: any,
    public readonly config?: RequestConfig
  ) {
    super(message);
    this.name = 'HttpClientError';
  }
}

/** Raised when a request is aborted by its timeout. */
export class TimeoutError extends HttpClientError {
  constructor(config: RequestConfig, timeout: number) {
    super(`Request timeout after ${timeout}ms`, 'TIMEOUT', undefined, undefined, config);
    this.name = 'TimeoutError';
  }
}

/** Raised when every retry attempt failed; wraps the last error seen. */
export class RetryExhaustedError extends HttpClientError {
  constructor(config: RequestConfig, attempts: number, lastError: any) {
    super(
      `Request failed after ${attempts} attempts: ${lastError.message}`,
      'RETRY_EXHAUSTED',
      lastError.status,
      lastError.response,
      config
    );
    this.name = 'RetryExhaustedError';
  }
}

View file

@ -0,0 +1,12 @@
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"outDir": "./dist",
"rootDir": "./src",
"declaration": true,
"declarationMap": true,
"sourceMap": true
},
"include": ["src/**/*"],
"exclude": ["dist", "node_modules", "**/*.test.ts"]
}

View file

@ -9,9 +9,9 @@
"dev": "tsc --watch",
"clean": "rm -rf dist",
"test": "jest"
},
"dependencies": {
}, "dependencies": {
"@stock-bot/shared-types": "workspace:*",
"@stock-bot/config": "workspace:*",
"date-fns": "^2.30.0"
},
"devDependencies": {

View file

@ -1,3 +1,4 @@
export * from './dateUtils';
export * from './financialUtils';
export * from './logger';
export * from './lokiClient';

View file

@ -1,6 +1,20 @@
/**
* Logger utility with consistent formatting and log levels
* Supports console and Loki logging
*/
import { loggingConfig } from '@stock-bot/config';
import { LokiClient } from './lokiClient';
// Singleton Loki client
let lokiClient: LokiClient | null = null;
/**
 * Lazily create the process-wide Loki client on first use and reuse the
 * same instance afterwards (module-level singleton).
 */
function getLokiClient(): LokiClient {
  lokiClient ??= new LokiClient(loggingConfig);
  return lokiClient;
}
export class Logger {
constructor(private serviceName: string, private level: LogLevel = LogLevel.INFO) {}
@ -26,22 +40,51 @@ export class Logger {
const timestamp = new Date().toISOString();
const levelStr = LogLevel[level].padEnd(5);
const logMessage = `[${timestamp}] [${levelStr}] [${this.serviceName}] ${message}`;
const formattedArgs = args.length ? this.formatArgs(args) : '';
const fullMessage = `${message}${formattedArgs}`;
const logMessage = `[${timestamp}] [${levelStr}] [${this.serviceName}] ${fullMessage}`;
switch (level) {
case LogLevel.ERROR:
console.error(logMessage, ...args);
break;
case LogLevel.WARN:
console.warn(logMessage, ...args);
break;
case LogLevel.INFO:
console.info(logMessage, ...args);
break;
case LogLevel.DEBUG:
default:
console.debug(logMessage, ...args);
break;
// Console logging
if (loggingConfig.console) {
switch (level) {
case LogLevel.ERROR:
console.error(logMessage);
break;
case LogLevel.WARN:
console.warn(logMessage);
break;
case LogLevel.INFO:
console.info(logMessage);
break;
case LogLevel.DEBUG:
default:
console.debug(logMessage);
break;
}
}
// Loki logging
try {
const loki = getLokiClient();
loki.log(LogLevel[level].toLowerCase(), fullMessage, this.serviceName);
} catch (error) {
console.error('Failed to send log to Loki:', error);
}
}
/**
 * Render extra log arguments into a single string appended to the log
 * message: Errors as "message\nstack", objects as JSON, anything else via
 * string coercion. Each rendered piece is prefixed with a space.
 */
private formatArgs(args: any[]): string {
  return args.map(arg => {
    try {
      if (arg instanceof Error) {
        return ` ${arg.message}\n${arg.stack}`;
      } else if (typeof arg === 'object') {
        // JSON.stringify can throw (circular references, hostile toJSON);
        // catching per-argument means one bad value no longer discards
        // every other argument in the call.
        return ` ${JSON.stringify(arg)}`;
      } else {
        return ` ${arg}`;
      }
    } catch (error) {
      return ` [Error formatting log argument: ${error}]`;
    }
  }).join('');
}

View file

@ -0,0 +1,86 @@
/**
* Loki client for sending logs to Grafana Loki
*/
import { LoggingConfig } from '@stock-bot/config';
/**
 * Buffered client that pushes log lines to Grafana Loki over its HTTP API
 * (POST /loki/api/v1/push).
 *
 * Lines are queued in memory and flushed either on a fixed timer or
 * immediately once the queue reaches the configured batch size. Delivery is
 * best-effort: failed batches are reported to the console and dropped.
 */
export class LokiClient {
  private batchQueue: any[] = [];
  private flushInterval: NodeJS.Timeout;
  private lokiUrl: string;
  private authHeader?: string;

  constructor(private config: LoggingConfig) {
    const { host, port, username, password } = config.loki;
    this.lokiUrl = `http://${host}:${port}/loki/api/v1/push`;
    // Basic auth is only attached when both credentials are configured.
    if (username && password) {
      const authString = Buffer.from(`${username}:${password}`).toString('base64');
      this.authHeader = `Basic ${authString}`;
    }
    this.flushInterval = setInterval(
      () => this.flush(),
      config.loki.flushIntervalMs
    );
  }

  /**
   * Queue one log line; flushes immediately when the queue reaches
   * config.loki.batchSize.
   *
   * @param level - log level label (e.g. "info", "error")
   * @param message - the formatted log line
   * @param serviceName - emitting service, stored as the "service" label
   * @param labels - extra stream labels merged over the configured defaults
   */
  async log(level: string, message: string, serviceName: string, labels: Record<string, string> = {}) {
    // Loki expects nanosecond-precision timestamps. Date.now() * 1e6 is
    // ~1.7e18, well past Number.MAX_SAFE_INTEGER (~9e15), so the previous
    // float math silently rounded timestamps; BigInt keeps full precision.
    const timestamp = BigInt(Date.now()) * 1000000n;
    this.batchQueue.push({
      streams: [{
        stream: {
          level,
          service: serviceName,
          ...this.config.loki.labels,
          ...labels,
        },
        values: [[timestamp.toString(), message]],
      }],
    });
    if (this.batchQueue.length >= this.config.loki.batchSize) {
      await this.flush();
    }
  }

  /** Send everything currently queued to Loki in one push request. */
  private async flush() {
    if (this.batchQueue.length === 0) return;
    // Snapshot-and-swap BEFORE awaiting: entries queued while the HTTP
    // request is in flight land in the fresh array instead of being wiped
    // afterwards (the old `finally { this.batchQueue = [] }` silently
    // dropped such entries).
    const pending = this.batchQueue;
    this.batchQueue = [];
    try {
      const headers: Record<string, string> = {
        'Content-Type': 'application/json',
      };
      if (this.authHeader) {
        headers['Authorization'] = this.authHeader;
      }
      const response = await fetch(this.lokiUrl, {
        method: 'POST',
        headers,
        body: JSON.stringify({
          streams: pending.flatMap(batch => batch.streams),
        }),
      });
      if (!response.ok) {
        console.error(`Failed to send logs to Loki: ${response.status} ${response.statusText}`);
        const text = await response.text();
        if (text) {
          console.error(text);
        }
      }
    } catch (error) {
      // Best-effort delivery: the failed batch is dropped, matching the
      // previous behavior.
      console.error('Error sending logs to Loki:', error);
    }
  }

  /** Stop the flush timer and push any remaining queued lines. */
  async destroy() {
    clearInterval(this.flushInterval);
    return this.flush();
  }
}

View file

@ -0,0 +1,211 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": 1,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": {
"type": "loki",
"uid": "loki"
},
"description": "Basic log viewer for Stock Bot services",
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 0
},
"id": 1,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": true,
"showTime": true,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"targets": [
{
"datasource": {
"type": "loki",
"uid": "loki"
},
"editorMode": "builder",
"expr": "{service=~\".+\"}",
"queryType": "range"
}
],
"title": "All Service Logs",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "loki"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unitScale": true
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 8
},
"id": 2,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "10.2.0",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "loki"
},
"editorMode": "builder",
"expr": "sum by(service) (count_over_time({level=\"error\"}[5m]))",
"legendFormat": "{{service}}",
"queryType": "range"
}
],
"title": "Error Count by Service",
"type": "timeseries"
},
{
"datasource": {
"type": "loki",
"uid": "loki"
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 8
},
"id": 3,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": true,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"targets": [
{
"datasource": {
"type": "loki",
"uid": "loki"
},
"editorMode": "builder",
"expr": "{level=\"error\"}",
"queryType": "range"
}
],
"title": "Error Logs",
"type": "logs"
}
],
"refresh": "5s",
"schemaVersion": 38,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "Stock Bot Logs",
"uid": "stock-bot-logs",
"version": 1,
"weekStart": ""
}

View file

@ -0,0 +1,11 @@
apiVersion: 1
providers:
- name: 'Stock Bot Dashboards'
orgId: 1
folder: 'Stock Bot'
type: file
disableDeletion: false
editable: true
options:
path: /etc/grafana/provisioning/dashboards/json

View file

@ -0,0 +1,10 @@
apiVersion: 1
datasources:
- name: Loki
type: loki
access: proxy
url: http://loki:3100
jsonData:
maxLines: 1000
isDefault: true

View file

@ -0,0 +1,41 @@
auth_enabled: false
server:
http_listen_port: 3100
grpc_listen_port: 9096
common:
path_prefix: /loki
storage:
filesystem:
chunks_directory: /loki/chunks
rules_directory: /loki/rules
replication_factor: 1
ring:
instance_addr: 127.0.0.1
kvstore:
store: inmemory
schema_config:
configs:
- from: 2023-01-01
store: boltdb-shipper
object_store: filesystem
schema: v11
index:
prefix: index_
period: 24h
ruler:
alertmanager_url: http://localhost:9093
# 30 days retention
limits_config:
retention_period: 720h
query_range:
results_cache:
cache:
embedded_cache:
enabled: true
max_size_mb: 100

View file

@ -2,7 +2,8 @@
"name": "stock-bot",
"private": true,
"version": "1.0.0",
"description": "Advanced trading bot with microservice architecture", "scripts": {
"description": "Advanced trading bot with microservice architecture",
"scripts": {
"dev": "turbo run dev",
"build": "turbo run build",
"build:libs": "pwsh ./scripts/build-libs.ps1",
@ -11,7 +12,6 @@
"clean": "turbo run clean",
"start": "turbo run start",
"backtest": "turbo run backtest",
"docker:start": "pwsh ./scripts/docker.ps1 start",
"docker:stop": "pwsh ./scripts/docker.ps1 stop",
"docker:restart": "pwsh ./scripts/docker.ps1 restart",
@ -20,20 +20,19 @@
"docker:reset": "pwsh ./scripts/docker.ps1 reset",
"docker:admin": "pwsh ./scripts/docker.ps1 admin",
"docker:monitoring": "pwsh ./scripts/docker.ps1 monitoring",
"infra:up": "docker-compose up -d dragonfly postgres questdb mongodb",
"infra:up": "docker-compose up -d dragonfly postgres questdb mongodb",
"infra:down": "docker-compose down",
"infra:reset": "docker-compose down -v && docker-compose up -d dragonfly postgres questdb mongodb",
"dev:full": "npm run infra:up && npm run docker:admin && turbo run dev",
"dev:clean": "npm run infra:reset && npm run dev:full"
}, "workspaces": [
},
"workspaces": [
"libs/*",
"packages/*",
"apps/*/*"
],
"devDependencies": {
"@types/node": "^20.12.12",
"turbo": "^2.0.5",
"turbo": "^2.5.4",
"typescript": "^5.4.5"
},
"packageManager": "bun@1.1.12",

View file

@ -68,7 +68,7 @@ switch ($Action) {
"logs" {
if ($Service) {
Write-Host "📋 Logs for $Service:" -ForegroundColor Cyan
Write-Host "📋 Logs for ${Service}:" -ForegroundColor Cyan
Invoke-Expression "docker-compose $ComposeFiles logs -f $Service"
} else {
Write-Host "📋 All service logs:" -ForegroundColor Cyan
@ -99,18 +99,17 @@ switch ($Action) {
Write-Host " PgAdmin: http://localhost:8080"
Write-Host " Email: admin@tradingbot.local"
Write-Host " Password: admin123"
}
"monitoring" {
} "monitoring" {
Write-Host "📊 Starting monitoring stack..." -ForegroundColor Green
Invoke-Expression "docker-compose $ComposeFiles --profile monitoring up -d"
Invoke-Expression "docker-compose $ComposeFiles up -d prometheus grafana loki"
Write-Host "✅ Monitoring started!" -ForegroundColor Green
Write-Host ""
Write-Host "🔗 Monitoring Access:" -ForegroundColor Cyan
Write-Host " Prometheus: http://localhost:9090"
Write-Host " Grafana: http://localhost:3000"
Write-Host " Username: admin"
Write-Host " Password: admin123"
Write-Host " Password: admin"
Write-Host " Loki: http://localhost:3100"
}
"help" {

View file

@ -0,0 +1,27 @@
@echo off
REM Check Loki status and accessibility
REM Smoke-checks the local logging stack: the Loki container, its HTTP API,
REM the ingested label set, and the Grafana health endpoint.
echo Checking Loki container status...
docker ps | findstr "loki"
echo.
echo Testing Loki API...
REM /ready should respond once Loki has finished starting up.
curl -s http://localhost:3100/ready
echo.
echo.
echo Testing Loki labels...
REM If any service has shipped logs, "service" appears among the labels.
curl -s "http://localhost:3100/loki/api/v1/labels" | findstr /C:"service"
echo.
echo.
echo Checking Grafana...
curl -s http://localhost:3000/api/health | findstr /C:"database"
echo.
echo.
echo To view logs in Grafana:
echo 1. Open http://localhost:3000 in your browser
echo 2. Login with admin/admin
echo 3. Go to Explore and select Loki as the datasource
echo 4. Try the query: {service=~".+"}

View file

@ -0,0 +1,34 @@
#!/usr/bin/env bun
/**
 * Test script to verify the Loki logging integration.
 *
 * Emits one message per log level plus structured payloads and a simulated
 * error, then keeps the process alive long enough for the Loki client's
 * timed flush to run before exiting.
 */
import { Logger, LogLevel } from '@stock-bot/utils';
// Create a logger for testing
const logger = new Logger('test-service', LogLevel.DEBUG);
// Log test messages
logger.info('Starting test log messages...');
logger.debug('This is a DEBUG level message');
logger.info('This is an INFO level message');
logger.warn('This is a WARNING level message');
logger.error('This is an ERROR level message');
// Add some structured data
logger.info('Processing trade', { symbol: 'AAPL', price: 190.50, quantity: 100 });
logger.info('Processing trade', { symbol: 'MSFT', price: 410.75, quantity: 50 });
// Simulate an error
try {
  throw new Error('This is a simulated error');
} catch (error) {
  logger.error('An error occurred', error);
}
logger.info('Test log messages complete. Check Grafana at http://localhost:3000 to view logs.');
// The Loki client batches logs and flushes on a timer (LOKI_FLUSH_INTERVAL_MS,
// 5000ms by default) or at LOKI_BATCH_SIZE entries; these few messages never
// reach the batch size, so exiting after only 1s raced the timed flush and
// could drop every message. Wait past one full flush interval before exiting.
setTimeout(() => {
  process.exit(0);
}, 6000);