added working config lib
This commit is contained in:
parent
f8576c0d93
commit
def9bce8dc
33 changed files with 2896 additions and 1485 deletions
131
libs/config/USAGE.md
Normal file
131
libs/config/USAGE.md
Normal file
|
|
@ -0,0 +1,131 @@
|
|||
# Stock Bot Configuration Library Usage Guide
|
||||
|
||||
This guide shows how to use the envalid-based configuration system in the Stock Bot platform.
|
||||
|
||||
## Quick Start
|
||||
|
||||
```typescript
|
||||
import { databaseConfig, loggingConfig, riskConfig, dataProvidersConfig } from '@stock-bot/config';
|
||||
|
||||
// Access individual values
|
||||
console.log(`Database: ${databaseConfig.DB_HOST}:${databaseConfig.DB_PORT}`);
|
||||
console.log(`Log level: ${loggingConfig.LOG_LEVEL}`);
|
||||
console.log(`Max position size: ${riskConfig.RISK_MAX_POSITION_SIZE}`);
|
||||
```
|
||||
|
||||
## Environment Variables
|
||||
|
||||
All configuration is driven by environment variables. You can set them in:
|
||||
- `.env` files
|
||||
- System environment variables
|
||||
- Docker environment variables
|
||||
|
||||
### Database Configuration
|
||||
```bash
|
||||
DB_HOST=localhost
|
||||
DB_PORT=5432
|
||||
DB_NAME=stockbot
|
||||
DB_USER=stockbot
|
||||
DB_PASSWORD=your_password
|
||||
DB_SSL=false
|
||||
DB_POOL_MAX=10
|
||||
```
|
||||
|
||||
### Logging Configuration
|
||||
```bash
|
||||
LOG_LEVEL=info
|
||||
LOG_CONSOLE=true
|
||||
LOKI_HOST=localhost
|
||||
LOKI_PORT=3100
|
||||
LOKI_LABELS=service=market-data-gateway,version=1.0.0
|
||||
```
|
||||
|
||||
### Risk Management Configuration
|
||||
```bash
|
||||
RISK_MAX_POSITION_SIZE=0.1
|
||||
RISK_DEFAULT_STOP_LOSS=0.05
|
||||
RISK_DEFAULT_TAKE_PROFIT=0.15
|
||||
RISK_CIRCUIT_BREAKER_ENABLED=true
|
||||
```
|
||||
|
||||
### Data Provider Configuration
|
||||
```bash
|
||||
DEFAULT_DATA_PROVIDER=alpaca
|
||||
ALPACA_API_KEY=your_api_key
|
||||
ALPACA_API_SECRET=your_api_secret
|
||||
ALPACA_ENABLED=true
|
||||
POLYGON_ENABLED=false
|
||||
```
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
### Type Safety
|
||||
All configurations are fully typed:
|
||||
|
||||
```typescript
|
||||
import type { DatabaseConfig, LoggingConfig, RiskConfig } from '@stock-bot/config';
|
||||
|
||||
function setupDatabase(config: DatabaseConfig) {
|
||||
// TypeScript knows all the available properties
|
||||
return {
|
||||
host: config.DB_HOST,
|
||||
port: config.DB_PORT, // number
|
||||
ssl: config.DB_SSL, // boolean
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### Environment Detection
|
||||
```typescript
|
||||
import { getEnvironment, Environment } from '@stock-bot/config';
|
||||
|
||||
const env = getEnvironment();
|
||||
if (env === Environment.Production) {
|
||||
// Production-specific logic
|
||||
}
|
||||
```
|
||||
|
||||
### Data Provider Helpers
|
||||
```typescript
|
||||
import { getProviderConfig, getEnabledProviders, getDefaultProvider } from '@stock-bot/config';
|
||||
|
||||
// Get specific provider
|
||||
const alpaca = getProviderConfig('alpaca');
|
||||
|
||||
// Get all enabled providers
|
||||
const providers = getEnabledProviders();
|
||||
|
||||
// Get default provider
|
||||
const defaultProvider = getDefaultProvider();
|
||||
```
|
||||
|
||||
## Configuration Files
|
||||
|
||||
The library consists of these modules:
|
||||
|
||||
- **core.ts** - Core utilities and environment detection
|
||||
- **database.ts** - Database connection settings
|
||||
- **logging.ts** - Logging and Loki configuration
|
||||
- **risk.ts** - Risk management parameters
|
||||
- **data-providers.ts** - Data provider settings
|
||||
|
||||
## Benefits of This Approach
|
||||
|
||||
1. **Zero Configuration Schema** - No complex schema definitions needed
|
||||
2. **Automatic Type Inference** - TypeScript types are generated automatically
|
||||
3. **Environment Variable Validation** - Invalid values are caught at startup
|
||||
4. **Great Developer Experience** - IntelliSense works perfectly
|
||||
5. **Production Ready** - Used by many large-scale applications
|
||||
|
||||
## Migration from Previous System
|
||||
|
||||
If you're migrating from the old Valibot-based system:
|
||||
|
||||
```typescript
|
||||
// Old way
|
||||
const config = createConfigLoader('database', databaseSchema, defaultConfig)();
|
||||
|
||||
// New way
|
||||
import { databaseConfig } from '@stock-bot/config';
|
||||
// That's it! No schema needed, no validation needed, no complex setup.
|
||||
```
|
||||
|
|
@ -12,6 +12,7 @@
|
|||
},
|
||||
"dependencies": {
|
||||
"dotenv": "^16.3.1",
|
||||
"envalid": "^8.0.0",
|
||||
"zod": "^3.22.4"
|
||||
},
|
||||
"devDependencies": {
|
||||
|
|
|
|||
119
libs/config/src/admin-interfaces.ts
Normal file
119
libs/config/src/admin-interfaces.ts
Normal file
|
|
@ -0,0 +1,119 @@
|
|||
/**
|
||||
* Admin interfaces configuration using envalid
|
||||
* PgAdmin, Mongo Express, Redis Insight for database management
|
||||
*/
|
||||
import { cleanEnv, str, port, bool } from 'envalid';
|
||||
|
||||
/**
|
||||
* PgAdmin configuration with validation and defaults
|
||||
*/
|
||||
export const pgAdminConfig = cleanEnv(process.env, {
|
||||
// PgAdmin Server
|
||||
PGADMIN_HOST: str({ default: 'localhost', desc: 'PgAdmin host' }),
|
||||
PGADMIN_PORT: port({ default: 8080, desc: 'PgAdmin port' }),
|
||||
|
||||
// Authentication
|
||||
PGADMIN_DEFAULT_EMAIL: str({ default: 'admin@tradingbot.local', desc: 'PgAdmin default admin email' }),
|
||||
PGADMIN_DEFAULT_PASSWORD: str({ default: 'admin123', desc: 'PgAdmin default admin password' }),
|
||||
|
||||
// Configuration
|
||||
PGADMIN_SERVER_MODE: bool({ default: false, desc: 'Enable server mode (multi-user)' }),
|
||||
PGADMIN_DISABLE_POSTFIX: bool({ default: true, desc: 'Disable postfix for email' }),
|
||||
PGADMIN_CONFIG_ENHANCED_COOKIE_PROTECTION: bool({ default: true, desc: 'Enhanced cookie protection' }),
|
||||
|
||||
// Security
|
||||
PGADMIN_MASTER_PASSWORD_REQUIRED: bool({ default: false, desc: 'Require master password' }),
|
||||
PGADMIN_SESSION_TIMEOUT: str({ default: '60', desc: 'Session timeout in minutes' }),
|
||||
});
|
||||
|
||||
/**
|
||||
* Mongo Express configuration with validation and defaults
|
||||
*/
|
||||
export const mongoExpressConfig = cleanEnv(process.env, {
|
||||
// Mongo Express Server
|
||||
MONGO_EXPRESS_HOST: str({ default: 'localhost', desc: 'Mongo Express host' }),
|
||||
MONGO_EXPRESS_PORT: port({ default: 8081, desc: 'Mongo Express port' }),
|
||||
|
||||
// MongoDB Connection
|
||||
MONGO_EXPRESS_MONGODB_SERVER: str({ default: 'mongodb', desc: 'MongoDB server name/host' }),
|
||||
MONGO_EXPRESS_MONGODB_PORT: port({ default: 27017, desc: 'MongoDB port' }),
|
||||
MONGO_EXPRESS_MONGODB_ADMINUSERNAME: str({ default: 'trading_admin', desc: 'MongoDB admin username' }),
|
||||
MONGO_EXPRESS_MONGODB_ADMINPASSWORD: str({ default: '', desc: 'MongoDB admin password' }),
|
||||
|
||||
// Basic Authentication for Mongo Express
|
||||
MONGO_EXPRESS_BASICAUTH_USERNAME: str({ default: 'admin', desc: 'Basic auth username for Mongo Express' }),
|
||||
MONGO_EXPRESS_BASICAUTH_PASSWORD: str({ default: 'admin123', desc: 'Basic auth password for Mongo Express' }),
|
||||
|
||||
// Configuration
|
||||
MONGO_EXPRESS_ENABLE_ADMIN: bool({ default: true, desc: 'Enable admin features' }),
|
||||
MONGO_EXPRESS_OPTIONS_EDITOR_THEME: str({
|
||||
default: 'rubyblue',
|
||||
desc: 'Editor theme (rubyblue, 3024-night, etc.)'
|
||||
}),
|
||||
MONGO_EXPRESS_REQUEST_SIZE: str({ default: '100kb', desc: 'Maximum request size' }),
|
||||
});
|
||||
|
||||
/**
|
||||
* Redis Insight configuration with validation and defaults
|
||||
*/
|
||||
export const redisInsightConfig = cleanEnv(process.env, {
|
||||
// Redis Insight Server
|
||||
REDIS_INSIGHT_HOST: str({ default: 'localhost', desc: 'Redis Insight host' }),
|
||||
REDIS_INSIGHT_PORT: port({ default: 8001, desc: 'Redis Insight port' }),
|
||||
|
||||
// Redis Connection Settings
|
||||
REDIS_INSIGHT_REDIS_HOSTS: str({
|
||||
default: 'local:dragonfly:6379',
|
||||
desc: 'Redis hosts in format name:host:port,name:host:port'
|
||||
}),
|
||||
|
||||
// Configuration
|
||||
REDIS_INSIGHT_LOG_LEVEL: str({
|
||||
default: 'info',
|
||||
choices: ['error', 'warn', 'info', 'verbose', 'debug'],
|
||||
desc: 'Redis Insight log level'
|
||||
}),
|
||||
REDIS_INSIGHT_DISABLE_ANALYTICS: bool({ default: true, desc: 'Disable analytics collection' }),
|
||||
REDIS_INSIGHT_BUILD_TYPE: str({ default: 'DOCKER', desc: 'Build type identifier' }),
|
||||
});
|
||||
|
||||
// Export typed configuration objects
|
||||
export type PgAdminConfig = typeof pgAdminConfig;
|
||||
export type MongoExpressConfig = typeof mongoExpressConfig;
|
||||
export type RedisInsightConfig = typeof redisInsightConfig;
|
||||
|
||||
// Export individual config values for convenience
|
||||
export const {
|
||||
PGADMIN_HOST,
|
||||
PGADMIN_PORT,
|
||||
PGADMIN_DEFAULT_EMAIL,
|
||||
PGADMIN_DEFAULT_PASSWORD,
|
||||
PGADMIN_SERVER_MODE,
|
||||
PGADMIN_DISABLE_POSTFIX,
|
||||
PGADMIN_CONFIG_ENHANCED_COOKIE_PROTECTION,
|
||||
PGADMIN_MASTER_PASSWORD_REQUIRED,
|
||||
PGADMIN_SESSION_TIMEOUT,
|
||||
} = pgAdminConfig;
|
||||
|
||||
export const {
|
||||
MONGO_EXPRESS_HOST,
|
||||
MONGO_EXPRESS_PORT,
|
||||
MONGO_EXPRESS_MONGODB_SERVER,
|
||||
MONGO_EXPRESS_MONGODB_PORT,
|
||||
MONGO_EXPRESS_MONGODB_ADMINUSERNAME,
|
||||
MONGO_EXPRESS_MONGODB_ADMINPASSWORD,
|
||||
MONGO_EXPRESS_BASICAUTH_USERNAME,
|
||||
MONGO_EXPRESS_BASICAUTH_PASSWORD,
|
||||
MONGO_EXPRESS_ENABLE_ADMIN,
|
||||
MONGO_EXPRESS_OPTIONS_EDITOR_THEME,
|
||||
MONGO_EXPRESS_REQUEST_SIZE,
|
||||
} = mongoExpressConfig;
|
||||
|
||||
export const {
|
||||
REDIS_INSIGHT_HOST,
|
||||
REDIS_INSIGHT_PORT,
|
||||
REDIS_INSIGHT_REDIS_HOSTS,
|
||||
REDIS_INSIGHT_LOG_LEVEL,
|
||||
REDIS_INSIGHT_DISABLE_ANALYTICS,
|
||||
REDIS_INSIGHT_BUILD_TYPE,
|
||||
} = redisInsightConfig;
|
||||
|
|
@ -1,136 +0,0 @@
|
|||
/**
|
||||
* Tests for the configuration library
|
||||
*/
|
||||
import { describe, expect, test, beforeAll, afterAll } from 'bun:test';
|
||||
import {
|
||||
getEnvironment,
|
||||
validateConfig,
|
||||
ConfigurationError,
|
||||
loadEnvVariables,
|
||||
getEnvVar,
|
||||
getNumericEnvVar,
|
||||
getBooleanEnvVar
|
||||
} from './core';
|
||||
|
||||
import { Environment, databaseConfigSchema } from './types';
|
||||
|
||||
describe('Core configuration', () => {
|
||||
// Save original environment variables
|
||||
const originalEnv = { ...process.env };
|
||||
|
||||
// Setup test environment variables
|
||||
beforeAll(() => {
|
||||
process.env.NODE_ENV = 'testing';
|
||||
process.env.TEST_STRING = 'test-value';
|
||||
process.env.TEST_NUMBER = '42';
|
||||
process.env.TEST_BOOL_TRUE = 'true';
|
||||
process.env.TEST_BOOL_FALSE = 'false';
|
||||
});
|
||||
|
||||
// Restore original environment variables
|
||||
afterAll(() => {
|
||||
process.env = { ...originalEnv };
|
||||
});
|
||||
|
||||
test('getEnvironment returns correct environment', () => {
|
||||
expect(getEnvironment()).toBe(Environment.Testing);
|
||||
|
||||
// Test different environments
|
||||
process.env.NODE_ENV = 'development';
|
||||
expect(getEnvironment()).toBe(Environment.Development);
|
||||
|
||||
process.env.NODE_ENV = 'production';
|
||||
expect(getEnvironment()).toBe(Environment.Production);
|
||||
|
||||
process.env.NODE_ENV = 'staging';
|
||||
expect(getEnvironment()).toBe(Environment.Staging);
|
||||
|
||||
// Test default environment
|
||||
process.env.NODE_ENV = 'unknown';
|
||||
expect(getEnvironment()).toBe(Environment.Development);
|
||||
});
|
||||
|
||||
test('getEnvVar retrieves environment variables', () => {
|
||||
expect(getEnvVar('TEST_STRING')).toBe('test-value');
|
||||
expect(getEnvVar('NON_EXISTENT')).toBeUndefined();
|
||||
expect(getEnvVar('NON_EXISTENT', false)).toBeUndefined();
|
||||
|
||||
// Test required variables
|
||||
expect(() => getEnvVar('NON_EXISTENT', true)).toThrow(ConfigurationError);
|
||||
});
|
||||
|
||||
test('getNumericEnvVar converts to number', () => {
|
||||
expect(getNumericEnvVar('TEST_NUMBER')).toBe(42);
|
||||
expect(getNumericEnvVar('NON_EXISTENT', 100)).toBe(100);
|
||||
|
||||
// Test invalid number
|
||||
process.env.INVALID_NUMBER = 'not-a-number';
|
||||
expect(() => getNumericEnvVar('INVALID_NUMBER')).toThrow(ConfigurationError);
|
||||
});
|
||||
|
||||
test('getBooleanEnvVar converts to boolean', () => {
|
||||
expect(getBooleanEnvVar('TEST_BOOL_TRUE')).toBe(true);
|
||||
expect(getBooleanEnvVar('TEST_BOOL_FALSE')).toBe(false);
|
||||
expect(getBooleanEnvVar('NON_EXISTENT', true)).toBe(true);
|
||||
});
|
||||
test('validateConfig validates against schema', () => {
|
||||
// Valid config
|
||||
const validConfig = {
|
||||
dragonfly: {
|
||||
host: 'localhost',
|
||||
port: 6379,
|
||||
maxRetriesPerRequest: 3
|
||||
},
|
||||
questDB: {
|
||||
host: 'localhost',
|
||||
port: 8812,
|
||||
database: 'stockbot',
|
||||
user: 'admin',
|
||||
httpPort: 9000
|
||||
},
|
||||
mongodb: {
|
||||
uri: 'mongodb://localhost:27017',
|
||||
database: 'stockbot'
|
||||
},
|
||||
postgres: {
|
||||
host: 'localhost',
|
||||
port: 5432,
|
||||
database: 'stockbot',
|
||||
user: 'postgres',
|
||||
poolSize: 10,
|
||||
ssl: false
|
||||
}
|
||||
};
|
||||
|
||||
expect(() => validateConfig(validConfig, databaseConfigSchema)).not.toThrow();
|
||||
// Invalid config (missing required field)
|
||||
const invalidConfig = {
|
||||
dragonfly: {
|
||||
host: 'localhost',
|
||||
// missing port
|
||||
maxRetriesPerRequest: 3
|
||||
},
|
||||
questDB: {
|
||||
host: 'localhost',
|
||||
port: 8812,
|
||||
database: 'stockbot',
|
||||
user: 'admin',
|
||||
httpPort: 9000
|
||||
},
|
||||
mongodb: {
|
||||
uri: 'mongodb://localhost:27017',
|
||||
database: 'stockbot'
|
||||
},
|
||||
postgres: {
|
||||
host: 'localhost',
|
||||
port: 5432,
|
||||
database: 'stockbot',
|
||||
user: 'postgres',
|
||||
poolSize: 10,
|
||||
ssl: false
|
||||
}
|
||||
};
|
||||
|
||||
expect(() => validateConfig(invalidConfig, databaseConfigSchema)).toThrow(ConfigurationError);
|
||||
});
|
||||
});
|
||||
|
|
@ -1,10 +1,8 @@
|
|||
/**
|
||||
* Core configuration module for the Stock Bot platform
|
||||
* Core configuration module for the Stock Bot platform using envalid
|
||||
*/
|
||||
import { config as dotenvConfig } from 'dotenv';
|
||||
import path from 'node:path';
|
||||
import { z } from 'zod';
|
||||
import { Environment } from './types';
|
||||
|
||||
/**
|
||||
* Represents an error related to configuration validation
|
||||
|
|
@ -16,6 +14,16 @@ export class ConfigurationError extends Error {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Environment types
|
||||
*/
|
||||
export enum Environment {
|
||||
Development = 'development',
|
||||
Testing = 'testing',
|
||||
Staging = 'staging',
|
||||
Production = 'production'
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads environment variables from .env files based on the current environment
|
||||
*/
|
||||
|
|
@ -57,106 +65,3 @@ export function getEnvironment(): Environment {
|
|||
return Environment.Development;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates configuration using Zod schema
|
||||
*/
|
||||
export function validateConfig<T>(config: unknown, schema: z.ZodSchema<T>): T {
|
||||
try {
|
||||
return schema.parse(config);
|
||||
} catch (error) {
|
||||
if (error instanceof z.ZodError) {
|
||||
const issues = error.issues.map(issue =>
|
||||
`${issue.path.join('.')}: ${issue.message}`
|
||||
).join('\n');
|
||||
|
||||
throw new ConfigurationError(`Configuration validation failed:\n${issues}`);
|
||||
}
|
||||
throw new ConfigurationError('Invalid configuration');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves an environment variable with validation
|
||||
*/
|
||||
export function getEnvVar(key: string, required: boolean = false): string | undefined {
|
||||
const value = process.env[key];
|
||||
|
||||
if (required && (value === undefined || value === '')) {
|
||||
throw new ConfigurationError(`Required environment variable ${key} is missing`);
|
||||
}
|
||||
|
||||
return value;
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves a numeric environment variable with validation
|
||||
*/
|
||||
export function getNumericEnvVar(key: string, defaultValue?: number): number {
|
||||
const value = process.env[key];
|
||||
if (value === undefined || value === '') {
|
||||
if (defaultValue !== undefined) {
|
||||
return defaultValue;
|
||||
}
|
||||
throw new ConfigurationError(`Required numeric environment variable ${key} is missing`);
|
||||
}
|
||||
|
||||
const numValue = Number(value);
|
||||
|
||||
if (isNaN(numValue)) {
|
||||
throw new ConfigurationError(`Environment variable ${key} is not a valid number`);
|
||||
}
|
||||
|
||||
return numValue;
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves a boolean environment variable with validation
|
||||
*/
|
||||
export function getBooleanEnvVar(key: string, defaultValue?: boolean): boolean {
|
||||
const value = process.env[key];
|
||||
if (value === undefined || value === '') {
|
||||
if (defaultValue !== undefined) {
|
||||
return defaultValue;
|
||||
}
|
||||
throw new ConfigurationError(`Required boolean environment variable ${key} is missing`);
|
||||
}
|
||||
|
||||
return value.toLowerCase() === 'true' || value === '1';
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a typed dynamic configuration loader for a specific service
|
||||
*/
|
||||
export function createConfigLoader<T>(
|
||||
serviceName: string,
|
||||
schema: z.ZodSchema<T>,
|
||||
defaultConfig: Partial<T> = {}
|
||||
): () => T {
|
||||
return (): T => {
|
||||
try {
|
||||
loadEnvVariables();
|
||||
const configEnvVar = `${serviceName.toUpperCase()}_CONFIG`;
|
||||
let config = { ...defaultConfig } as unknown as T;
|
||||
|
||||
// Try to load JSON from environment variable if available
|
||||
const configJson = process.env[configEnvVar];
|
||||
if (configJson) {
|
||||
try {
|
||||
const parsedConfig = JSON.parse(configJson);
|
||||
config = { ...config, ...parsedConfig };
|
||||
} catch (error) {
|
||||
throw new ConfigurationError(`Invalid JSON in ${configEnvVar} environment variable`);
|
||||
}
|
||||
}
|
||||
|
||||
// Validate and return the config
|
||||
return validateConfig(config, schema);
|
||||
} catch (error) {
|
||||
if (error instanceof ConfigurationError) {
|
||||
throw error;
|
||||
}
|
||||
throw new ConfigurationError(`Failed to load configuration for service ${serviceName}: ${error}`);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,85 +1,157 @@
|
|||
/**
|
||||
* Data provider configurations for market data
|
||||
* Data provider configurations using envalid
|
||||
*/
|
||||
import { getEnvVar, validateConfig, createConfigLoader } from './core';
|
||||
import { dataProvidersConfigSchema, DataProvidersConfig, DataProviderConfig } from './types';
|
||||
import { cleanEnv, str, num, bool } from 'envalid';
|
||||
|
||||
/**
|
||||
* Default data provider configurations
|
||||
* Data providers configuration with validation and defaults
|
||||
*/
|
||||
const defaultDataProviders: DataProviderConfig[] = [
|
||||
{
|
||||
name: 'alpaca',
|
||||
type: 'rest',
|
||||
baseUrl: 'https://data.alpaca.markets/v1beta1',
|
||||
apiKey: '',
|
||||
apiSecret: '',
|
||||
rateLimits: {
|
||||
maxRequestsPerMinute: 200
|
||||
}
|
||||
},
|
||||
{
|
||||
name: 'polygon',
|
||||
type: 'rest',
|
||||
baseUrl: 'https://api.polygon.io/v2',
|
||||
apiKey: '',
|
||||
rateLimits: {
|
||||
maxRequestsPerMinute: 5
|
||||
}
|
||||
},
|
||||
{
|
||||
name: 'alpaca-websocket',
|
||||
type: 'websocket',
|
||||
wsUrl: 'wss://stream.data.alpaca.markets/v2/iex',
|
||||
apiKey: '',
|
||||
apiSecret: ''
|
||||
export const dataProvidersConfig = cleanEnv(process.env, {
|
||||
// Default Provider
|
||||
DEFAULT_DATA_PROVIDER: str({
|
||||
choices: ['alpaca', 'polygon', 'yahoo', 'iex'],
|
||||
default: 'alpaca',
|
||||
desc: 'Default data provider'
|
||||
}),
|
||||
|
||||
// Alpaca Configuration
|
||||
ALPACA_API_KEY: str({ default: '', desc: 'Alpaca API key' }),
|
||||
ALPACA_API_SECRET: str({ default: '', desc: 'Alpaca API secret' }),
|
||||
ALPACA_BASE_URL: str({ default: 'https://data.alpaca.markets/v1beta1', desc: 'Alpaca base URL' }),
|
||||
ALPACA_RATE_LIMIT: num({ default: 200, desc: 'Alpaca rate limit per minute' }),
|
||||
ALPACA_ENABLED: bool({ default: true, desc: 'Enable Alpaca provider' }),
|
||||
|
||||
// Polygon Configuration
|
||||
POLYGON_API_KEY: str({ default: '', desc: 'Polygon API key' }),
|
||||
POLYGON_BASE_URL: str({ default: 'https://api.polygon.io', desc: 'Polygon base URL' }),
|
||||
POLYGON_RATE_LIMIT: num({ default: 5, desc: 'Polygon rate limit per minute' }),
|
||||
POLYGON_ENABLED: bool({ default: false, desc: 'Enable Polygon provider' }),
|
||||
|
||||
// Yahoo Finance Configuration
|
||||
YAHOO_BASE_URL: str({ default: 'https://query1.finance.yahoo.com', desc: 'Yahoo Finance base URL' }),
|
||||
YAHOO_RATE_LIMIT: num({ default: 2000, desc: 'Yahoo Finance rate limit per hour' }),
|
||||
YAHOO_ENABLED: bool({ default: true, desc: 'Enable Yahoo Finance provider' }),
|
||||
|
||||
// IEX Cloud Configuration
|
||||
IEX_API_KEY: str({ default: '', desc: 'IEX Cloud API key' }),
|
||||
IEX_BASE_URL: str({ default: 'https://cloud.iexapis.com/stable', desc: 'IEX Cloud base URL' }),
|
||||
IEX_RATE_LIMIT: num({ default: 100, desc: 'IEX Cloud rate limit per second' }),
|
||||
IEX_ENABLED: bool({ default: false, desc: 'Enable IEX Cloud provider' }),
|
||||
|
||||
// Connection Settings
|
||||
DATA_PROVIDER_TIMEOUT: num({ default: 30000, desc: 'Request timeout in milliseconds' }),
|
||||
DATA_PROVIDER_RETRIES: num({ default: 3, desc: 'Number of retry attempts' }),
|
||||
DATA_PROVIDER_RETRY_DELAY: num({ default: 1000, desc: 'Retry delay in milliseconds' }),
|
||||
|
||||
// Cache Settings
|
||||
DATA_CACHE_ENABLED: bool({ default: true, desc: 'Enable data caching' }),
|
||||
DATA_CACHE_TTL: num({ default: 300000, desc: 'Cache TTL in milliseconds' }),
|
||||
DATA_CACHE_MAX_SIZE: num({ default: 1000, desc: 'Maximum cache entries' }),
|
||||
});
|
||||
|
||||
/**
|
||||
* Helper function to get provider-specific configuration
|
||||
*/
|
||||
export function getProviderConfig(providerName: string) {
|
||||
const name = providerName.toUpperCase();
|
||||
|
||||
switch (name) {
|
||||
case 'ALPACA':
|
||||
return {
|
||||
name: 'alpaca',
|
||||
type: 'rest' as const,
|
||||
enabled: dataProvidersConfig.ALPACA_ENABLED,
|
||||
baseUrl: dataProvidersConfig.ALPACA_BASE_URL,
|
||||
apiKey: dataProvidersConfig.ALPACA_API_KEY,
|
||||
apiSecret: dataProvidersConfig.ALPACA_API_SECRET,
|
||||
rateLimits: {
|
||||
maxRequestsPerMinute: dataProvidersConfig.ALPACA_RATE_LIMIT
|
||||
}
|
||||
};
|
||||
|
||||
case 'POLYGON':
|
||||
return {
|
||||
name: 'polygon',
|
||||
type: 'rest' as const,
|
||||
enabled: dataProvidersConfig.POLYGON_ENABLED,
|
||||
baseUrl: dataProvidersConfig.POLYGON_BASE_URL,
|
||||
apiKey: dataProvidersConfig.POLYGON_API_KEY,
|
||||
rateLimits: {
|
||||
maxRequestsPerMinute: dataProvidersConfig.POLYGON_RATE_LIMIT
|
||||
}
|
||||
};
|
||||
|
||||
case 'YAHOO':
|
||||
return {
|
||||
name: 'yahoo',
|
||||
type: 'rest' as const,
|
||||
enabled: dataProvidersConfig.YAHOO_ENABLED,
|
||||
baseUrl: dataProvidersConfig.YAHOO_BASE_URL,
|
||||
rateLimits: {
|
||||
maxRequestsPerHour: dataProvidersConfig.YAHOO_RATE_LIMIT
|
||||
}
|
||||
};
|
||||
|
||||
case 'IEX':
|
||||
return {
|
||||
name: 'iex',
|
||||
type: 'rest' as const,
|
||||
enabled: dataProvidersConfig.IEX_ENABLED,
|
||||
baseUrl: dataProvidersConfig.IEX_BASE_URL,
|
||||
apiKey: dataProvidersConfig.IEX_API_KEY,
|
||||
rateLimits: {
|
||||
maxRequestsPerSecond: dataProvidersConfig.IEX_RATE_LIMIT
|
||||
}
|
||||
};
|
||||
|
||||
default:
|
||||
throw new Error(`Unknown provider: ${providerName}`);
|
||||
}
|
||||
];
|
||||
|
||||
/**
|
||||
* Load data provider configurations from environment variables
|
||||
*/
|
||||
export function loadDataProviderConfigs(): DataProvidersConfig {
|
||||
// Get provider specific environment variables
|
||||
const providers = defaultDataProviders.map(provider => {
|
||||
const nameUpper = provider.name.toUpperCase().replace('-', '_');
|
||||
|
||||
const updatedProvider: DataProviderConfig = {
|
||||
...provider,
|
||||
apiKey: getEnvVar(`${nameUpper}_API_KEY`) || provider.apiKey || '',
|
||||
};
|
||||
|
||||
if (provider.apiSecret !== undefined) {
|
||||
updatedProvider.apiSecret = getEnvVar(`${nameUpper}_API_SECRET`) || provider.apiSecret || '';
|
||||
}
|
||||
|
||||
return updatedProvider;
|
||||
});
|
||||
|
||||
// Load default provider from environment
|
||||
const defaultProvider = getEnvVar('DEFAULT_DATA_PROVIDER') || 'alpaca';
|
||||
|
||||
const config: DataProvidersConfig = {
|
||||
providers,
|
||||
defaultProvider
|
||||
};
|
||||
|
||||
return validateConfig(config, dataProvidersConfigSchema);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a dynamic configuration loader for data providers
|
||||
* Get all enabled providers
|
||||
*/
|
||||
export const createDataProvidersConfig = createConfigLoader<DataProvidersConfig>(
|
||||
'data-providers',
|
||||
dataProvidersConfigSchema,
|
||||
{
|
||||
providers: defaultDataProviders,
|
||||
defaultProvider: 'alpaca'
|
||||
}
|
||||
);
|
||||
export function getEnabledProviders() {
|
||||
const providers = ['alpaca', 'polygon', 'yahoo', 'iex'];
|
||||
return providers
|
||||
.map(provider => getProviderConfig(provider))
|
||||
.filter(config => config.enabled);
|
||||
}
|
||||
|
||||
/**
|
||||
* Singleton data provider configurations
|
||||
* Get the default provider configuration
|
||||
*/
|
||||
export const dataProviderConfigs = loadDataProviderConfigs();
|
||||
export function getDefaultProvider() {
|
||||
return getProviderConfig(dataProvidersConfig.DEFAULT_DATA_PROVIDER);
|
||||
}
|
||||
|
||||
// Export typed configuration object
|
||||
export type DataProvidersConfig = typeof dataProvidersConfig;
|
||||
|
||||
// Export individual config values for convenience
|
||||
export const {
|
||||
DEFAULT_DATA_PROVIDER,
|
||||
ALPACA_API_KEY,
|
||||
ALPACA_API_SECRET,
|
||||
ALPACA_BASE_URL,
|
||||
ALPACA_RATE_LIMIT,
|
||||
ALPACA_ENABLED,
|
||||
POLYGON_API_KEY,
|
||||
POLYGON_BASE_URL,
|
||||
POLYGON_RATE_LIMIT,
|
||||
POLYGON_ENABLED,
|
||||
YAHOO_BASE_URL,
|
||||
YAHOO_RATE_LIMIT,
|
||||
YAHOO_ENABLED,
|
||||
IEX_API_KEY,
|
||||
IEX_BASE_URL,
|
||||
IEX_RATE_LIMIT,
|
||||
IEX_ENABLED,
|
||||
DATA_PROVIDER_TIMEOUT,
|
||||
DATA_PROVIDER_RETRIES,
|
||||
DATA_PROVIDER_RETRY_DELAY,
|
||||
DATA_CACHE_ENABLED,
|
||||
DATA_CACHE_TTL,
|
||||
DATA_CACHE_MAX_SIZE,
|
||||
} = dataProvidersConfig;
|
||||
|
|
|
|||
|
|
@ -1,91 +1,54 @@
|
|||
/**
|
||||
* Database configuration for Stock Bot services
|
||||
* Database configuration using envalid
|
||||
*/
|
||||
import { z } from 'zod';
|
||||
import { getEnvVar, getNumericEnvVar, validateConfig, createConfigLoader } from './core';
|
||||
import { databaseConfigSchema, DatabaseConfig } from './types';
|
||||
import { cleanEnv, str, port, bool, num } from 'envalid';
|
||||
|
||||
/**
|
||||
* Default database configuration
|
||||
* Database configuration with validation and defaults
|
||||
*/
|
||||
const defaultDatabaseConfig: DatabaseConfig = {
|
||||
dragonfly: {
|
||||
host: 'localhost',
|
||||
port: 6379,
|
||||
maxRetriesPerRequest: 3
|
||||
},
|
||||
questDB: {
|
||||
host: 'localhost',
|
||||
port: 8812,
|
||||
database: 'stockbot',
|
||||
user: 'admin',
|
||||
httpPort: 9000
|
||||
},
|
||||
mongodb: {
|
||||
uri: 'mongodb://localhost:27017',
|
||||
database: 'stockbot'
|
||||
},
|
||||
postgres: {
|
||||
host: 'localhost',
|
||||
port: 5432,
|
||||
database: 'stockbot',
|
||||
user: 'postgres',
|
||||
poolSize: 10,
|
||||
ssl: false
|
||||
}
|
||||
};
|
||||
export const databaseConfig = cleanEnv(process.env, {
|
||||
// PostgreSQL Configuration
|
||||
DB_HOST: str({ default: 'localhost', desc: 'Database host' }),
|
||||
DB_PORT: port({ default: 5432, desc: 'Database port' }),
|
||||
DB_NAME: str({ default: 'stockbot', desc: 'Database name' }),
|
||||
DB_USER: str({ default: 'stockbot', desc: 'Database user' }),
|
||||
DB_PASSWORD: str({ default: '', desc: 'Database password' }),
|
||||
|
||||
// Connection Pool Settings
|
||||
DB_POOL_MIN: num({ default: 2, desc: 'Minimum pool connections' }),
|
||||
DB_POOL_MAX: num({ default: 10, desc: 'Maximum pool connections' }),
|
||||
DB_POOL_IDLE_TIMEOUT: num({ default: 30000, desc: 'Pool idle timeout in ms' }),
|
||||
|
||||
// SSL Configuration
|
||||
DB_SSL: bool({ default: false, desc: 'Enable SSL for database connection' }),
|
||||
DB_SSL_REJECT_UNAUTHORIZED: bool({ default: true, desc: 'Reject unauthorized SSL certificates' }),
|
||||
|
||||
// Additional Settings
|
||||
DB_QUERY_TIMEOUT: num({ default: 30000, desc: 'Query timeout in ms' }),
|
||||
DB_CONNECTION_TIMEOUT: num({ default: 5000, desc: 'Connection timeout in ms' }),
|
||||
DB_STATEMENT_TIMEOUT: num({ default: 30000, desc: 'Statement timeout in ms' }),
|
||||
DB_LOCK_TIMEOUT: num({ default: 10000, desc: 'Lock timeout in ms' }),
|
||||
DB_IDLE_IN_TRANSACTION_SESSION_TIMEOUT: num({ default: 60000, desc: 'Idle in transaction timeout in ms' }),
|
||||
});
|
||||
|
||||
/**
|
||||
* Load database configuration from environment variables
|
||||
*/
|
||||
export function loadDatabaseConfig(): DatabaseConfig {
|
||||
const config = {
|
||||
dragonfly: {
|
||||
host: getEnvVar('DRAGONFLY_HOST') || defaultDatabaseConfig.dragonfly.host,
|
||||
port: getNumericEnvVar('DRAGONFLY_PORT', defaultDatabaseConfig.dragonfly.port),
|
||||
password: getEnvVar('DRAGONFLY_PASSWORD'),
|
||||
maxRetriesPerRequest: getNumericEnvVar('DRAGONFLY_MAX_RETRIES_PER_REQUEST',
|
||||
defaultDatabaseConfig.dragonfly.maxRetriesPerRequest)
|
||||
},
|
||||
questDB: {
|
||||
host: getEnvVar('QUESTDB_HOST') || defaultDatabaseConfig.questDB.host,
|
||||
port: getNumericEnvVar('QUESTDB_PORT', defaultDatabaseConfig.questDB.port),
|
||||
database: getEnvVar('QUESTDB_DB') || defaultDatabaseConfig.questDB.database,
|
||||
user: getEnvVar('QUESTDB_USER') || defaultDatabaseConfig.questDB.user,
|
||||
password: getEnvVar('QUESTDB_PASSWORD'),
|
||||
httpPort: getNumericEnvVar('QUESTDB_HTTP_PORT', defaultDatabaseConfig.questDB.httpPort)
|
||||
},
|
||||
mongodb: {
|
||||
uri: getEnvVar('MONGODB_URI') || defaultDatabaseConfig.mongodb.uri,
|
||||
database: getEnvVar('MONGODB_DATABASE') || defaultDatabaseConfig.mongodb.database,
|
||||
username: getEnvVar('MONGODB_USERNAME'),
|
||||
password: getEnvVar('MONGODB_PASSWORD'),
|
||||
options: process.env.MONGODB_OPTIONS ? JSON.parse(process.env.MONGODB_OPTIONS) : undefined
|
||||
},
|
||||
postgres: {
|
||||
host: getEnvVar('POSTGRES_HOST') || defaultDatabaseConfig.postgres.host,
|
||||
port: getNumericEnvVar('POSTGRES_PORT', defaultDatabaseConfig.postgres.port),
|
||||
database: getEnvVar('POSTGRES_DB') || defaultDatabaseConfig.postgres.database,
|
||||
user: getEnvVar('POSTGRES_USER') || defaultDatabaseConfig.postgres.user,
|
||||
password: getEnvVar('POSTGRES_PASSWORD'),
|
||||
ssl: process.env.POSTGRES_SSL === 'true',
|
||||
poolSize: getNumericEnvVar('POSTGRES_POOL_SIZE', defaultDatabaseConfig.postgres.poolSize)
|
||||
}
|
||||
};
|
||||
// Export typed configuration object
|
||||
export type DatabaseConfig = typeof databaseConfig;
|
||||
|
||||
return validateConfig(config, databaseConfigSchema);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a dynamic configuration loader for database config
|
||||
*/
|
||||
export const createDatabaseConfig = createConfigLoader<typeof defaultDatabaseConfig>(
|
||||
'database',
|
||||
databaseConfigSchema,
|
||||
defaultDatabaseConfig
|
||||
);
|
||||
|
||||
/**
|
||||
* Singleton database configuration
|
||||
*/
|
||||
export const databaseConfig = loadDatabaseConfig();
|
||||
// Export individual config values for convenience
|
||||
export const {
|
||||
DB_HOST,
|
||||
DB_PORT,
|
||||
DB_NAME,
|
||||
DB_USER,
|
||||
DB_PASSWORD,
|
||||
DB_POOL_MIN,
|
||||
DB_POOL_MAX,
|
||||
DB_POOL_IDLE_TIMEOUT,
|
||||
DB_SSL,
|
||||
DB_SSL_REJECT_UNAUTHORIZED,
|
||||
DB_QUERY_TIMEOUT,
|
||||
DB_CONNECTION_TIMEOUT,
|
||||
DB_STATEMENT_TIMEOUT,
|
||||
DB_LOCK_TIMEOUT,
|
||||
DB_IDLE_IN_TRANSACTION_SESSION_TIMEOUT,
|
||||
} = databaseConfig;
|
||||
|
|
|
|||
79
libs/config/src/dragonfly.ts
Normal file
79
libs/config/src/dragonfly.ts
Normal file
|
|
@ -0,0 +1,79 @@
|
|||
/**
|
||||
* Dragonfly (Redis replacement) configuration using envalid
|
||||
* High-performance caching and event streaming
|
||||
*/
|
||||
import { cleanEnv, str, port, bool, num } from 'envalid';
|
||||
|
||||
/**
|
||||
* Dragonfly configuration with validation and defaults
|
||||
*/
|
||||
/**
 * Dragonfly connection settings, validated from process.env at module load.
 *
 * NOTE(review): envalid's default reporter terminates the process when a
 * variable fails validation — confirm that is acceptable for every consumer
 * of this library (e.g. CLI tools that may prefer a thrown error).
 */
export const dragonflyConfig = cleanEnv(process.env, {
  // Dragonfly Connection
  DRAGONFLY_HOST: str({ default: 'localhost', desc: 'Dragonfly host' }),
  DRAGONFLY_PORT: port({ default: 6379, desc: 'Dragonfly port' }),
  // Empty-string defaults mean "no auth configured"; consumers convert '' to
  // undefined before passing to a client (see usage examples).
  DRAGONFLY_PASSWORD: str({ default: '', desc: 'Dragonfly password (if auth enabled)' }),
  DRAGONFLY_USERNAME: str({ default: '', desc: 'Dragonfly username (if ACL enabled)' }),

  // Database Selection
  DRAGONFLY_DATABASE: num({ default: 0, desc: 'Dragonfly database number (0-15)' }),

  // Connection Pool Settings
  DRAGONFLY_MAX_RETRIES: num({ default: 3, desc: 'Maximum retry attempts' }),
  DRAGONFLY_RETRY_DELAY: num({ default: 50, desc: 'Retry delay in ms' }),
  DRAGONFLY_CONNECT_TIMEOUT: num({ default: 10000, desc: 'Connection timeout in ms' }),
  DRAGONFLY_COMMAND_TIMEOUT: num({ default: 5000, desc: 'Command timeout in ms' }),

  // Pool Configuration
  DRAGONFLY_POOL_SIZE: num({ default: 10, desc: 'Connection pool size' }),
  DRAGONFLY_POOL_MIN: num({ default: 1, desc: 'Minimum pool connections' }),
  DRAGONFLY_POOL_MAX: num({ default: 20, desc: 'Maximum pool connections' }),

  // TLS Settings
  DRAGONFLY_TLS: bool({ default: false, desc: 'Enable TLS for Dragonfly connection' }),
  DRAGONFLY_TLS_CERT_FILE: str({ default: '', desc: 'Path to TLS certificate file' }),
  DRAGONFLY_TLS_KEY_FILE: str({ default: '', desc: 'Path to TLS key file' }),
  DRAGONFLY_TLS_CA_FILE: str({ default: '', desc: 'Path to TLS CA certificate file' }),
  DRAGONFLY_TLS_SKIP_VERIFY: bool({ default: false, desc: 'Skip TLS certificate verification' }),

  // Performance Settings
  DRAGONFLY_ENABLE_KEEPALIVE: bool({ default: true, desc: 'Enable TCP keepalive' }),
  DRAGONFLY_KEEPALIVE_INTERVAL: num({ default: 60, desc: 'Keepalive interval in seconds' }),

  // Clustering (if using cluster mode)
  DRAGONFLY_CLUSTER_MODE: bool({ default: false, desc: 'Enable cluster mode' }),
  DRAGONFLY_CLUSTER_NODES: str({ default: '', desc: 'Comma-separated list of cluster nodes (host:port)' }),

  // Memory and Cache Settings
  DRAGONFLY_MAX_MEMORY: str({ default: '2gb', desc: 'Maximum memory usage' }),
  DRAGONFLY_CACHE_MODE: bool({ default: true, desc: 'Enable cache mode' }),
});
|
||||
|
||||
// Export typed configuration object (inferred from the cleanEnv result, so
// it always stays in sync with the schema above).
export type DragonflyConfig = typeof dragonflyConfig;

// Export individual config values for convenience, so call sites can import
// a single value without referencing the whole config object.
export const {
  DRAGONFLY_HOST,
  DRAGONFLY_PORT,
  DRAGONFLY_PASSWORD,
  DRAGONFLY_USERNAME,
  DRAGONFLY_DATABASE,
  DRAGONFLY_MAX_RETRIES,
  DRAGONFLY_RETRY_DELAY,
  DRAGONFLY_CONNECT_TIMEOUT,
  DRAGONFLY_COMMAND_TIMEOUT,
  DRAGONFLY_POOL_SIZE,
  DRAGONFLY_POOL_MIN,
  DRAGONFLY_POOL_MAX,
  DRAGONFLY_TLS,
  DRAGONFLY_TLS_CERT_FILE,
  DRAGONFLY_TLS_KEY_FILE,
  DRAGONFLY_TLS_CA_FILE,
  DRAGONFLY_TLS_SKIP_VERIFY,
  DRAGONFLY_ENABLE_KEEPALIVE,
  DRAGONFLY_KEEPALIVE_INTERVAL,
  DRAGONFLY_CLUSTER_MODE,
  DRAGONFLY_CLUSTER_NODES,
  DRAGONFLY_MAX_MEMORY,
  DRAGONFLY_CACHE_MODE,
} = dragonflyConfig;
|
||||
|
|
@ -1,81 +1,818 @@
|
|||
/**
|
||||
* Example usage of the @stock-bot/config library
|
||||
* Example usage of the Stock Bot configuration library
|
||||
*
|
||||
* This file demonstrates how to use the envalid-based configuration
|
||||
* system for various services in the Stock Bot platform.
|
||||
*/
|
||||
|
||||
// Import all the configuration modules
|
||||
import {
|
||||
databaseConfig,
|
||||
dataProviderConfigs,
|
||||
riskConfig,
|
||||
Environment,
|
||||
// Core utilities
|
||||
loadEnvVariables,
|
||||
getEnvironment,
|
||||
marketDataGatewayConfig,
|
||||
riskGuardianConfig,
|
||||
Environment,
|
||||
ConfigurationError,
|
||||
validateConfig
|
||||
} from './index';
|
||||
} from './core';
|
||||
|
||||
import {
|
||||
// Database configuration
|
||||
databaseConfig,
|
||||
DatabaseConfig,
|
||||
DB_HOST,
|
||||
DB_PORT,
|
||||
DB_NAME,
|
||||
DB_USER,
|
||||
DB_PASSWORD,
|
||||
} from './database';
|
||||
|
||||
import {
|
||||
// QuestDB configuration
|
||||
questdbConfig,
|
||||
QuestDbConfig,
|
||||
QUESTDB_HOST,
|
||||
QUESTDB_HTTP_PORT,
|
||||
QUESTDB_PG_PORT,
|
||||
} from './questdb';
|
||||
|
||||
import {
|
||||
// MongoDB configuration
|
||||
mongodbConfig,
|
||||
MongoDbConfig,
|
||||
MONGODB_HOST,
|
||||
MONGODB_PORT,
|
||||
MONGODB_DATABASE,
|
||||
MONGODB_USERNAME,
|
||||
} from './mongodb';
|
||||
|
||||
import {
|
||||
// Dragonfly configuration
|
||||
dragonflyConfig,
|
||||
DragonflyConfig,
|
||||
DRAGONFLY_HOST,
|
||||
DRAGONFLY_PORT,
|
||||
DRAGONFLY_DATABASE,
|
||||
} from './dragonfly';
|
||||
|
||||
import {
|
||||
// Monitoring configuration
|
||||
prometheusConfig,
|
||||
grafanaConfig,
|
||||
PrometheusConfig,
|
||||
GrafanaConfig,
|
||||
PROMETHEUS_HOST,
|
||||
PROMETHEUS_PORT,
|
||||
GRAFANA_HOST,
|
||||
GRAFANA_PORT,
|
||||
} from './monitoring';
|
||||
|
||||
import {
|
||||
// Loki configuration
|
||||
lokiConfig,
|
||||
LokiConfig,
|
||||
LOKI_HOST,
|
||||
LOKI_PORT,
|
||||
LOKI_SERVICE_LABEL,
|
||||
LOKI_BATCH_SIZE,
|
||||
} from './loki';
|
||||
|
||||
import {
|
||||
// Logging configuration
|
||||
loggingConfig,
|
||||
LoggingConfig,
|
||||
LOG_LEVEL,
|
||||
LOG_FORMAT,
|
||||
LOG_CONSOLE,
|
||||
LOG_FILE,
|
||||
LOG_SERVICE_NAME,
|
||||
} from './logging';
|
||||
|
||||
|
||||
import {
|
||||
// Risk management configuration
|
||||
riskConfig,
|
||||
RiskConfig,
|
||||
RISK_MAX_POSITION_SIZE,
|
||||
RISK_DEFAULT_STOP_LOSS,
|
||||
RISK_CIRCUIT_BREAKER_ENABLED,
|
||||
} from './risk';
|
||||
|
||||
import {
|
||||
// Data provider configuration
|
||||
dataProvidersConfig,
|
||||
DataProvidersConfig,
|
||||
getProviderConfig,
|
||||
getEnabledProviders,
|
||||
getDefaultProvider,
|
||||
DEFAULT_DATA_PROVIDER,
|
||||
ALPACA_API_KEY,
|
||||
} from './data-providers';
|
||||
|
||||
/**
|
||||
* Display current configuration values
|
||||
* Example 1: Basic usage - Load environment variables and get configuration
|
||||
*/
|
||||
export function printCurrentConfig(): void {
|
||||
console.log('\n=== Stock Bot Configuration ===');
|
||||
function basicUsageExample() {
|
||||
console.log('=== Basic Usage Example ===');
|
||||
|
||||
console.log('\nEnvironment:', getEnvironment());
|
||||
console.log('\n--- Database Config ---');
|
||||
console.log('Dragonfly Host:', databaseConfig.dragonfly.host);
|
||||
console.log('Dragonfly Port:', databaseConfig.dragonfly.port);
|
||||
console.log('QuestDB Host:', databaseConfig.questDB.host);
|
||||
console.log('QuestDB Database:', databaseConfig.questDB.database);
|
||||
console.log('MongoDB URI:', databaseConfig.mongodb.uri);
|
||||
console.log('MongoDB Database:', databaseConfig.mongodb.database);
|
||||
console.log('PostgreSQL Host:', databaseConfig.postgres.host);
|
||||
console.log('PostgreSQL Database:', databaseConfig.postgres.database);
|
||||
console.log('\n--- Data Provider Config ---');
|
||||
console.log('Default Provider:', dataProviderConfigs.defaultProvider);
|
||||
console.log('Providers:');
|
||||
dataProviderConfigs.providers.forEach((provider: {
|
||||
name: string;
|
||||
type: string;
|
||||
baseUrl?: string;
|
||||
wsUrl?: string;
|
||||
}) => {
|
||||
console.log(` - ${provider.name} (${provider.type})`);
|
||||
if (provider.baseUrl) console.log(` URL: ${provider.baseUrl}`);
|
||||
if (provider.wsUrl) console.log(` WebSocket: ${provider.wsUrl}`);
|
||||
// Load environment variables (optional - they're loaded automatically)
|
||||
loadEnvVariables();
|
||||
|
||||
// Get the current environment
|
||||
const env = getEnvironment();
|
||||
console.log(`Current environment: ${env}`);
|
||||
|
||||
// Access individual configuration values
|
||||
console.log(`Database host: ${DB_HOST}`);
|
||||
console.log(`Database port: ${DB_PORT}`);
|
||||
console.log(`Log level: ${LOG_LEVEL}`);
|
||||
|
||||
// Access full configuration objects
|
||||
console.log(`Full database config:`, {
|
||||
host: databaseConfig.DB_HOST,
|
||||
port: databaseConfig.DB_PORT,
|
||||
name: databaseConfig.DB_NAME,
|
||||
ssl: databaseConfig.DB_SSL,
|
||||
});
|
||||
|
||||
console.log('\n--- Risk Config ---');
|
||||
console.log('Max Drawdown:', riskConfig.maxDrawdown * 100, '%');
|
||||
console.log('Max Position Size:', riskConfig.maxPositionSize * 100, '%');
|
||||
console.log('Max Leverage:', riskConfig.maxLeverage, 'x');
|
||||
console.log('Default Stop Loss:', riskConfig.stopLossDefault * 100, '%');
|
||||
console.log('Default Take Profit:', riskConfig.takeProfitDefault * 100, '%');
|
||||
|
||||
console.log('\n--- Market Data Gateway Config ---');
|
||||
console.log('Service Port:', marketDataGatewayConfig.service.port);
|
||||
console.log('WebSocket Enabled:', marketDataGatewayConfig.websocket.enabled);
|
||||
console.log('WebSocket Path:', marketDataGatewayConfig.websocket.path);
|
||||
console.log('Caching Enabled:', marketDataGatewayConfig.caching.enabled);
|
||||
console.log('Caching TTL:', marketDataGatewayConfig.caching.ttlSeconds, 'seconds');
|
||||
|
||||
console.log('\n--- Risk Guardian Config ---');
|
||||
console.log('Service Port:', riskGuardianConfig.service.port);
|
||||
console.log('Pre-Trade Validation:', riskGuardianConfig.riskChecks.preTradeValidation);
|
||||
console.log('Portfolio Validation:', riskGuardianConfig.riskChecks.portfolioValidation);
|
||||
console.log('Alerting Enabled:', riskGuardianConfig.alerting.enabled);
|
||||
console.log('Critical Threshold:', riskGuardianConfig.alerting.criticalThreshold * 100, '%');
|
||||
}
|
||||
|
||||
// Execute example if this file is run directly
|
||||
if (require.main === module) { try {
|
||||
printCurrentConfig();
|
||||
} catch (error: unknown) {
|
||||
if (error instanceof ConfigurationError) {
|
||||
console.error('Configuration Error:', error.message);
|
||||
} else if (error instanceof Error) {
|
||||
console.error('Error:', error.message);
|
||||
} else {
|
||||
console.error('Unknown error:', error);
|
||||
}
|
||||
process.exit(1);
|
||||
/**
|
||||
* Example 2: Using configuration in a database connection
|
||||
*/
|
||||
async function databaseConnectionExample() {
|
||||
console.log('=== Database Connection Example ===');
|
||||
|
||||
try {
|
||||
// Use the database configuration to create a connection string
|
||||
const connectionString = `postgresql://${DB_USER}:${DB_PASSWORD}@${DB_HOST}:${DB_PORT}/${DB_NAME}`;
|
||||
|
||||
console.log('Database connection settings:');
|
||||
console.log(`- Host: ${databaseConfig.DB_HOST}`);
|
||||
console.log(`- Port: ${databaseConfig.DB_PORT}`);
|
||||
console.log(`- Database: ${databaseConfig.DB_NAME}`);
|
||||
console.log(`- SSL enabled: ${databaseConfig.DB_SSL}`);
|
||||
console.log(`- Pool max connections: ${databaseConfig.DB_POOL_MAX}`);
|
||||
console.log(`- Query timeout: ${databaseConfig.DB_QUERY_TIMEOUT}ms`);
|
||||
|
||||
// Example pool configuration
|
||||
const poolConfig = {
|
||||
host: databaseConfig.DB_HOST,
|
||||
port: databaseConfig.DB_PORT,
|
||||
database: databaseConfig.DB_NAME,
|
||||
user: databaseConfig.DB_USER,
|
||||
password: databaseConfig.DB_PASSWORD,
|
||||
ssl: databaseConfig.DB_SSL,
|
||||
min: databaseConfig.DB_POOL_MIN,
|
||||
max: databaseConfig.DB_POOL_MAX,
|
||||
idleTimeoutMillis: databaseConfig.DB_POOL_IDLE_TIMEOUT,
|
||||
};
|
||||
|
||||
console.log('Pool configuration:', poolConfig);
|
||||
|
||||
} catch (error) {
|
||||
console.error('Database configuration error:', error);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Example 3: Logging setup example
|
||||
*/
|
||||
function loggingSetupExample() {
|
||||
console.log('=== Logging Setup Example ===');
|
||||
|
||||
// Access logging configuration
|
||||
console.log('Logging settings:');
|
||||
console.log(`- Level: ${loggingConfig.LOG_LEVEL}`);
|
||||
console.log(`- Format: ${loggingConfig.LOG_FORMAT}`);
|
||||
console.log(`- Console enabled: ${loggingConfig.LOG_CONSOLE}`);
|
||||
console.log(`- File logging: ${loggingConfig.LOG_FILE}`);
|
||||
console.log(`- Service name: ${loggingConfig.LOG_SERVICE_NAME}`);
|
||||
// Example logger configuration
|
||||
const loggerConfig = {
|
||||
level: loggingConfig.LOG_LEVEL,
|
||||
format: loggingConfig.LOG_FORMAT,
|
||||
transports: [] as any[],
|
||||
defaultMeta: {
|
||||
service: loggingConfig.LOG_SERVICE_NAME,
|
||||
version: loggingConfig.LOG_SERVICE_VERSION,
|
||||
environment: loggingConfig.LOG_ENVIRONMENT,
|
||||
},
|
||||
};
|
||||
|
||||
if (loggingConfig.LOG_CONSOLE) {
|
||||
loggerConfig.transports.push({
|
||||
type: 'console',
|
||||
format: loggingConfig.LOG_FORMAT,
|
||||
timestamp: loggingConfig.LOG_TIMESTAMP,
|
||||
});
|
||||
}
|
||||
|
||||
if (loggingConfig.LOG_FILE) {
|
||||
loggerConfig.transports.push({
|
||||
type: 'file',
|
||||
filename: `${loggingConfig.LOG_FILE_PATH}/application.log`,
|
||||
maxSize: loggingConfig.LOG_FILE_MAX_SIZE,
|
||||
maxFiles: loggingConfig.LOG_FILE_MAX_FILES,
|
||||
datePattern: loggingConfig.LOG_FILE_DATE_PATTERN,
|
||||
});
|
||||
}
|
||||
|
||||
// Example Loki transport configuration
|
||||
if (lokiConfig.LOKI_HOST) {
|
||||
loggerConfig.transports.push({
|
||||
type: 'loki',
|
||||
host: lokiConfig.LOKI_HOST,
|
||||
port: lokiConfig.LOKI_PORT,
|
||||
batchSize: lokiConfig.LOKI_BATCH_SIZE,
|
||||
labels: {
|
||||
service: lokiConfig.LOKI_SERVICE_LABEL,
|
||||
environment: lokiConfig.LOKI_ENVIRONMENT_LABEL,
|
||||
},
|
||||
});
|
||||
}
|
||||
console.log('Logger configuration:', loggerConfig);
|
||||
}
|
||||
|
||||
/**
|
||||
* Example 4: Risk management configuration
|
||||
*/
|
||||
function riskManagementExample() {
|
||||
console.log('=== Risk Management Example ===');
|
||||
|
||||
// Access risk configuration
|
||||
console.log('Risk management settings:');
|
||||
console.log(`- Max position size: ${RISK_MAX_POSITION_SIZE * 100}%`);
|
||||
console.log(`- Default stop loss: ${RISK_DEFAULT_STOP_LOSS * 100}%`);
|
||||
console.log(`- Circuit breaker enabled: ${RISK_CIRCUIT_BREAKER_ENABLED}`);
|
||||
console.log(`- Max leverage: ${riskConfig.RISK_MAX_LEVERAGE}x`);
|
||||
|
||||
// Example risk calculator
|
||||
function calculatePositionSize(portfolioValue: number, riskPerTrade: number = RISK_DEFAULT_STOP_LOSS) {
|
||||
const maxPositionValue = portfolioValue * RISK_MAX_POSITION_SIZE;
|
||||
const riskAmount = portfolioValue * riskPerTrade;
|
||||
|
||||
return {
|
||||
maxPositionValue,
|
||||
riskAmount,
|
||||
maxShares: Math.floor(maxPositionValue / 100), // Assuming $100 per share
|
||||
};
|
||||
}
|
||||
|
||||
const portfolioValue = 100000; // $100k portfolio
|
||||
const position = calculatePositionSize(portfolioValue);
|
||||
console.log(`Position sizing for $${portfolioValue} portfolio:`, position);
|
||||
}
|
||||
|
||||
/**
|
||||
* Example 5: Data provider configuration
|
||||
*/
|
||||
function dataProviderExample() {
|
||||
console.log('=== Data Provider Example ===');
|
||||
|
||||
// Get the default provider
|
||||
const defaultProvider = getDefaultProvider();
|
||||
console.log('Default provider:', defaultProvider);
|
||||
|
||||
// Get all enabled providers
|
||||
const enabledProviders = getEnabledProviders();
|
||||
console.log('Enabled providers:', enabledProviders.map(p => p.name));
|
||||
|
||||
// Get specific provider configuration
|
||||
try {
|
||||
const alpacaConfig = getProviderConfig('alpaca');
|
||||
console.log('Alpaca configuration:', {
|
||||
enabled: alpacaConfig.enabled,
|
||||
baseUrl: alpacaConfig.baseUrl,
|
||||
hasApiKey: !!alpacaConfig.apiKey,
|
||||
rateLimit: alpacaConfig.rateLimits,
|
||||
});
|
||||
} catch (error) {
|
||||
console.error('Error getting Alpaca config:', error);
|
||||
}
|
||||
|
||||
// Example API client setup
|
||||
const apiClients = enabledProviders.map(provider => ({
|
||||
name: provider.name,
|
||||
client: {
|
||||
baseURL: provider.baseUrl,
|
||||
timeout: dataProvidersConfig.DATA_PROVIDER_TIMEOUT,
|
||||
retries: dataProvidersConfig.DATA_PROVIDER_RETRIES,
|
||||
retryDelay: dataProvidersConfig.DATA_PROVIDER_RETRY_DELAY,
|
||||
headers: provider.apiKey ? {
|
||||
'Authorization': `Bearer ${provider.apiKey}`
|
||||
} : {},
|
||||
}
|
||||
}));
|
||||
|
||||
console.log('API clients configuration:', apiClients);
|
||||
}
|
||||
|
||||
/**
|
||||
* Example 6: Environment-specific configuration
|
||||
*/
|
||||
function environmentSpecificExample() {
|
||||
console.log('=== Environment-Specific Example ===');
|
||||
|
||||
const env = getEnvironment();
|
||||
|
||||
switch (env) {
|
||||
case Environment.Development:
|
||||
console.log('Development environment detected');
|
||||
console.log('- Using local database');
|
||||
console.log('- Verbose logging enabled');
|
||||
console.log('- Paper trading mode');
|
||||
break;
|
||||
|
||||
case Environment.Testing:
|
||||
console.log('Testing environment detected');
|
||||
console.log('- Using test database');
|
||||
console.log('- Structured logging');
|
||||
console.log('- Mock data providers');
|
||||
break;
|
||||
|
||||
case Environment.Staging:
|
||||
console.log('Staging environment detected');
|
||||
console.log('- Using staging database');
|
||||
console.log('- Production-like settings');
|
||||
console.log('- Real data providers (limited)');
|
||||
break;
|
||||
|
||||
case Environment.Production:
|
||||
console.log('Production environment detected');
|
||||
console.log('- Using production database');
|
||||
console.log('- Optimized logging');
|
||||
console.log('- Live trading enabled');
|
||||
break;
|
||||
}
|
||||
|
||||
// Example of environment-specific behavior
|
||||
const isProduction = env === Environment.Production;
|
||||
const tradingMode = isProduction ? 'live' : 'paper';
|
||||
const logLevel = isProduction ? 'info' : 'debug';
|
||||
|
||||
console.log(`Trading mode: ${tradingMode}`);
|
||||
console.log(`Recommended log level: ${logLevel}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Example 7: Configuration validation and error handling
|
||||
*/
|
||||
function configurationValidationExample() {
|
||||
console.log('=== Configuration Validation Example ===');
|
||||
|
||||
try {
|
||||
// Check required configurations
|
||||
if (!ALPACA_API_KEY && DEFAULT_DATA_PROVIDER === 'alpaca') {
|
||||
throw new ConfigurationError('Alpaca API key is required when using Alpaca as default provider');
|
||||
}
|
||||
|
||||
// Validate risk settings
|
||||
if (RISK_MAX_POSITION_SIZE > 1.0) {
|
||||
throw new ConfigurationError('Maximum position size cannot exceed 100%');
|
||||
}
|
||||
|
||||
if (riskConfig.RISK_DEFAULT_STOP_LOSS > riskConfig.RISK_DEFAULT_TAKE_PROFIT) {
|
||||
console.warn('Warning: Stop loss is greater than take profit - check your risk settings');
|
||||
}
|
||||
|
||||
// Validate database connection settings
|
||||
if (databaseConfig.DB_POOL_MAX < databaseConfig.DB_POOL_MIN) {
|
||||
throw new ConfigurationError('Database max pool size must be greater than min pool size');
|
||||
}
|
||||
|
||||
console.log('✅ All configuration validations passed');
|
||||
|
||||
} catch (error) {
|
||||
if (error instanceof ConfigurationError) {
|
||||
console.error('❌ Configuration error:', error.message);
|
||||
} else {
|
||||
console.error('❌ Unexpected error:', error);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Example 8: QuestDB time-series database configuration
 *
 * Prints the QuestDB settings, builds a client-configuration object with one
 * section per wire protocol exposed by the config (HTTP, PostgreSQL, Influx),
 * and shows an example time-series DDL statement.
 */
function questdbConfigurationExample() {
  console.log('=== QuestDB Configuration Example ===');

  // Access QuestDB configuration
  console.log('QuestDB settings:');
  console.log(`- Host: ${questdbConfig.QUESTDB_HOST}`);
  console.log(`- HTTP port (web console): ${questdbConfig.QUESTDB_HTTP_PORT}`);
  console.log(`- PostgreSQL port: ${questdbConfig.QUESTDB_PG_PORT}`);
  console.log(`- InfluxDB port: ${questdbConfig.QUESTDB_INFLUX_PORT}`);
  console.log(`- TLS enabled: ${questdbConfig.QUESTDB_TLS_ENABLED}`);

  // Example QuestDB client configuration — one sub-object per protocol; all
  // three share the same host but use protocol-specific ports.
  const questdbClientConfig = {
    http: {
      host: questdbConfig.QUESTDB_HOST,
      port: questdbConfig.QUESTDB_HTTP_PORT,
      tls: questdbConfig.QUESTDB_TLS_ENABLED,
      timeout: questdbConfig.QUESTDB_REQUEST_TIMEOUT,
    },
    postgresql: {
      host: questdbConfig.QUESTDB_HOST,
      port: questdbConfig.QUESTDB_PG_PORT,
      database: questdbConfig.QUESTDB_DEFAULT_DATABASE,
      user: questdbConfig.QUESTDB_USER,
      password: questdbConfig.QUESTDB_PASSWORD,
    },
    influxdb: {
      host: questdbConfig.QUESTDB_HOST,
      port: questdbConfig.QUESTDB_INFLUX_PORT,
    }
  };

  console.log('QuestDB client configuration:', questdbClientConfig);

  // Example time-series table creation: QuestDB SQL with a designated
  // timestamp column and daily partitioning.
  const createTableQuery = `
    CREATE TABLE IF NOT EXISTS ohlcv_data (
      timestamp TIMESTAMP,
      symbol SYMBOL,
      open DOUBLE,
      high DOUBLE,
      low DOUBLE,
      close DOUBLE,
      volume LONG
    ) timestamp(timestamp) PARTITION BY DAY;
  `;

  console.log('Example table creation query:', createTableQuery);
}
|
||||
|
||||
/**
 * Example 9: MongoDB document database configuration
 *
 * Prints the MongoDB settings, derives a connection URI (explicit
 * MONGODB_URI wins over host/port assembly), and shows example client
 * options and collection names.
 */
function mongodbConfigurationExample() {
  console.log('=== MongoDB Configuration Example ===');

  // Access MongoDB configuration
  console.log('MongoDB settings:');
  console.log(`- Host: ${mongodbConfig.MONGODB_HOST}`);
  console.log(`- Port: ${mongodbConfig.MONGODB_PORT}`);
  console.log(`- Database: ${mongodbConfig.MONGODB_DATABASE}`);
  console.log(`- Username: ${mongodbConfig.MONGODB_USERNAME}`);
  console.log(`- TLS enabled: ${mongodbConfig.MONGODB_TLS}`);
  console.log(`- Max pool size: ${mongodbConfig.MONGODB_MAX_POOL_SIZE}`);

  // Build connection URI. An explicitly configured MONGODB_URI takes
  // precedence; otherwise the URI is assembled from the individual parts,
  // including credentials only when both username and password are set.
  const buildMongoUri = () => {
    if (mongodbConfig.MONGODB_URI) {
      return mongodbConfig.MONGODB_URI;
    }

    const auth = mongodbConfig.MONGODB_USERNAME && mongodbConfig.MONGODB_PASSWORD
      ? `${mongodbConfig.MONGODB_USERNAME}:${mongodbConfig.MONGODB_PASSWORD}@`
      : '';

    const tls = mongodbConfig.MONGODB_TLS ? '?tls=true' : '';

    return `mongodb://${auth}${mongodbConfig.MONGODB_HOST}:${mongodbConfig.MONGODB_PORT}/${mongodbConfig.MONGODB_DATABASE}${tls}`;
  };

  const mongoUri = buildMongoUri();
  console.log('MongoDB connection URI:', mongoUri.replace(/:[^:@]*@/, ':***@')); // Hide password

  // Example MongoDB client configuration (driver-style option names —
  // NOTE(review): confirm against the MongoDB driver version in use).
  const mongoClientConfig = {
    maxPoolSize: mongodbConfig.MONGODB_MAX_POOL_SIZE,
    minPoolSize: mongodbConfig.MONGODB_MIN_POOL_SIZE,
    maxIdleTimeMS: mongodbConfig.MONGODB_MAX_IDLE_TIME,
    connectTimeoutMS: mongodbConfig.MONGODB_CONNECT_TIMEOUT,
    socketTimeoutMS: mongodbConfig.MONGODB_SOCKET_TIMEOUT,
    serverSelectionTimeoutMS: mongodbConfig.MONGODB_SERVER_SELECTION_TIMEOUT,
    retryWrites: mongodbConfig.MONGODB_RETRY_WRITES,
    w: mongodbConfig.MONGODB_WRITE_CONCERN,
    readPreference: mongodbConfig.MONGODB_READ_PREFERENCE,
  };

  console.log('MongoDB client configuration:', mongoClientConfig);

  // Example collections structure
  const collections = [
    'sentiment_data', // News sentiment analysis
    'market_news', // Raw news articles
    'social_signals', // Social media signals
    'earnings_reports', // Earnings data
    'analyst_ratings', // Analyst recommendations
  ];

  console.log('Example collections:', collections);
}
|
||||
|
||||
/**
 * Example 10: Dragonfly (Redis replacement) configuration
 *
 * Prints Dragonfly settings and shows an example client configuration plus
 * cache key-pattern and TTL conventions.
 */
function dragonflyConfigurationExample() {
  console.log('=== Dragonfly Configuration Example ===');

  // Access Dragonfly configuration
  console.log('Dragonfly settings:');
  console.log(`- Host: ${dragonflyConfig.DRAGONFLY_HOST}`);
  console.log(`- Port: ${dragonflyConfig.DRAGONFLY_PORT}`);
  console.log(`- Database: ${dragonflyConfig.DRAGONFLY_DATABASE}`);
  console.log(`- Cache mode: ${dragonflyConfig.DRAGONFLY_CACHE_MODE}`);
  console.log(`- Max memory: ${dragonflyConfig.DRAGONFLY_MAX_MEMORY}`);
  console.log(`- Pool size: ${dragonflyConfig.DRAGONFLY_POOL_SIZE}`);

  // Example Dragonfly client configuration (ioredis-style option names —
  // NOTE(review): confirm against the client library actually used).
  const dragonflyClientConfig = {
    host: dragonflyConfig.DRAGONFLY_HOST,
    port: dragonflyConfig.DRAGONFLY_PORT,
    db: dragonflyConfig.DRAGONFLY_DATABASE,
    // Empty strings (the env defaults) become undefined so the client does
    // not attempt authentication with blank credentials.
    password: dragonflyConfig.DRAGONFLY_PASSWORD || undefined,
    username: dragonflyConfig.DRAGONFLY_USERNAME || undefined,
    retryDelayOnFailover: dragonflyConfig.DRAGONFLY_RETRY_DELAY,
    maxRetriesPerRequest: dragonflyConfig.DRAGONFLY_MAX_RETRIES,
    connectTimeout: dragonflyConfig.DRAGONFLY_CONNECT_TIMEOUT,
    commandTimeout: dragonflyConfig.DRAGONFLY_COMMAND_TIMEOUT,
    enableAutoPipelining: true,
  };

  console.log('Dragonfly client configuration:', dragonflyClientConfig);

  // Example cache key patterns ({placeholders} are filled in by callers)
  const cachePatterns = {
    marketData: 'market:{symbol}:{timeframe}',
    indicators: 'indicators:{symbol}:{indicator}:{period}',
    positions: 'positions:{account_id}',
    orders: 'orders:{order_id}',
    rateLimit: 'rate_limit:{provider}:{endpoint}',
    sessions: 'session:{user_id}',
  };

  console.log('Example cache key patterns:', cachePatterns);

  // Example TTL configurations
  const ttlConfigs = {
    marketData: 60, // 1 minute for real-time data
    indicators: 300, // 5 minutes for calculated indicators
    positions: 30, // 30 seconds for positions
    orders: 86400, // 1 day for order history
    rateLimit: 3600, // 1 hour for rate limiting
    sessions: 1800, // 30 minutes for user sessions
  };

  console.log('Example TTL configurations (seconds):', ttlConfigs);
}
|
||||
|
||||
/**
 * Example 11: Monitoring stack configuration (Prometheus, Grafana, Loki)
 *
 * Prints the settings for each monitoring component, derives their HTTP
 * endpoints, and shows an example application metrics configuration.
 */
function monitoringConfigurationExample() {
  console.log('=== Monitoring Configuration Example ===');

  // Prometheus configuration
  console.log('Prometheus settings:');
  console.log(`- Host: ${prometheusConfig.PROMETHEUS_HOST}`);
  console.log(`- Port: ${prometheusConfig.PROMETHEUS_PORT}`);
  console.log(`- Scrape interval: ${prometheusConfig.PROMETHEUS_SCRAPE_INTERVAL}`);
  console.log(`- Retention time: ${prometheusConfig.PROMETHEUS_RETENTION_TIME}`);

  // Grafana configuration
  console.log('\nGrafana settings:');
  console.log(`- Host: ${grafanaConfig.GRAFANA_HOST}`);
  console.log(`- Port: ${grafanaConfig.GRAFANA_PORT}`);
  console.log(`- Admin user: ${grafanaConfig.GRAFANA_ADMIN_USER}`);
  console.log(`- Allow sign up: ${grafanaConfig.GRAFANA_ALLOW_SIGN_UP}`);
  console.log(`- Database type: ${grafanaConfig.GRAFANA_DATABASE_TYPE}`);

  // Loki configuration
  console.log('\nLoki settings:');
  console.log(`- Host: ${lokiConfig.LOKI_HOST}`);
  console.log(`- Port: ${lokiConfig.LOKI_PORT}`);
  console.log(`- Batch size: ${lokiConfig.LOKI_BATCH_SIZE}`);
  console.log(`- Retention period: ${lokiConfig.LOKI_RETENTION_PERIOD}`);

  // Example monitoring endpoints (plain HTTP — TLS not modeled here)
  const monitoringEndpoints = {
    prometheus: `http://${prometheusConfig.PROMETHEUS_HOST}:${prometheusConfig.PROMETHEUS_PORT}`,
    grafana: `http://${grafanaConfig.GRAFANA_HOST}:${grafanaConfig.GRAFANA_PORT}`,
    loki: `http://${lokiConfig.LOKI_HOST}:${lokiConfig.LOKI_PORT}`,
  };

  console.log('\nMonitoring endpoints:', monitoringEndpoints);

  // Example metrics configuration; labels identify the emitting service and
  // environment, version falls back to '1.0.0' when npm metadata is absent.
  const metricsConfig = {
    defaultLabels: {
      service: 'stock-bot',
      environment: getEnvironment(),
      version: process.env.npm_package_version || '1.0.0',
    },
    collectDefaultMetrics: true,
    prefix: 'stockbot_',
    buckets: [0.1, 0.5, 1, 2, 5, 10, 30, 60], // Response time buckets in seconds
  };

  console.log('Example metrics configuration:', metricsConfig);
}
|
||||
|
||||
/**
 * Example 12: Multi-database service configuration
 *
 * Assembles one configuration object for a hypothetical microservice that
 * uses every datastore in the platform (PostgreSQL, QuestDB, MongoDB,
 * Dragonfly) plus the monitoring stack, then prints it and the intended
 * data-flow patterns.
 */
function multiDatabaseServiceExample() {
  console.log('=== Multi-Database Service Example ===');

  // Complete database configuration for a microservice
  const serviceConfig = {
    service: {
      name: 'market-data-processor',
      version: '1.0.0',
      environment: getEnvironment(),
    },

    // PostgreSQL for operational data
    postgresql: {
      host: databaseConfig.DB_HOST,
      port: databaseConfig.DB_PORT,
      database: databaseConfig.DB_NAME,
      username: databaseConfig.DB_USER,
      password: databaseConfig.DB_PASSWORD,
      ssl: databaseConfig.DB_SSL,
      pool: {
        min: databaseConfig.DB_POOL_MIN,
        max: databaseConfig.DB_POOL_MAX,
        idleTimeout: databaseConfig.DB_POOL_IDLE_TIMEOUT,
      },
    },

    // QuestDB for time-series data
    questdb: {
      host: questdbConfig.QUESTDB_HOST,
      httpPort: questdbConfig.QUESTDB_HTTP_PORT,
      pgPort: questdbConfig.QUESTDB_PG_PORT,
      database: questdbConfig.QUESTDB_DEFAULT_DATABASE,
      timeout: questdbConfig.QUESTDB_REQUEST_TIMEOUT,
    },

    // MongoDB for document storage
    mongodb: {
      host: mongodbConfig.MONGODB_HOST,
      port: mongodbConfig.MONGODB_PORT,
      database: mongodbConfig.MONGODB_DATABASE,
      username: mongodbConfig.MONGODB_USERNAME,
      maxPoolSize: mongodbConfig.MONGODB_MAX_POOL_SIZE,
      readPreference: mongodbConfig.MONGODB_READ_PREFERENCE,
    },

    // Dragonfly for caching
    dragonfly: {
      host: dragonflyConfig.DRAGONFLY_HOST,
      port: dragonflyConfig.DRAGONFLY_PORT,
      database: dragonflyConfig.DRAGONFLY_DATABASE,
      poolSize: dragonflyConfig.DRAGONFLY_POOL_SIZE,
      commandTimeout: dragonflyConfig.DRAGONFLY_COMMAND_TIMEOUT,
    },

    // Monitoring
    monitoring: {
      prometheus: {
        pushGateway: `http://${prometheusConfig.PROMETHEUS_HOST}:${prometheusConfig.PROMETHEUS_PORT}`,
        scrapeInterval: prometheusConfig.PROMETHEUS_SCRAPE_INTERVAL,
      },
      loki: {
        host: lokiConfig.LOKI_HOST,
        port: lokiConfig.LOKI_PORT,
        batchSize: lokiConfig.LOKI_BATCH_SIZE,
        labels: {
          service: 'market-data-processor',
          environment: getEnvironment(),
        },
      },
    },
  };

  console.log('Complete service configuration:', JSON.stringify(serviceConfig, null, 2));

  // Example data flow (documentation strings, printed as-is)
  const dataFlow = {
    ingestion: 'Market data → Dragonfly (cache) → QuestDB (storage)',
    processing: 'QuestDB → Analysis → PostgreSQL (results) → MongoDB (metadata)',
    serving: 'Dragonfly (cache) ← PostgreSQL/QuestDB ← API requests',
    monitoring: 'All services → Prometheus → Grafana dashboards',
    logging: 'All services → Loki → Grafana log viewer',
  };

  console.log('\nData flow patterns:', dataFlow);
}
|
||||
|
||||
/**
|
||||
* Example 8: Creating a service configuration object
|
||||
*/
|
||||
function serviceConfigurationExample() {
|
||||
console.log('=== Service Configuration Example ===');
|
||||
|
||||
// Example: Market Data Gateway service configuration
|
||||
const marketDataGatewayConfig = {
|
||||
service: {
|
||||
name: 'market-data-gateway',
|
||||
port: 3001,
|
||||
environment: getEnvironment(),
|
||||
},
|
||||
database: {
|
||||
host: databaseConfig.DB_HOST,
|
||||
port: databaseConfig.DB_PORT,
|
||||
name: databaseConfig.DB_NAME,
|
||||
ssl: databaseConfig.DB_SSL,
|
||||
}, logging: {
|
||||
level: loggingConfig.LOG_LEVEL,
|
||||
console: loggingConfig.LOG_CONSOLE,
|
||||
loki: {
|
||||
host: lokiConfig.LOKI_HOST,
|
||||
port: lokiConfig.LOKI_PORT,
|
||||
labels: {
|
||||
service: 'market-data-gateway',
|
||||
environment: getEnvironment(),
|
||||
}
|
||||
}
|
||||
},
|
||||
dataProviders: {
|
||||
default: DEFAULT_DATA_PROVIDER,
|
||||
enabled: getEnabledProviders(),
|
||||
timeout: dataProvidersConfig.DATA_PROVIDER_TIMEOUT,
|
||||
retries: dataProvidersConfig.DATA_PROVIDER_RETRIES,
|
||||
},
|
||||
cache: {
|
||||
enabled: dataProvidersConfig.DATA_CACHE_ENABLED,
|
||||
ttl: dataProvidersConfig.DATA_CACHE_TTL,
|
||||
maxSize: dataProvidersConfig.DATA_CACHE_MAX_SIZE,
|
||||
}
|
||||
};
|
||||
|
||||
console.log('Market Data Gateway configuration:', JSON.stringify(marketDataGatewayConfig, null, 2));
|
||||
}
|
||||
|
||||
/**
|
||||
* Main example runner
|
||||
*/
|
||||
function runAllExamples() {
|
||||
console.log('🚀 Stock Bot Configuration Examples\n');
|
||||
|
||||
try {
|
||||
basicUsageExample();
|
||||
console.log('\n');
|
||||
|
||||
databaseConnectionExample();
|
||||
console.log('\n');
|
||||
|
||||
loggingSetupExample();
|
||||
console.log('\n');
|
||||
|
||||
riskManagementExample();
|
||||
console.log('\n');
|
||||
|
||||
dataProviderExample();
|
||||
console.log('\n');
|
||||
|
||||
environmentSpecificExample();
|
||||
console.log('\n');
|
||||
|
||||
configurationValidationExample();
|
||||
console.log('\n');
|
||||
|
||||
questdbConfigurationExample();
|
||||
console.log('\n');
|
||||
|
||||
mongodbConfigurationExample();
|
||||
console.log('\n');
|
||||
|
||||
dragonflyConfigurationExample();
|
||||
console.log('\n');
|
||||
|
||||
monitoringConfigurationExample();
|
||||
console.log('\n');
|
||||
|
||||
multiDatabaseServiceExample();
|
||||
console.log('\n');
|
||||
|
||||
serviceConfigurationExample();
|
||||
|
||||
} catch (error) {
|
||||
console.error('Example execution error:', error);
|
||||
}
|
||||
}
|
||||
|
||||
// Export the examples for use in other files
|
||||
export {
|
||||
basicUsageExample,
|
||||
databaseConnectionExample,
|
||||
loggingSetupExample,
|
||||
riskManagementExample,
|
||||
dataProviderExample,
|
||||
environmentSpecificExample,
|
||||
configurationValidationExample,
|
||||
questdbConfigurationExample,
|
||||
mongodbConfigurationExample,
|
||||
dragonflyConfigurationExample,
|
||||
monitoringConfigurationExample,
|
||||
multiDatabaseServiceExample,
|
||||
serviceConfigurationExample,
|
||||
runAllExamples,
|
||||
};
|
||||
|
||||
// Run examples if this file is executed directly
|
||||
if (require.main === module) {
|
||||
runAllExamples();
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,24 +1,25 @@
|
|||
/**
|
||||
* @stock-bot/config
|
||||
*
|
||||
* Configuration management library for Stock Bot platform
|
||||
* Configuration management library for Stock Bot platform using envalid
|
||||
*/
|
||||
|
||||
// Core configuration functionality
|
||||
export * from './core';
|
||||
export * from './types';
|
||||
|
||||
// Database configurations
|
||||
export * from './database';
|
||||
export * from './questdb';
|
||||
export * from './mongodb';
|
||||
export * from './dragonfly';
|
||||
|
||||
// Logging and monitoring configurations
|
||||
export * from './logging';
|
||||
export * from './loki';
|
||||
export * from './monitoring';
|
||||
|
||||
// Data provider configurations
|
||||
export * from './data-providers';
|
||||
|
||||
// Risk management configurations
|
||||
export * from './risk';
|
||||
|
||||
// Logging configurations
|
||||
export * from './logging';
|
||||
|
||||
// Service-specific configurations
|
||||
export * from './services/index';
|
||||
|
|
|
|||
|
|
@ -1,102 +1,81 @@
|
|||
/**
|
||||
* Loki logging configuration for Stock Bot platform
|
||||
* Logging configuration using envalid
|
||||
* Application logging settings without Loki (Loki config is in monitoring.ts)
|
||||
*/
|
||||
import { z } from 'zod';
|
||||
import { getEnvVar, getNumericEnvVar, getBooleanEnvVar, createConfigLoader, validateConfig } from './core';
|
||||
import { cleanEnv, str, bool, num } from 'envalid';
|
||||
|
||||
/**
|
||||
* Loki configuration schema
|
||||
* Logging configuration with validation and defaults
|
||||
*/
|
||||
export const lokiConfigSchema = z.object({
|
||||
host: z.string().default('localhost'),
|
||||
port: z.number().default(3100),
|
||||
username: z.string().optional(),
|
||||
password: z.string().optional(),
|
||||
retentionDays: z.number().default(30),
|
||||
labels: z.record(z.string()).default({}),
|
||||
batchSize: z.number().default(100),
|
||||
flushIntervalMs: z.number().default(5000)
|
||||
export const loggingConfig = cleanEnv(process.env, {
|
||||
// Basic Logging Settings
|
||||
LOG_LEVEL: str({
|
||||
default: 'info',
|
||||
choices: ['error', 'warn', 'info', 'http', 'verbose', 'debug', 'silly'],
|
||||
desc: 'Logging level'
|
||||
}),
|
||||
LOG_FORMAT: str({
|
||||
default: 'json',
|
||||
choices: ['json', 'simple', 'combined'],
|
||||
desc: 'Log output format'
|
||||
}),
|
||||
LOG_CONSOLE: bool({ default: true, desc: 'Enable console logging' }),
|
||||
LOG_FILE: bool({ default: false, desc: 'Enable file logging' }),
|
||||
|
||||
// File Logging Settings
|
||||
LOG_FILE_PATH: str({ default: 'logs', desc: 'Log file directory path' }),
|
||||
LOG_FILE_MAX_SIZE: str({ default: '20m', desc: 'Maximum log file size' }),
|
||||
LOG_FILE_MAX_FILES: num({ default: 14, desc: 'Maximum number of log files to keep' }),
|
||||
LOG_FILE_DATE_PATTERN: str({ default: 'YYYY-MM-DD', desc: 'Log file date pattern' }),
|
||||
|
||||
// Error Logging
|
||||
LOG_ERROR_FILE: bool({ default: true, desc: 'Enable separate error log file' }),
|
||||
LOG_ERROR_STACK: bool({ default: true, desc: 'Include stack traces in error logs' }),
|
||||
|
||||
// Performance Logging
|
||||
LOG_PERFORMANCE: bool({ default: false, desc: 'Enable performance logging' }),
|
||||
LOG_SQL_QUERIES: bool({ default: false, desc: 'Log SQL queries' }),
|
||||
LOG_HTTP_REQUESTS: bool({ default: true, desc: 'Log HTTP requests' }),
|
||||
|
||||
// Structured Logging
|
||||
LOG_STRUCTURED: bool({ default: true, desc: 'Use structured logging format' }),
|
||||
LOG_TIMESTAMP: bool({ default: true, desc: 'Include timestamps in logs' }),
|
||||
LOG_CALLER_INFO: bool({ default: false, desc: 'Include caller information in logs' }),
|
||||
|
||||
// Log Filtering
|
||||
LOG_SILENT_MODULES: str({ default: '', desc: 'Comma-separated list of modules to silence' }),
|
||||
LOG_VERBOSE_MODULES: str({ default: '', desc: 'Comma-separated list of modules for verbose logging' }),
|
||||
|
||||
// Application Context
|
||||
LOG_SERVICE_NAME: str({ default: 'stock-bot', desc: 'Service name for log context' }),
|
||||
LOG_SERVICE_VERSION: str({ default: '1.0.0', desc: 'Service version for log context' }),
|
||||
LOG_ENVIRONMENT: str({ default: 'development', desc: 'Environment for log context' }),
|
||||
});
|
||||
|
||||
export type LokiConfig = z.infer<typeof lokiConfigSchema>;
|
||||
// Export typed configuration object
|
||||
export type LoggingConfig = typeof loggingConfig;
|
||||
|
||||
/**
|
||||
* Logging configuration schema
|
||||
*/
|
||||
export const loggingConfigSchema = z.object({
|
||||
level: z.enum(['debug', 'info', 'warn', 'error']).default('info'),
|
||||
console: z.boolean().default(true),
|
||||
loki: lokiConfigSchema
|
||||
});
|
||||
|
||||
export type LoggingConfig = z.infer<typeof loggingConfigSchema>;
|
||||
|
||||
/**
|
||||
* Parse labels from environment variable string
|
||||
* Format: key1=value1,key2=value2
|
||||
*/
|
||||
function parseLabels(labelsStr?: string): Record<string, string> {
|
||||
if (!labelsStr) return {};
|
||||
|
||||
const labels: Record<string, string> = {};
|
||||
labelsStr.split(',').forEach(labelPair => {
|
||||
const [key, value] = labelPair.trim().split('=');
|
||||
if (key && value) {
|
||||
labels[key] = value;
|
||||
}
|
||||
});
|
||||
|
||||
return labels;
|
||||
}
|
||||
|
||||
/**
|
||||
* Default logging configuration
|
||||
*/
|
||||
const defaultLoggingConfig: LoggingConfig = {
|
||||
level: 'info',
|
||||
console: true,
|
||||
loki: {
|
||||
host: 'localhost',
|
||||
port: 3100,
|
||||
retentionDays: 30,
|
||||
labels: {},
|
||||
batchSize: 100,
|
||||
flushIntervalMs: 5000
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Load logging configuration from environment variables
|
||||
*/
|
||||
export function loadLoggingConfig(): LoggingConfig {
|
||||
const config = {
|
||||
level: (getEnvVar('LOG_LEVEL') || 'info') as 'debug' | 'info' | 'warn' | 'error',
|
||||
console: getBooleanEnvVar('LOG_CONSOLE', true),
|
||||
loki: {
|
||||
host: getEnvVar('LOKI_HOST') || 'localhost',
|
||||
port: getNumericEnvVar('LOKI_PORT', 3100),
|
||||
username: getEnvVar('LOKI_USERNAME'),
|
||||
password: getEnvVar('LOKI_PASSWORD'),
|
||||
retentionDays: getNumericEnvVar('LOKI_RETENTION_DAYS', 30),
|
||||
labels: parseLabels(getEnvVar('LOKI_LABELS')),
|
||||
batchSize: getNumericEnvVar('LOKI_BATCH_SIZE', 100),
|
||||
flushIntervalMs: getNumericEnvVar('LOKI_FLUSH_INTERVAL_MS', 5000)
|
||||
}
|
||||
};
|
||||
|
||||
return validateConfig(config, loggingConfigSchema);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a dynamic configuration loader for logging
|
||||
*/
|
||||
export const createLoggingConfig = createConfigLoader<typeof defaultLoggingConfig>(
|
||||
'logging',
|
||||
loggingConfigSchema,
|
||||
defaultLoggingConfig
|
||||
);
|
||||
|
||||
/**
|
||||
* Singleton logging configuration
|
||||
*/
|
||||
export const loggingConfig = loadLoggingConfig();
|
||||
// Export individual config values for convenience
|
||||
export const {
|
||||
LOG_LEVEL,
|
||||
LOG_FORMAT,
|
||||
LOG_CONSOLE,
|
||||
LOG_FILE,
|
||||
LOG_FILE_PATH,
|
||||
LOG_FILE_MAX_SIZE,
|
||||
LOG_FILE_MAX_FILES,
|
||||
LOG_FILE_DATE_PATTERN,
|
||||
LOG_ERROR_FILE,
|
||||
LOG_ERROR_STACK,
|
||||
LOG_PERFORMANCE,
|
||||
LOG_SQL_QUERIES,
|
||||
LOG_HTTP_REQUESTS,
|
||||
LOG_STRUCTURED,
|
||||
LOG_TIMESTAMP,
|
||||
LOG_CALLER_INFO,
|
||||
LOG_SILENT_MODULES,
|
||||
LOG_VERBOSE_MODULES,
|
||||
LOG_SERVICE_NAME,
|
||||
LOG_SERVICE_VERSION,
|
||||
LOG_ENVIRONMENT,
|
||||
} = loggingConfig;
|
||||
61
libs/config/src/loki.ts
Normal file
61
libs/config/src/loki.ts
Normal file
|
|
@ -0,0 +1,61 @@
|
|||
/**
|
||||
* Loki log aggregation configuration using envalid
|
||||
* Centralized logging configuration for the Stock Bot platform
|
||||
*/
|
||||
import { cleanEnv, str, port, bool, num } from 'envalid';
|
||||
|
||||
/**
|
||||
* Loki configuration with validation and defaults
|
||||
*/
|
||||
export const lokiConfig = cleanEnv(process.env, {
|
||||
// Loki Server
|
||||
LOKI_HOST: str({ default: 'localhost', desc: 'Loki host' }),
|
||||
LOKI_PORT: port({ default: 3100, desc: 'Loki port' }),
|
||||
LOKI_URL: str({ default: '', desc: 'Complete Loki URL (overrides host/port)' }),
|
||||
|
||||
// Authentication
|
||||
LOKI_USERNAME: str({ default: '', desc: 'Loki username (if auth enabled)' }),
|
||||
LOKI_PASSWORD: str({ default: '', desc: 'Loki password (if auth enabled)' }),
|
||||
LOKI_TENANT_ID: str({ default: '', desc: 'Loki tenant ID (for multi-tenancy)' }),
|
||||
|
||||
// Push Configuration
|
||||
LOKI_PUSH_TIMEOUT: num({ default: 10000, desc: 'Push timeout in ms' }),
|
||||
LOKI_BATCH_SIZE: num({ default: 1024, desc: 'Batch size for log entries' }),
|
||||
LOKI_BATCH_WAIT: num({ default: 1000, desc: 'Batch wait time in ms' }),
|
||||
|
||||
// Retention Settings
|
||||
LOKI_RETENTION_PERIOD: str({ default: '30d', desc: 'Log retention period' }),
|
||||
LOKI_MAX_CHUNK_AGE: str({ default: '1h', desc: 'Maximum chunk age' }),
|
||||
|
||||
// TLS Settings
|
||||
LOKI_TLS_ENABLED: bool({ default: false, desc: 'Enable TLS for Loki' }),
|
||||
LOKI_TLS_INSECURE: bool({ default: false, desc: 'Skip TLS verification' }),
|
||||
|
||||
// Log Labels
|
||||
LOKI_DEFAULT_LABELS: str({ default: '', desc: 'Default labels for all log entries (JSON format)' }),
|
||||
LOKI_SERVICE_LABEL: str({ default: 'stock-bot', desc: 'Service label for log entries' }),
|
||||
LOKI_ENVIRONMENT_LABEL: str({ default: 'development', desc: 'Environment label for log entries' }),
|
||||
});
|
||||
|
||||
// Export typed configuration object
|
||||
export type LokiConfig = typeof lokiConfig;
|
||||
|
||||
// Export individual config values for convenience
|
||||
export const {
|
||||
LOKI_HOST,
|
||||
LOKI_PORT,
|
||||
LOKI_URL,
|
||||
LOKI_USERNAME,
|
||||
LOKI_PASSWORD,
|
||||
LOKI_TENANT_ID,
|
||||
LOKI_PUSH_TIMEOUT,
|
||||
LOKI_BATCH_SIZE,
|
||||
LOKI_BATCH_WAIT,
|
||||
LOKI_RETENTION_PERIOD,
|
||||
LOKI_MAX_CHUNK_AGE,
|
||||
LOKI_TLS_ENABLED,
|
||||
LOKI_TLS_INSECURE,
|
||||
LOKI_DEFAULT_LABELS,
|
||||
LOKI_SERVICE_LABEL,
|
||||
LOKI_ENVIRONMENT_LABEL,
|
||||
} = lokiConfig;
|
||||
75
libs/config/src/mongodb.ts
Normal file
75
libs/config/src/mongodb.ts
Normal file
|
|
@ -0,0 +1,75 @@
|
|||
/**
|
||||
* MongoDB configuration using envalid
|
||||
* Document storage for sentiment data, raw documents, and unstructured data
|
||||
*/
|
||||
import { cleanEnv, str, port, bool, num } from 'envalid';
|
||||
|
||||
/**
|
||||
* MongoDB configuration with validation and defaults
|
||||
*/
|
||||
export const mongodbConfig = cleanEnv(process.env, {
|
||||
// MongoDB Connection
|
||||
MONGODB_HOST: str({ default: 'localhost', desc: 'MongoDB host' }),
|
||||
MONGODB_PORT: port({ default: 27017, desc: 'MongoDB port' }),
|
||||
MONGODB_DATABASE: str({ default: 'trading_documents', desc: 'MongoDB database name' }),
|
||||
|
||||
// Authentication
|
||||
MONGODB_USERNAME: str({ default: 'trading_admin', desc: 'MongoDB username' }),
|
||||
MONGODB_PASSWORD: str({ default: '', desc: 'MongoDB password' }),
|
||||
MONGODB_AUTH_SOURCE: str({ default: 'admin', desc: 'MongoDB authentication database' }),
|
||||
|
||||
// Connection URI (alternative to individual settings)
|
||||
MONGODB_URI: str({ default: '', desc: 'Complete MongoDB connection URI (overrides individual settings)' }),
|
||||
|
||||
// Connection Pool Settings
|
||||
MONGODB_MAX_POOL_SIZE: num({ default: 10, desc: 'Maximum connection pool size' }),
|
||||
MONGODB_MIN_POOL_SIZE: num({ default: 0, desc: 'Minimum connection pool size' }),
|
||||
MONGODB_MAX_IDLE_TIME: num({ default: 30000, desc: 'Maximum idle time for connections in ms' }),
|
||||
|
||||
// Timeouts
|
||||
MONGODB_CONNECT_TIMEOUT: num({ default: 10000, desc: 'Connection timeout in ms' }),
|
||||
MONGODB_SOCKET_TIMEOUT: num({ default: 30000, desc: 'Socket timeout in ms' }),
|
||||
MONGODB_SERVER_SELECTION_TIMEOUT: num({ default: 5000, desc: 'Server selection timeout in ms' }),
|
||||
|
||||
// SSL/TLS Settings
|
||||
MONGODB_TLS: bool({ default: false, desc: 'Enable TLS for MongoDB connection' }),
|
||||
MONGODB_TLS_INSECURE: bool({ default: false, desc: 'Allow invalid certificates in TLS mode' }),
|
||||
MONGODB_TLS_CA_FILE: str({ default: '', desc: 'Path to TLS CA certificate file' }),
|
||||
|
||||
// Additional Settings
|
||||
MONGODB_RETRY_WRITES: bool({ default: true, desc: 'Enable retryable writes' }),
|
||||
MONGODB_JOURNAL: bool({ default: true, desc: 'Enable write concern journal' }),
|
||||
MONGODB_READ_PREFERENCE: str({
|
||||
default: 'primary',
|
||||
choices: ['primary', 'primaryPreferred', 'secondary', 'secondaryPreferred', 'nearest'],
|
||||
desc: 'MongoDB read preference'
|
||||
}),
|
||||
MONGODB_WRITE_CONCERN: str({ default: 'majority', desc: 'Write concern level' }),
|
||||
});
|
||||
|
||||
// Export typed configuration object
|
||||
export type MongoDbConfig = typeof mongodbConfig;
|
||||
|
||||
// Export individual config values for convenience
|
||||
export const {
|
||||
MONGODB_HOST,
|
||||
MONGODB_PORT,
|
||||
MONGODB_DATABASE,
|
||||
MONGODB_USERNAME,
|
||||
MONGODB_PASSWORD,
|
||||
MONGODB_AUTH_SOURCE,
|
||||
MONGODB_URI,
|
||||
MONGODB_MAX_POOL_SIZE,
|
||||
MONGODB_MIN_POOL_SIZE,
|
||||
MONGODB_MAX_IDLE_TIME,
|
||||
MONGODB_CONNECT_TIMEOUT,
|
||||
MONGODB_SOCKET_TIMEOUT,
|
||||
MONGODB_SERVER_SELECTION_TIMEOUT,
|
||||
MONGODB_TLS,
|
||||
MONGODB_TLS_INSECURE,
|
||||
MONGODB_TLS_CA_FILE,
|
||||
MONGODB_RETRY_WRITES,
|
||||
MONGODB_JOURNAL,
|
||||
MONGODB_READ_PREFERENCE,
|
||||
MONGODB_WRITE_CONCERN,
|
||||
} = mongodbConfig;
|
||||
90
libs/config/src/monitoring.ts
Normal file
90
libs/config/src/monitoring.ts
Normal file
|
|
@ -0,0 +1,90 @@
|
|||
/**
|
||||
* Monitoring configuration using envalid
|
||||
* Prometheus metrics, Grafana visualization, and Loki logging
|
||||
*/
|
||||
import { cleanEnv, str, port, bool, num } from 'envalid';
|
||||
|
||||
/**
|
||||
* Prometheus configuration with validation and defaults
|
||||
*/
|
||||
export const prometheusConfig = cleanEnv(process.env, {
|
||||
// Prometheus Server
|
||||
PROMETHEUS_HOST: str({ default: 'localhost', desc: 'Prometheus host' }),
|
||||
PROMETHEUS_PORT: port({ default: 9090, desc: 'Prometheus port' }),
|
||||
PROMETHEUS_URL: str({ default: '', desc: 'Complete Prometheus URL (overrides host/port)' }),
|
||||
|
||||
// Authentication
|
||||
PROMETHEUS_USERNAME: str({ default: '', desc: 'Prometheus username (if auth enabled)' }),
|
||||
PROMETHEUS_PASSWORD: str({ default: '', desc: 'Prometheus password (if auth enabled)' }),
|
||||
|
||||
// Metrics Collection
|
||||
PROMETHEUS_SCRAPE_INTERVAL: str({ default: '15s', desc: 'Default scrape interval' }),
|
||||
PROMETHEUS_EVALUATION_INTERVAL: str({ default: '15s', desc: 'Rule evaluation interval' }),
|
||||
PROMETHEUS_RETENTION_TIME: str({ default: '15d', desc: 'Data retention time' }),
|
||||
|
||||
// TLS Settings
|
||||
PROMETHEUS_TLS_ENABLED: bool({ default: false, desc: 'Enable TLS for Prometheus' }),
|
||||
PROMETHEUS_TLS_INSECURE: bool({ default: false, desc: 'Skip TLS verification' }),
|
||||
});
|
||||
|
||||
/**
|
||||
* Grafana configuration with validation and defaults
|
||||
*/
|
||||
export const grafanaConfig = cleanEnv(process.env, {
|
||||
// Grafana Server
|
||||
GRAFANA_HOST: str({ default: 'localhost', desc: 'Grafana host' }),
|
||||
GRAFANA_PORT: port({ default: 3000, desc: 'Grafana port' }),
|
||||
GRAFANA_URL: str({ default: '', desc: 'Complete Grafana URL (overrides host/port)' }),
|
||||
|
||||
// Authentication
|
||||
GRAFANA_ADMIN_USER: str({ default: 'admin', desc: 'Grafana admin username' }),
|
||||
GRAFANA_ADMIN_PASSWORD: str({ default: 'admin', desc: 'Grafana admin password' }),
|
||||
|
||||
// Security Settings
|
||||
GRAFANA_ALLOW_SIGN_UP: bool({ default: false, desc: 'Allow user sign up' }),
|
||||
GRAFANA_SECRET_KEY: str({ default: '', desc: 'Grafana secret key for encryption' }),
|
||||
|
||||
// Database Settings
|
||||
GRAFANA_DATABASE_TYPE: str({
|
||||
default: 'sqlite3',
|
||||
choices: ['mysql', 'postgres', 'sqlite3'],
|
||||
desc: 'Grafana database type'
|
||||
}),
|
||||
GRAFANA_DATABASE_URL: str({ default: '', desc: 'Grafana database URL' }),
|
||||
|
||||
// Feature Flags
|
||||
GRAFANA_DISABLE_GRAVATAR: bool({ default: true, desc: 'Disable Gravatar avatars' }),
|
||||
GRAFANA_ENABLE_GZIP: bool({ default: true, desc: 'Enable gzip compression' }),
|
||||
});
|
||||
|
||||
// Export typed configuration objects
|
||||
export type PrometheusConfig = typeof prometheusConfig;
|
||||
export type GrafanaConfig = typeof grafanaConfig;
|
||||
|
||||
// Export individual config values for convenience
|
||||
export const {
|
||||
PROMETHEUS_HOST,
|
||||
PROMETHEUS_PORT,
|
||||
PROMETHEUS_URL,
|
||||
PROMETHEUS_USERNAME,
|
||||
PROMETHEUS_PASSWORD,
|
||||
PROMETHEUS_SCRAPE_INTERVAL,
|
||||
PROMETHEUS_EVALUATION_INTERVAL,
|
||||
PROMETHEUS_RETENTION_TIME,
|
||||
PROMETHEUS_TLS_ENABLED,
|
||||
PROMETHEUS_TLS_INSECURE,
|
||||
} = prometheusConfig;
|
||||
|
||||
export const {
|
||||
GRAFANA_HOST,
|
||||
GRAFANA_PORT,
|
||||
GRAFANA_URL,
|
||||
GRAFANA_ADMIN_USER,
|
||||
GRAFANA_ADMIN_PASSWORD,
|
||||
GRAFANA_ALLOW_SIGN_UP,
|
||||
GRAFANA_SECRET_KEY,
|
||||
GRAFANA_DATABASE_TYPE,
|
||||
GRAFANA_DATABASE_URL,
|
||||
GRAFANA_DISABLE_GRAVATAR,
|
||||
GRAFANA_ENABLE_GZIP,
|
||||
} = grafanaConfig;
|
||||
53
libs/config/src/questdb.ts
Normal file
53
libs/config/src/questdb.ts
Normal file
|
|
@ -0,0 +1,53 @@
|
|||
/**
|
||||
* QuestDB configuration using envalid
|
||||
* Time-series database for OHLCV data, indicators, and performance metrics
|
||||
*/
|
||||
import { cleanEnv, str, port, bool, num } from 'envalid';
|
||||
|
||||
/**
|
||||
* QuestDB configuration with validation and defaults
|
||||
*/
|
||||
export const questdbConfig = cleanEnv(process.env, {
|
||||
// QuestDB Connection
|
||||
QUESTDB_HOST: str({ default: 'localhost', desc: 'QuestDB host' }),
|
||||
QUESTDB_HTTP_PORT: port({ default: 9000, desc: 'QuestDB HTTP port (web console)' }),
|
||||
QUESTDB_PG_PORT: port({ default: 8812, desc: 'QuestDB PostgreSQL wire protocol port' }),
|
||||
QUESTDB_INFLUX_PORT: port({ default: 9009, desc: 'QuestDB InfluxDB line protocol port' }),
|
||||
|
||||
// Authentication (if enabled)
|
||||
QUESTDB_USER: str({ default: '', desc: 'QuestDB username (if auth enabled)' }),
|
||||
QUESTDB_PASSWORD: str({ default: '', desc: 'QuestDB password (if auth enabled)' }),
|
||||
|
||||
// Connection Settings
|
||||
QUESTDB_CONNECTION_TIMEOUT: num({ default: 5000, desc: 'Connection timeout in ms' }),
|
||||
QUESTDB_REQUEST_TIMEOUT: num({ default: 30000, desc: 'Request timeout in ms' }),
|
||||
QUESTDB_RETRY_ATTEMPTS: num({ default: 3, desc: 'Number of retry attempts' }),
|
||||
|
||||
// TLS Settings
|
||||
QUESTDB_TLS_ENABLED: bool({ default: false, desc: 'Enable TLS for QuestDB connection' }),
|
||||
QUESTDB_TLS_VERIFY_SERVER_CERT: bool({ default: true, desc: 'Verify server certificate' }),
|
||||
|
||||
// Database Settings
|
||||
QUESTDB_DEFAULT_DATABASE: str({ default: 'qdb', desc: 'Default database name' }),
|
||||
QUESTDB_TELEMETRY_ENABLED: bool({ default: false, desc: 'Enable telemetry' }),
|
||||
});
|
||||
|
||||
// Export typed configuration object
|
||||
export type QuestDbConfig = typeof questdbConfig;
|
||||
|
||||
// Export individual config values for convenience
|
||||
export const {
|
||||
QUESTDB_HOST,
|
||||
QUESTDB_HTTP_PORT,
|
||||
QUESTDB_PG_PORT,
|
||||
QUESTDB_INFLUX_PORT,
|
||||
QUESTDB_USER,
|
||||
QUESTDB_PASSWORD,
|
||||
QUESTDB_CONNECTION_TIMEOUT,
|
||||
QUESTDB_REQUEST_TIMEOUT,
|
||||
QUESTDB_RETRY_ATTEMPTS,
|
||||
QUESTDB_TLS_ENABLED,
|
||||
QUESTDB_TLS_VERIFY_SERVER_CERT,
|
||||
QUESTDB_DEFAULT_DATABASE,
|
||||
QUESTDB_TELEMETRY_ENABLED,
|
||||
} = questdbConfig;
|
||||
|
|
@ -1,45 +1,82 @@
|
|||
/**
|
||||
* Risk management configuration for trading operations
|
||||
* Risk management configuration using envalid
|
||||
*/
|
||||
import { getNumericEnvVar, validateConfig, createConfigLoader } from './core';
|
||||
import { riskConfigSchema, RiskConfig } from './types';
|
||||
import { cleanEnv, str, num, bool } from 'envalid';
|
||||
|
||||
/**
|
||||
* Default risk configuration
|
||||
* Risk configuration with validation and defaults
|
||||
*/
|
||||
const defaultRiskConfig: RiskConfig = {
|
||||
maxDrawdown: 0.05,
|
||||
maxPositionSize: 0.1,
|
||||
maxLeverage: 1,
|
||||
stopLossDefault: 0.02,
|
||||
takeProfitDefault: 0.05
|
||||
};
|
||||
export const riskConfig = cleanEnv(process.env, {
|
||||
// Position Sizing
|
||||
RISK_MAX_POSITION_SIZE: num({ default: 0.1, desc: 'Maximum position size as percentage of portfolio' }),
|
||||
RISK_MAX_PORTFOLIO_EXPOSURE: num({ default: 0.8, desc: 'Maximum portfolio exposure percentage' }),
|
||||
RISK_MAX_SINGLE_ASSET_EXPOSURE: num({ default: 0.2, desc: 'Maximum exposure to single asset' }),
|
||||
RISK_MAX_SECTOR_EXPOSURE: num({ default: 0.3, desc: 'Maximum exposure to single sector' }),
|
||||
|
||||
// Stop Loss and Take Profit
|
||||
RISK_DEFAULT_STOP_LOSS: num({ default: 0.05, desc: 'Default stop loss percentage' }),
|
||||
RISK_DEFAULT_TAKE_PROFIT: num({ default: 0.15, desc: 'Default take profit percentage' }),
|
||||
RISK_TRAILING_STOP_ENABLED: bool({ default: true, desc: 'Enable trailing stop losses' }),
|
||||
RISK_TRAILING_STOP_DISTANCE: num({ default: 0.03, desc: 'Trailing stop distance percentage' }),
|
||||
|
||||
// Risk Limits
|
||||
RISK_MAX_DAILY_LOSS: num({ default: 0.05, desc: 'Maximum daily loss percentage' }),
|
||||
RISK_MAX_WEEKLY_LOSS: num({ default: 0.1, desc: 'Maximum weekly loss percentage' }),
|
||||
RISK_MAX_MONTHLY_LOSS: num({ default: 0.2, desc: 'Maximum monthly loss percentage' }),
|
||||
|
||||
// Volatility Controls
|
||||
RISK_MAX_VOLATILITY_THRESHOLD: num({ default: 0.4, desc: 'Maximum volatility threshold' }),
|
||||
RISK_VOLATILITY_LOOKBACK_DAYS: num({ default: 20, desc: 'Volatility calculation lookback period' }),
|
||||
|
||||
// Correlation Controls
|
||||
RISK_MAX_CORRELATION_THRESHOLD: num({ default: 0.7, desc: 'Maximum correlation between positions' }),
|
||||
RISK_CORRELATION_LOOKBACK_DAYS: num({ default: 60, desc: 'Correlation calculation lookback period' }),
|
||||
|
||||
// Leverage Controls
|
||||
RISK_MAX_LEVERAGE: num({ default: 2.0, desc: 'Maximum leverage allowed' }),
|
||||
RISK_MARGIN_CALL_THRESHOLD: num({ default: 0.3, desc: 'Margin call threshold' }),
|
||||
|
||||
// Circuit Breakers
|
||||
RISK_CIRCUIT_BREAKER_ENABLED: bool({ default: true, desc: 'Enable circuit breakers' }),
|
||||
RISK_CIRCUIT_BREAKER_LOSS_THRESHOLD: num({ default: 0.1, desc: 'Circuit breaker loss threshold' }),
|
||||
RISK_CIRCUIT_BREAKER_COOLDOWN_MINUTES: num({ default: 60, desc: 'Circuit breaker cooldown period' }),
|
||||
|
||||
// Risk Model
|
||||
RISK_MODEL_TYPE: str({
|
||||
choices: ['var', 'cvar', 'expected_shortfall'],
|
||||
default: 'var',
|
||||
desc: 'Risk model type'
|
||||
}),
|
||||
RISK_CONFIDENCE_LEVEL: num({ default: 0.95, desc: 'Risk model confidence level' }),
|
||||
RISK_TIME_HORIZON_DAYS: num({ default: 1, desc: 'Risk time horizon in days' }),
|
||||
});
|
||||
|
||||
/**
|
||||
* Load risk configuration from environment variables
|
||||
*/
|
||||
export function loadRiskConfig(): RiskConfig {
|
||||
const config: RiskConfig = {
|
||||
maxDrawdown: getNumericEnvVar('RISK_MAX_DRAWDOWN', defaultRiskConfig.maxDrawdown),
|
||||
maxPositionSize: getNumericEnvVar('RISK_MAX_POSITION_SIZE', defaultRiskConfig.maxPositionSize),
|
||||
maxLeverage: getNumericEnvVar('RISK_MAX_LEVERAGE', defaultRiskConfig.maxLeverage),
|
||||
stopLossDefault: getNumericEnvVar('RISK_STOP_LOSS_DEFAULT', defaultRiskConfig.stopLossDefault),
|
||||
takeProfitDefault: getNumericEnvVar('RISK_TAKE_PROFIT_DEFAULT', defaultRiskConfig.takeProfitDefault)
|
||||
};
|
||||
// Export typed configuration object
|
||||
export type RiskConfig = typeof riskConfig;
|
||||
|
||||
return validateConfig(config, riskConfigSchema);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a dynamic configuration loader for risk management
|
||||
*/
|
||||
export const createRiskConfig = createConfigLoader<typeof defaultRiskConfig>(
|
||||
'risk',
|
||||
riskConfigSchema,
|
||||
defaultRiskConfig
|
||||
);
|
||||
|
||||
/**
|
||||
* Singleton risk configuration
|
||||
*/
|
||||
export const riskConfig = loadRiskConfig();
|
||||
// Export individual config values for convenience
|
||||
export const {
|
||||
RISK_MAX_POSITION_SIZE,
|
||||
RISK_MAX_PORTFOLIO_EXPOSURE,
|
||||
RISK_MAX_SINGLE_ASSET_EXPOSURE,
|
||||
RISK_MAX_SECTOR_EXPOSURE,
|
||||
RISK_DEFAULT_STOP_LOSS,
|
||||
RISK_DEFAULT_TAKE_PROFIT,
|
||||
RISK_TRAILING_STOP_ENABLED,
|
||||
RISK_TRAILING_STOP_DISTANCE,
|
||||
RISK_MAX_DAILY_LOSS,
|
||||
RISK_MAX_WEEKLY_LOSS,
|
||||
RISK_MAX_MONTHLY_LOSS,
|
||||
RISK_MAX_VOLATILITY_THRESHOLD,
|
||||
RISK_VOLATILITY_LOOKBACK_DAYS,
|
||||
RISK_MAX_CORRELATION_THRESHOLD,
|
||||
RISK_CORRELATION_LOOKBACK_DAYS,
|
||||
RISK_MAX_LEVERAGE,
|
||||
RISK_MARGIN_CALL_THRESHOLD,
|
||||
RISK_CIRCUIT_BREAKER_ENABLED,
|
||||
RISK_CIRCUIT_BREAKER_LOSS_THRESHOLD,
|
||||
RISK_CIRCUIT_BREAKER_COOLDOWN_MINUTES,
|
||||
RISK_MODEL_TYPE,
|
||||
RISK_CONFIDENCE_LEVEL,
|
||||
RISK_TIME_HORIZON_DAYS,
|
||||
} = riskConfig;
|
||||
|
|
|
|||
|
|
@ -1,5 +0,0 @@
|
|||
/**
|
||||
* Export all service-specific configurations
|
||||
*/
|
||||
export * from './market-data-gateway';
|
||||
export * from './risk-guardian';
|
||||
|
|
@ -1,106 +0,0 @@
|
|||
/**
|
||||
* Market Data Gateway service configuration
|
||||
*/
|
||||
import { z } from 'zod';
|
||||
import { getEnvVar, getNumericEnvVar, getBooleanEnvVar, createConfigLoader } from '../core';
|
||||
import { Environment, BaseConfig } from '../types';
|
||||
import { getEnvironment } from '../core';
|
||||
|
||||
/**
|
||||
* Market Data Gateway specific configuration schema
|
||||
*/
|
||||
export const marketDataGatewayConfigSchema = z.object({
|
||||
environment: z.nativeEnum(Environment),
|
||||
logLevel: z.enum(['debug', 'info', 'warn', 'error']).default('info'),
|
||||
service: z.object({
|
||||
name: z.string().default('market-data-gateway'),
|
||||
version: z.string().default('1.0.0'),
|
||||
port: z.number().default(4000)
|
||||
}),
|
||||
websocket: z.object({
|
||||
enabled: z.boolean().default(true),
|
||||
path: z.string().default('/ws/market-data'),
|
||||
heartbeatInterval: z.number().default(30000)
|
||||
}),
|
||||
throttling: z.object({
|
||||
maxRequestsPerMinute: z.number().default(300),
|
||||
maxConnectionsPerIP: z.number().default(5)
|
||||
}),
|
||||
caching: z.object({
|
||||
enabled: z.boolean().default(true),
|
||||
ttlSeconds: z.number().default(60)
|
||||
})
|
||||
});
|
||||
|
||||
/**
|
||||
* Market Data Gateway configuration type
|
||||
*/
|
||||
export type MarketDataGatewayConfig = z.infer<typeof marketDataGatewayConfigSchema>;
|
||||
|
||||
/**
|
||||
* Default Market Data Gateway configuration
|
||||
*/
|
||||
const defaultConfig: Partial<MarketDataGatewayConfig> = {
|
||||
environment: getEnvironment(),
|
||||
logLevel: 'info',
|
||||
service: {
|
||||
name: 'market-data-gateway',
|
||||
version: '1.0.0',
|
||||
port: 4000
|
||||
},
|
||||
websocket: {
|
||||
enabled: true,
|
||||
path: '/ws/market-data',
|
||||
heartbeatInterval: 30000 // 30 seconds
|
||||
},
|
||||
throttling: {
|
||||
maxRequestsPerMinute: 300,
|
||||
maxConnectionsPerIP: 5
|
||||
},
|
||||
caching: {
|
||||
enabled: true,
|
||||
ttlSeconds: 60
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Load Market Data Gateway configuration
|
||||
*/
|
||||
export function loadMarketDataGatewayConfig(): MarketDataGatewayConfig {
|
||||
return {
|
||||
environment: getEnvironment(),
|
||||
logLevel: (getEnvVar('LOG_LEVEL') || defaultConfig.logLevel) as 'debug' | 'info' | 'warn' | 'error',
|
||||
service: {
|
||||
name: getEnvVar('SERVICE_NAME') || defaultConfig.service!.name,
|
||||
version: getEnvVar('SERVICE_VERSION') || defaultConfig.service!.version,
|
||||
port: getNumericEnvVar('SERVICE_PORT', defaultConfig.service!.port)
|
||||
},
|
||||
websocket: {
|
||||
enabled: getBooleanEnvVar('WEBSOCKET_ENABLED', defaultConfig.websocket!.enabled),
|
||||
path: getEnvVar('WEBSOCKET_PATH') || defaultConfig.websocket!.path,
|
||||
heartbeatInterval: getNumericEnvVar('WEBSOCKET_HEARTBEAT_INTERVAL', defaultConfig.websocket!.heartbeatInterval)
|
||||
},
|
||||
throttling: {
|
||||
maxRequestsPerMinute: getNumericEnvVar('THROTTLING_MAX_REQUESTS', defaultConfig.throttling!.maxRequestsPerMinute),
|
||||
maxConnectionsPerIP: getNumericEnvVar('THROTTLING_MAX_CONNECTIONS', defaultConfig.throttling!.maxConnectionsPerIP)
|
||||
},
|
||||
caching: {
|
||||
enabled: getBooleanEnvVar('CACHING_ENABLED', defaultConfig.caching!.enabled),
|
||||
ttlSeconds: getNumericEnvVar('CACHING_TTL_SECONDS', defaultConfig.caching!.ttlSeconds)
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
 * Creates a dynamic configuration loader for the Market Data Gateway.
 * Delegates to the shared `createConfigLoader` helper with this
 * service's name, schema, and defaults.
 */
export const createMarketDataGatewayConfig = createConfigLoader<MarketDataGatewayConfig>(
  'market-data-gateway',
  marketDataGatewayConfigSchema,
  defaultConfig
);

/**
 * Singleton Market Data Gateway configuration, loaded eagerly at module
 * import time from the current process environment.
 */
export const marketDataGatewayConfig = loadMarketDataGatewayConfig();
|
||||
|
|
@ -1,112 +0,0 @@
|
|||
/**
|
||||
* Risk Guardian service configuration
|
||||
*/
|
||||
import { z } from 'zod';
|
||||
import { getEnvVar, getNumericEnvVar, getBooleanEnvVar, createConfigLoader } from '../core';
|
||||
import { Environment, BaseConfig } from '../types';
|
||||
import { getEnvironment } from '../core';
|
||||
|
||||
/**
 * Risk Guardian specific configuration schema.
 *
 * Zod schema describing every setting the risk service reads at startup.
 * Defaults declared here apply when a field is absent at parse time.
 */
export const riskGuardianConfigSchema = z.object({
  // Deployment environment (development/testing/staging/production).
  environment: z.nativeEnum(Environment),
  // Minimum log severity emitted by the service.
  logLevel: z.enum(['debug', 'info', 'warn', 'error']).default('info'),
  // Service identity and listen port.
  service: z.object({
    name: z.string().default('risk-guardian'),
    version: z.string().default('1.0.0'),
    port: z.number().default(4001)
  }),
  // Individual risk-validation toggles; all enabled by default.
  riskChecks: z.object({
    preTradeValidation: z.boolean().default(true),
    portfolioValidation: z.boolean().default(true),
    leverageValidation: z.boolean().default(true),
    concentrationValidation: z.boolean().default(true)
  }),
  // Alerting thresholds. NOTE(review): thresholds look like fractional
  // risk-utilisation ratios (0.8 = 80%) — confirm against consumers.
  alerting: z.object({
    enabled: z.boolean().default(true),
    criticalThreshold: z.number().default(0.8),
    warningThreshold: z.number().default(0.6)
  }),
  // Periodic watchdog check configuration.
  watchdog: z.object({
    enabled: z.boolean().default(true),
    checkIntervalSeconds: z.number().default(60)
  })
});

/**
 * Risk Guardian configuration type, inferred from the schema above.
 */
export type RiskGuardianConfig = z.infer<typeof riskGuardianConfigSchema>;
|
||||
|
||||
/**
|
||||
* Default Risk Guardian configuration
|
||||
*/
|
||||
const defaultConfig: Partial<RiskGuardianConfig> = {
|
||||
environment: getEnvironment(),
|
||||
logLevel: 'info',
|
||||
service: {
|
||||
name: 'risk-guardian',
|
||||
version: '1.0.0',
|
||||
port: 4001
|
||||
},
|
||||
riskChecks: {
|
||||
preTradeValidation: true,
|
||||
portfolioValidation: true,
|
||||
leverageValidation: true,
|
||||
concentrationValidation: true
|
||||
},
|
||||
alerting: {
|
||||
enabled: true,
|
||||
criticalThreshold: 0.8,
|
||||
warningThreshold: 0.6
|
||||
},
|
||||
watchdog: {
|
||||
enabled: true,
|
||||
checkIntervalSeconds: 60
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Load Risk Guardian configuration
|
||||
*/
|
||||
export function loadRiskGuardianConfig(): RiskGuardianConfig {
|
||||
return {
|
||||
environment: getEnvironment(),
|
||||
logLevel: (getEnvVar('LOG_LEVEL') || defaultConfig.logLevel) as 'debug' | 'info' | 'warn' | 'error',
|
||||
service: {
|
||||
name: getEnvVar('SERVICE_NAME') || defaultConfig.service!.name,
|
||||
version: getEnvVar('SERVICE_VERSION') || defaultConfig.service!.version,
|
||||
port: getNumericEnvVar('SERVICE_PORT', defaultConfig.service!.port)
|
||||
},
|
||||
riskChecks: {
|
||||
preTradeValidation: getBooleanEnvVar('RISK_CHECKS_PRE_TRADE', defaultConfig.riskChecks!.preTradeValidation),
|
||||
portfolioValidation: getBooleanEnvVar('RISK_CHECKS_PORTFOLIO', defaultConfig.riskChecks!.portfolioValidation),
|
||||
leverageValidation: getBooleanEnvVar('RISK_CHECKS_LEVERAGE', defaultConfig.riskChecks!.leverageValidation),
|
||||
concentrationValidation: getBooleanEnvVar('RISK_CHECKS_CONCENTRATION', defaultConfig.riskChecks!.concentrationValidation)
|
||||
},
|
||||
alerting: {
|
||||
enabled: getBooleanEnvVar('ALERTING_ENABLED', defaultConfig.alerting!.enabled),
|
||||
criticalThreshold: getNumericEnvVar('ALERTING_CRITICAL_THRESHOLD', defaultConfig.alerting!.criticalThreshold),
|
||||
warningThreshold: getNumericEnvVar('ALERTING_WARNING_THRESHOLD', defaultConfig.alerting!.warningThreshold)
|
||||
},
|
||||
watchdog: {
|
||||
enabled: getBooleanEnvVar('WATCHDOG_ENABLED', defaultConfig.watchdog!.enabled),
|
||||
checkIntervalSeconds: getNumericEnvVar('WATCHDOG_CHECK_INTERVAL', defaultConfig.watchdog!.checkIntervalSeconds)
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
 * Creates a dynamic configuration loader for the Risk Guardian.
 * Delegates to the shared `createConfigLoader` helper with this
 * service's name, schema, and defaults.
 */
export const createRiskGuardianConfig = createConfigLoader<RiskGuardianConfig>(
  'risk-guardian',
  riskGuardianConfigSchema,
  defaultConfig
);

/**
 * Singleton Risk Guardian configuration, loaded eagerly at module
 * import time from the current process environment.
 */
export const riskGuardianConfig = loadRiskGuardianConfig();
|
||||
|
|
@ -1,104 +0,0 @@
|
|||
/**
|
||||
* Configuration type definitions for the Stock Bot platform
|
||||
*/
|
||||
import { z } from 'zod';
|
||||
|
||||
/**
 * Environment enum for different deployment environments.
 * String-valued so members round-trip cleanly through environment
 * variables, logs, and `z.nativeEnum` validation.
 */
export enum Environment {
  Development = 'development',
  Testing = 'testing',
  Staging = 'staging',
  Production = 'production'
}
|
||||
|
||||
/**
 * Common configuration interface for all service configs.
 * Service-specific configurations share this minimal shape.
 */
export interface BaseConfig {
  // Deployment environment the service is running in.
  environment: Environment;
  // Minimum log severity the service should emit.
  logLevel: 'debug' | 'info' | 'warn' | 'error';
  // Service identity and network settings.
  service: {
    name: string;
    version: string;
    port: number;
  };
}
|
||||
|
||||
/**
 * Database configuration schema.
 *
 * One sub-object per backing store (Dragonfly, QuestDB, MongoDB,
 * Postgres). All fields have localhost-friendly defaults; passwords
 * and credentials are optional.
 */
export const databaseConfigSchema = z.object({
  dragonfly: z.object({
    host: z.string().default('localhost'),
    port: z.number().default(6379), // Redis-compatible default port
    password: z.string().optional(),
    maxRetriesPerRequest: z.number().default(3)
  }),
  questDB: z.object({
    host: z.string().default('localhost'),
    port: z.number().default(8812), // Postgres-wire protocol port
    database: z.string().default('stockbot'),
    user: z.string().default('admin'),
    password: z.string().optional(),
    httpPort: z.number().default(9000) // HTTP/REST interface port
  }),
  mongodb: z.object({
    uri: z.string().default('mongodb://localhost:27017'),
    database: z.string().default('stockbot'),
    username: z.string().optional(),
    password: z.string().optional(),
    // Extra driver options, passed through without further typing.
    options: z.record(z.string(), z.any()).optional()
  }),
  postgres: z.object({
    host: z.string().default('localhost'),
    port: z.number().default(5432),
    database: z.string().default('stockbot'),
    user: z.string().default('postgres'),
    password: z.string().optional(),
    ssl: z.boolean().default(false),
    poolSize: z.number().default(10)
  })
});
|
||||
|
||||
/**
|
||||
* Data provider configuration schema
|
||||
*/
|
||||
export const dataProviderSchema = z.object({
|
||||
name: z.string(),
|
||||
type: z.enum(['rest', 'websocket', 'file']),
|
||||
baseUrl: z.string().url().optional(),
|
||||
wsUrl: z.string().url().optional(),
|
||||
apiKey: z.string().optional(),
|
||||
apiSecret: z.string().optional(),
|
||||
refreshInterval: z.number().optional(),
|
||||
rateLimits: z.object({
|
||||
maxRequestsPerMinute: z.number().optional(),
|
||||
maxRequestsPerSecond: z.number().optional()
|
||||
}).optional()
|
||||
});
|
||||
|
||||
export const dataProvidersConfigSchema = z.object({
|
||||
providers: z.array(dataProviderSchema),
|
||||
defaultProvider: z.string()
|
||||
});
|
||||
|
||||
/**
 * Risk management configuration schema.
 * NOTE(review): values appear to be fractional ratios (e.g. 0.05 = 5%)
 * rather than percentages — confirm against the consumers of this config.
 */
export const riskConfigSchema = z.object({
  maxDrawdown: z.number().default(0.05),
  maxPositionSize: z.number().default(0.1),
  maxLeverage: z.number().default(1), // 1 = no leverage by default
  stopLossDefault: z.number().default(0.02),
  takeProfitDefault: z.number().default(0.05)
});

/**
 * Type definitions inferred from the schemas above.
 */
export type DatabaseConfig = z.infer<typeof databaseConfigSchema>;
export type DataProviderConfig = z.infer<typeof dataProviderSchema>;
export type DataProvidersConfig = z.infer<typeof dataProvidersConfigSchema>;
export type RiskConfig = z.infer<typeof riskConfigSchema>;
|
||||
1
libs/config/tsconfig.tsbuildinfo
Normal file
1
libs/config/tsconfig.tsbuildinfo
Normal file
File diff suppressed because one or more lines are too long
118
libs/config/validate-config.js
Normal file
118
libs/config/validate-config.js
Normal file
|
|
@ -0,0 +1,118 @@
|
|||
#!/usr/bin/env node
|
||||
|
||||
/**
|
||||
* Configuration Validation Script
|
||||
* Tests that all configuration modules can be loaded and validated
|
||||
*/
|
||||
|
||||
// Set test environment variables.
// Seed the process environment with deterministic values so every
// configuration module loaded below can resolve without real infrastructure.
Object.assign(process.env, {
  NODE_ENV: 'test',
  PORT: '3001',

  // Database configs
  DB_HOST: 'localhost',
  DB_PORT: '5432',
  DB_NAME: 'test_db',
  DB_USER: 'test_user',
  DB_PASSWORD: 'test_pass',

  // QuestDB configs
  QUESTDB_HOST: 'localhost',
  QUESTDB_HTTP_PORT: '9000',
  QUESTDB_PG_PORT: '8812',

  // MongoDB configs
  MONGODB_HOST: 'localhost',
  MONGODB_PORT: '27017',
  MONGODB_DATABASE: 'test_db',

  // Dragonfly configs
  DRAGONFLY_HOST: 'localhost',
  DRAGONFLY_PORT: '6379',

  // Monitoring configs
  PROMETHEUS_HOST: 'localhost',
  PROMETHEUS_PORT: '9090',
  GRAFANA_HOST: 'localhost',
  GRAFANA_PORT: '3000',

  // Loki configs
  LOKI_HOST: 'localhost',
  LOKI_PORT: '3100',

  // Logging configs
  LOG_LEVEL: 'info',
  LOG_FORMAT: 'json'
});
|
||||
|
||||
// Main validation pass: require each built configuration module, verify it
// exposes at least one object-valued export, then print a summary and exit
// 0 only when every module (including the index barrel) loaded cleanly.
try {
  console.log('🔍 Validating configuration modules...\n');

  // Compiled modules under ./dist that must each export a config object.
  const modules = [
    { name: 'Database', path: './dist/database.js' },
    { name: 'QuestDB', path: './dist/questdb.js' },
    { name: 'MongoDB', path: './dist/mongodb.js' },
    { name: 'Dragonfly', path: './dist/dragonfly.js' },
    { name: 'Monitoring', path: './dist/monitoring.js' },
    { name: 'Loki', path: './dist/loki.js' },
    { name: 'Logging', path: './dist/logging.js' },
  ];

  const results = [];

  // Per-module check: load it, ensure it exports something, and ensure the
  // first export is a real object. Failures are recorded, not fatal.
  for (const mod of modules) {
    try {
      const loaded = require(mod.path);
      const configKeys = Object.keys(loaded);

      if (configKeys.length === 0) {
        throw new Error('No exported configuration found');
      }

      const mainConfig = loaded[configKeys[0]];
      if (!mainConfig || typeof mainConfig !== 'object') {
        throw new Error('Invalid configuration object');
      }

      console.log(`✅ ${mod.name}: ${configKeys.length} config(s) loaded`);
      results.push({ name: mod.name, status: 'success', configs: configKeys });
    } catch (error) {
      console.log(`❌ ${mod.name}: ${error.message}`);
      results.push({ name: mod.name, status: 'error', error: error.message });
    }
  }

  // The index barrel must also load and re-export the modules.
  try {
    const indexExports = require('./dist/index.js');
    const exportCount = Object.keys(indexExports).length;
    console.log(`\n✅ Index exports: ${exportCount} modules exported`);
    results.push({ name: 'Index', status: 'success', exports: exportCount });
  } catch (error) {
    console.log(`\n❌ Index exports: ${error.message}`);
    results.push({ name: 'Index', status: 'error', error: error.message });
  }

  // Summary and exit code.
  const successful = results.filter((r) => r.status === 'success').length;
  const total = results.length;

  console.log(`\n📊 Validation Summary:`);
  console.log(`  Total modules: ${total}`);
  console.log(`  Successful: ${successful}`);
  console.log(`  Failed: ${total - successful}`);

  if (successful === total) {
    console.log('\n🎉 All configuration modules validated successfully!');
    process.exit(0);
  } else {
    console.log('\n⚠️ Some configuration modules failed validation.');
    process.exit(1);
  }
} catch (error) {
  console.error('❌ Validation script failed:', error.message);
  process.exit(1);
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue