added working config lib

This commit is contained in:
Bojan Kucera 2025-06-03 14:09:31 -04:00
parent f8576c0d93
commit def9bce8dc
33 changed files with 2896 additions and 1485 deletions

242
.env.complete Normal file
View file

@@ -0,0 +1,242 @@
# =======================================================================
# Stock Bot Platform Environment Configuration
# =======================================================================
# Core Application Settings
NODE_ENV=development
PORT=3001
APP_NAME=stock-bot
APP_VERSION=1.0.0
# =======================================================================
# DATABASE CONFIGURATIONS
# =======================================================================
# PostgreSQL - Operational Data (orders, positions, strategies)
POSTGRES_HOST=localhost
POSTGRES_PORT=5432
POSTGRES_DATABASE=trading_bot
POSTGRES_USERNAME=trading_user
POSTGRES_PASSWORD=trading_pass_dev
DB_HOST=localhost
DB_PORT=5432
DB_NAME=trading_bot
DB_USER=trading_user
DB_PASSWORD=trading_pass_dev
DB_POOL_MIN=2
DB_POOL_MAX=10
DB_POOL_IDLE_TIMEOUT=30000
DB_SSL=false
DB_SSL_REJECT_UNAUTHORIZED=true
DB_QUERY_TIMEOUT=30000
DB_CONNECTION_TIMEOUT=5000
# QuestDB - Time-series Data (OHLCV, indicators, performance)
QUESTDB_HOST=localhost
QUESTDB_HTTP_PORT=9000
QUESTDB_PG_PORT=8812
QUESTDB_INFLUX_PORT=9009
QUESTDB_USER=
QUESTDB_PASSWORD=
QUESTDB_CONNECTION_TIMEOUT=5000
QUESTDB_REQUEST_TIMEOUT=30000
QUESTDB_RETRY_ATTEMPTS=3
QUESTDB_TLS_ENABLED=false
QUESTDB_DEFAULT_DATABASE=qdb
QUESTDB_TELEMETRY_ENABLED=false
# MongoDB - Document Storage (sentiment, raw docs, unstructured data)
MONGODB_HOST=localhost
MONGODB_PORT=27017
MONGODB_DATABASE=trading_documents
MONGODB_USERNAME=trading_admin
MONGODB_PASSWORD=trading_mongo_dev
MONGODB_AUTH_SOURCE=admin
MONGODB_URI=
MONGODB_MAX_POOL_SIZE=10
MONGODB_MIN_POOL_SIZE=0
MONGODB_MAX_IDLE_TIME=30000
MONGODB_CONNECT_TIMEOUT=10000
MONGODB_SOCKET_TIMEOUT=30000
MONGODB_SERVER_SELECTION_TIMEOUT=5000
MONGODB_TLS=false
MONGODB_RETRY_WRITES=true
MONGODB_JOURNAL=true
MONGODB_READ_PREFERENCE=primary
MONGODB_WRITE_CONCERN=majority
# Dragonfly - Redis Replacement (caching and events)
DRAGONFLY_HOST=localhost
DRAGONFLY_PORT=6379
DRAGONFLY_PASSWORD=
DRAGONFLY_USERNAME=
DRAGONFLY_DATABASE=0
DRAGONFLY_MAX_RETRIES=3
DRAGONFLY_RETRY_DELAY=50
DRAGONFLY_CONNECT_TIMEOUT=10000
DRAGONFLY_COMMAND_TIMEOUT=5000
DRAGONFLY_POOL_SIZE=10
DRAGONFLY_POOL_MIN=1
DRAGONFLY_POOL_MAX=20
DRAGONFLY_TLS=false
DRAGONFLY_ENABLE_KEEPALIVE=true
DRAGONFLY_KEEPALIVE_INTERVAL=60
DRAGONFLY_CLUSTER_MODE=false
DRAGONFLY_CLUSTER_NODES=
DRAGONFLY_MAX_MEMORY=2gb
DRAGONFLY_CACHE_MODE=true
# =======================================================================
# MONITORING & LOGGING CONFIGURATIONS
# =======================================================================
# Logging Configuration
LOG_LEVEL=debug
LOG_FORMAT=json
LOG_CONSOLE=true
LOG_FILE=false
LOG_FILE_PATH=logs
LOG_FILE_MAX_SIZE=20m
LOG_FILE_MAX_FILES=14
LOG_FILE_DATE_PATTERN=YYYY-MM-DD
LOG_ERROR_FILE=true
LOG_ERROR_STACK=true
LOG_PERFORMANCE=false
LOG_SQL_QUERIES=false
LOG_HTTP_REQUESTS=true
LOG_STRUCTURED=true
LOG_TIMESTAMP=true
LOG_CALLER_INFO=false
LOG_SILENT_MODULES=
LOG_VERBOSE_MODULES=
LOG_SERVICE_NAME=stock-bot
LOG_SERVICE_VERSION=1.0.0
LOG_ENVIRONMENT=development
# Loki - Log Aggregation
LOKI_HOST=localhost
LOKI_PORT=3100
LOKI_URL=
LOKI_USERNAME=
LOKI_PASSWORD=
LOKI_TENANT_ID=
LOKI_PUSH_TIMEOUT=10000
LOKI_BATCH_SIZE=1024
LOKI_BATCH_WAIT=1000
LOKI_RETENTION_PERIOD=30d
LOKI_MAX_CHUNK_AGE=1h
LOKI_TLS_ENABLED=false
LOKI_TLS_INSECURE=false
LOKI_DEFAULT_LABELS=
LOKI_SERVICE_LABEL=stock-bot
LOKI_ENVIRONMENT_LABEL=development
# Prometheus - Metrics Collection
PROMETHEUS_HOST=localhost
PROMETHEUS_PORT=9090
PROMETHEUS_URL=
PROMETHEUS_USERNAME=
PROMETHEUS_PASSWORD=
PROMETHEUS_SCRAPE_INTERVAL=15s
PROMETHEUS_EVALUATION_INTERVAL=15s
PROMETHEUS_RETENTION_TIME=15d
PROMETHEUS_TLS_ENABLED=false
PROMETHEUS_TLS_INSECURE=false
# Grafana - Visualization
GRAFANA_HOST=localhost
GRAFANA_PORT=3000
GRAFANA_URL=
GRAFANA_ADMIN_USER=admin
GRAFANA_ADMIN_PASSWORD=admin
GRAFANA_ALLOW_SIGN_UP=false
GRAFANA_SECRET_KEY=
GRAFANA_DATABASE_TYPE=sqlite3
GRAFANA_DATABASE_URL=
GRAFANA_DISABLE_GRAVATAR=true
GRAFANA_ENABLE_GZIP=true
# =======================================================================
# DATA PROVIDER CONFIGURATIONS
# =======================================================================
# Default Data Provider
DEFAULT_DATA_PROVIDER=alpaca
# Alpaca Markets
ALPACA_ENABLED=true
ALPACA_API_KEY=your_alpaca_key_here
ALPACA_SECRET_KEY=your_alpaca_secret_here
ALPACA_BASE_URL=https://paper-api.alpaca.markets
ALPACA_DATA_URL=https://data.alpaca.markets
ALPACA_PAPER_TRADING=true
# Polygon.io
POLYGON_ENABLED=false
POLYGON_API_KEY=your_polygon_key_here
POLYGON_BASE_URL=https://api.polygon.io
# Yahoo Finance
YAHOO_ENABLED=true
YAHOO_BASE_URL=https://query1.finance.yahoo.com
# IEX Cloud
IEX_ENABLED=false
IEX_API_KEY=your_iex_key_here
IEX_BASE_URL=https://cloud.iexapis.com
# Alpha Vantage
ALPHA_VANTAGE_ENABLED=false
ALPHA_VANTAGE_API_KEY=demo
# Data Provider Settings
DATA_PROVIDER_TIMEOUT=30000
DATA_PROVIDER_RETRIES=3
DATA_PROVIDER_RETRY_DELAY=1000
DATA_CACHE_ENABLED=true
DATA_CACHE_TTL=300
DATA_CACHE_MAX_SIZE=1000
# =======================================================================
# TRADING & RISK MANAGEMENT
# =======================================================================
# Trading Configuration
PAPER_TRADING=true
MAX_POSITION_SIZE=0.1
MAX_DAILY_LOSS=1000
# Risk Management
RISK_MAX_POSITION_SIZE=0.25
RISK_MAX_LEVERAGE=2.0
RISK_DEFAULT_STOP_LOSS=0.02
RISK_DEFAULT_TAKE_PROFIT=0.06
RISK_MAX_DRAWDOWN=0.10
RISK_MAX_CONSECUTIVE_LOSSES=5
RISK_POSITION_SIZING_METHOD=fixed_percentage
RISK_CIRCUIT_BREAKER_ENABLED=true
RISK_CIRCUIT_BREAKER_THRESHOLD=0.05
RISK_CIRCUIT_BREAKER_COOLDOWN=3600000
RISK_ALLOW_WEEKEND_TRADING=false
RISK_MARKET_HOURS_ONLY=true
# =======================================================================
# FEATURE FLAGS
# =======================================================================
ENABLE_ML_SIGNALS=false
ENABLE_SENTIMENT_ANALYSIS=false
ENABLE_SOCIAL_SIGNALS=false
ENABLE_OPTIONS_TRADING=false
ENABLE_CRYPTO_TRADING=false
ENABLE_BACKTESTING=true
ENABLE_PAPER_TRADING=true
ENABLE_LIVE_TRADING=false
# =======================================================================
# DEVELOPMENT & DEBUGGING
# =======================================================================
DEBUG_MODE=true
VERBOSE_LOGGING=true
MOCK_DATA_PROVIDERS=false
ENABLE_API_RATE_LIMITING=true
ENABLE_REQUEST_LOGGING=true

144
.env.docker Normal file
View file

@@ -0,0 +1,144 @@
# Docker Environment Configuration
# This file contains environment variables used by Docker Compose
# =============================================================================
# CONTAINER NETWORK SETTINGS
# =============================================================================
COMPOSE_PROJECT_NAME=stock-bot
NETWORK_NAME=trading-bot-network
# =============================================================================
# DATABASE CONTAINER SETTINGS
# =============================================================================
# PostgreSQL Container
POSTGRES_DB=trading_bot
POSTGRES_USER=trading_user
POSTGRES_PASSWORD=trading_pass_secure
POSTGRES_INITDB_ARGS=--encoding=UTF-8
# MongoDB Container
MONGO_INITDB_ROOT_USERNAME=trading_admin
MONGO_INITDB_ROOT_PASSWORD=trading_mongo_secure
MONGO_INITDB_DATABASE=trading_documents
# QuestDB Container
QDB_TELEMETRY_ENABLED=false
# Dragonfly Container
DRAGONFLY_MAXMEMORY=4gb
DRAGONFLY_PROACTOR_THREADS=8
# =============================================================================
# MONITORING CONTAINER SETTINGS
# =============================================================================
# Grafana Container
GF_SECURITY_ADMIN_USER=admin
GF_SECURITY_ADMIN_PASSWORD=secure_grafana_password
GF_USERS_ALLOW_SIGN_UP=false
GF_PATHS_PROVISIONING=/etc/grafana/provisioning
GF_DISABLE_GRAVATAR=true
# Prometheus Container
PROMETHEUS_CONFIG_FILE=/etc/prometheus/prometheus.yml
PROMETHEUS_STORAGE_PATH=/prometheus
PROMETHEUS_WEB_ENABLE_LIFECYCLE=true
# =============================================================================
# ADMIN INTERFACE CONTAINER SETTINGS
# =============================================================================
# PgAdmin Container
PGADMIN_DEFAULT_EMAIL=admin@tradingbot.local
PGADMIN_DEFAULT_PASSWORD=secure_pgadmin_password
PGADMIN_CONFIG_SERVER_MODE=False
PGADMIN_DISABLE_POSTFIX=true
# Mongo Express Container
ME_CONFIG_MONGODB_ADMINUSERNAME=trading_admin
ME_CONFIG_MONGODB_ADMINPASSWORD=trading_mongo_secure
ME_CONFIG_MONGODB_SERVER=mongodb
ME_CONFIG_MONGODB_PORT=27017
ME_CONFIG_BASICAUTH_USERNAME=admin
ME_CONFIG_BASICAUTH_PASSWORD=secure_mongo_express_password
# Redis Insight Container
REDIS_HOSTS=local:dragonfly:6379
# =============================================================================
# VOLUME MOUNT PATHS
# =============================================================================
# Data Volume Paths (adjust these for your host system)
POSTGRES_DATA_PATH=./data/postgres
QUESTDB_DATA_PATH=./data/questdb
MONGODB_DATA_PATH=./data/mongodb
DRAGONFLY_DATA_PATH=./data/dragonfly
PROMETHEUS_DATA_PATH=./data/prometheus
GRAFANA_DATA_PATH=./data/grafana
LOKI_DATA_PATH=./data/loki
PGADMIN_DATA_PATH=./data/pgadmin
# Config Volume Paths
PROMETHEUS_CONFIG_PATH=./monitoring/prometheus
GRAFANA_CONFIG_PATH=./monitoring/grafana
LOKI_CONFIG_PATH=./monitoring/loki
# Database Init Paths
POSTGRES_INIT_PATH=./database/postgres/init
MONGODB_INIT_PATH=./database/mongodb/init
# =============================================================================
# PORT MAPPINGS (HOST:CONTAINER)
# =============================================================================
# Database Ports
POSTGRES_PORT=5432
QUESTDB_HTTP_PORT=9000
QUESTDB_PG_PORT=8812
QUESTDB_INFLUX_PORT=9009
MONGODB_PORT=27017
DRAGONFLY_PORT=6379
# Monitoring Ports
PROMETHEUS_PORT=9090
GRAFANA_PORT=3000
LOKI_PORT=3100
# Admin Interface Ports
PGADMIN_PORT=8080
MONGO_EXPRESS_PORT=8081
REDIS_INSIGHT_PORT=8001
# =============================================================================
# HEALTH CHECK SETTINGS
# =============================================================================
# Health Check Intervals
HEALTHCHECK_INTERVAL=30s
HEALTHCHECK_TIMEOUT=10s
HEALTHCHECK_RETRIES=3
HEALTHCHECK_START_PERIOD=60s
# =============================================================================
# RESOURCE LIMITS
# =============================================================================
# Memory Limits (uncomment and adjust for production)
# POSTGRES_MEMORY_LIMIT=2g
# QUESTDB_MEMORY_LIMIT=4g
# MONGODB_MEMORY_LIMIT=2g
# DRAGONFLY_MEMORY_LIMIT=4g
# PROMETHEUS_MEMORY_LIMIT=2g
# GRAFANA_MEMORY_LIMIT=512m
# LOKI_MEMORY_LIMIT=1g
# CPU Limits (uncomment and adjust for production)
# POSTGRES_CPU_LIMIT=1
# QUESTDB_CPU_LIMIT=2
# MONGODB_CPU_LIMIT=1
# DRAGONFLY_CPU_LIMIT=2
# PROMETHEUS_CPU_LIMIT=1
# GRAFANA_CPU_LIMIT=0.5
# LOKI_CPU_LIMIT=1

233
.env.prod Normal file
View file

@@ -0,0 +1,233 @@
# =======================================================================
# Stock Bot Platform Production Environment Configuration
# =======================================================================
# Core Application Settings
NODE_ENV=production
PORT=3001
APP_NAME=stock-bot
APP_VERSION=1.0.0
# =======================================================================
# DATABASE CONFIGURATIONS
# =======================================================================
# PostgreSQL - Operational Data (orders, positions, strategies)
DB_HOST=${DB_HOST}
DB_PORT=${DB_PORT:-5432}
DB_NAME=${DB_NAME}
DB_USER=${DB_USER}
DB_PASSWORD=${DB_PASSWORD}
DB_POOL_MIN=5
DB_POOL_MAX=20
DB_POOL_IDLE_TIMEOUT=60000
DB_SSL=true
DB_SSL_REJECT_UNAUTHORIZED=true
DB_QUERY_TIMEOUT=30000
DB_CONNECTION_TIMEOUT=10000
# QuestDB - Time-series Data (OHLCV, indicators, performance)
QUESTDB_HOST=${QUESTDB_HOST}
QUESTDB_HTTP_PORT=${QUESTDB_HTTP_PORT:-9000}
QUESTDB_PG_PORT=${QUESTDB_PG_PORT:-8812}
QUESTDB_INFLUX_PORT=${QUESTDB_INFLUX_PORT:-9009}
QUESTDB_USER=${QUESTDB_USER}
QUESTDB_PASSWORD=${QUESTDB_PASSWORD}
QUESTDB_CONNECTION_TIMEOUT=10000
QUESTDB_REQUEST_TIMEOUT=60000
QUESTDB_RETRY_ATTEMPTS=5
QUESTDB_TLS_ENABLED=true
QUESTDB_DEFAULT_DATABASE=qdb
QUESTDB_TELEMETRY_ENABLED=false
# MongoDB - Document Storage (sentiment, raw docs, unstructured data)
MONGODB_HOST=${MONGODB_HOST}
MONGODB_PORT=${MONGODB_PORT:-27017}
MONGODB_DATABASE=${MONGODB_DATABASE}
MONGODB_USERNAME=${MONGODB_USERNAME}
MONGODB_PASSWORD=${MONGODB_PASSWORD}
MONGODB_AUTH_SOURCE=admin
MONGODB_URI=${MONGODB_URI}
MONGODB_MAX_POOL_SIZE=50
MONGODB_MIN_POOL_SIZE=5
MONGODB_MAX_IDLE_TIME=60000
MONGODB_CONNECT_TIMEOUT=30000
MONGODB_SOCKET_TIMEOUT=60000
MONGODB_SERVER_SELECTION_TIMEOUT=10000
MONGODB_TLS=true
MONGODB_RETRY_WRITES=true
MONGODB_JOURNAL=true
MONGODB_READ_PREFERENCE=primaryPreferred
MONGODB_WRITE_CONCERN=majority
# Dragonfly - Redis Replacement (caching and events)
DRAGONFLY_HOST=${DRAGONFLY_HOST}
DRAGONFLY_PORT=${DRAGONFLY_PORT:-6379}
DRAGONFLY_PASSWORD=${DRAGONFLY_PASSWORD}
DRAGONFLY_USERNAME=${DRAGONFLY_USERNAME}
DRAGONFLY_DATABASE=0
DRAGONFLY_MAX_RETRIES=5
DRAGONFLY_RETRY_DELAY=100
DRAGONFLY_CONNECT_TIMEOUT=30000
DRAGONFLY_COMMAND_TIMEOUT=10000
DRAGONFLY_POOL_SIZE=50
DRAGONFLY_POOL_MIN=5
DRAGONFLY_POOL_MAX=100
DRAGONFLY_TLS=true
DRAGONFLY_ENABLE_KEEPALIVE=true
DRAGONFLY_KEEPALIVE_INTERVAL=30
DRAGONFLY_CLUSTER_MODE=false
DRAGONFLY_CLUSTER_NODES=
DRAGONFLY_MAX_MEMORY=8gb
DRAGONFLY_CACHE_MODE=true
# =======================================================================
# MONITORING & LOGGING CONFIGURATIONS
# =======================================================================
# Logging Configuration (Production - Less verbose)
LOG_LEVEL=info
LOG_FORMAT=json
LOG_CONSOLE=false
LOG_FILE=true
LOG_FILE_PATH=/var/log/stock-bot
LOG_FILE_MAX_SIZE=100m
LOG_FILE_MAX_FILES=30
LOG_FILE_DATE_PATTERN=YYYY-MM-DD
LOG_ERROR_FILE=true
LOG_ERROR_STACK=false
LOG_PERFORMANCE=true
LOG_SQL_QUERIES=false
LOG_HTTP_REQUESTS=false
LOG_STRUCTURED=true
LOG_TIMESTAMP=true
LOG_CALLER_INFO=false
LOG_SILENT_MODULES=
LOG_VERBOSE_MODULES=
LOG_SERVICE_NAME=stock-bot
LOG_SERVICE_VERSION=1.0.0
LOG_ENVIRONMENT=production
# Loki - Log Aggregation
LOKI_HOST=${LOKI_HOST}
LOKI_PORT=${LOKI_PORT:-3100}
LOKI_URL=${LOKI_URL}
LOKI_USERNAME=${LOKI_USERNAME}
LOKI_PASSWORD=${LOKI_PASSWORD}
LOKI_TENANT_ID=${LOKI_TENANT_ID}
LOKI_PUSH_TIMEOUT=30000
LOKI_BATCH_SIZE=2048
LOKI_BATCH_WAIT=5000
LOKI_RETENTION_PERIOD=90d
LOKI_MAX_CHUNK_AGE=2h
LOKI_TLS_ENABLED=true
LOKI_TLS_INSECURE=false
LOKI_DEFAULT_LABELS=
LOKI_SERVICE_LABEL=stock-bot
LOKI_ENVIRONMENT_LABEL=production
# Prometheus - Metrics Collection
PROMETHEUS_HOST=${PROMETHEUS_HOST}
PROMETHEUS_PORT=${PROMETHEUS_PORT:-9090}
PROMETHEUS_URL=${PROMETHEUS_URL}
PROMETHEUS_USERNAME=${PROMETHEUS_USERNAME}
PROMETHEUS_PASSWORD=${PROMETHEUS_PASSWORD}
PROMETHEUS_SCRAPE_INTERVAL=30s
PROMETHEUS_EVALUATION_INTERVAL=30s
PROMETHEUS_RETENTION_TIME=90d
PROMETHEUS_TLS_ENABLED=true
PROMETHEUS_TLS_INSECURE=false
# Grafana - Visualization
GRAFANA_HOST=${GRAFANA_HOST}
GRAFANA_PORT=${GRAFANA_PORT:-3000}
GRAFANA_URL=${GRAFANA_URL}
GRAFANA_ADMIN_USER=${GRAFANA_ADMIN_USER}
GRAFANA_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD}
GRAFANA_ALLOW_SIGN_UP=false
GRAFANA_SECRET_KEY=${GRAFANA_SECRET_KEY}
GRAFANA_DATABASE_TYPE=postgres
GRAFANA_DATABASE_URL=${GRAFANA_DATABASE_URL}
GRAFANA_DISABLE_GRAVATAR=true
GRAFANA_ENABLE_GZIP=true
# =======================================================================
# DATA PROVIDER CONFIGURATIONS
# =======================================================================
# Default Data Provider
DEFAULT_DATA_PROVIDER=alpaca
# Alpaca Markets (Production)
ALPACA_ENABLED=true
ALPACA_API_KEY=${ALPACA_API_KEY}
ALPACA_SECRET_KEY=${ALPACA_SECRET_KEY}
ALPACA_BASE_URL=https://api.alpaca.markets
ALPACA_DATA_URL=https://data.alpaca.markets
ALPACA_PAPER_TRADING=false
# Polygon.io
POLYGON_ENABLED=${POLYGON_ENABLED:-false}
POLYGON_API_KEY=${POLYGON_API_KEY}
POLYGON_BASE_URL=https://api.polygon.io
# Yahoo Finance
YAHOO_ENABLED=${YAHOO_ENABLED:-false}
YAHOO_BASE_URL=https://query1.finance.yahoo.com
# IEX Cloud
IEX_ENABLED=${IEX_ENABLED:-false}
IEX_API_KEY=${IEX_API_KEY}
IEX_BASE_URL=https://cloud.iexapis.com
# Data Provider Settings (Production)
DATA_PROVIDER_TIMEOUT=60000
DATA_PROVIDER_RETRIES=5
DATA_PROVIDER_RETRY_DELAY=2000
DATA_CACHE_ENABLED=true
DATA_CACHE_TTL=60
DATA_CACHE_MAX_SIZE=10000
# =======================================================================
# TRADING & RISK MANAGEMENT (Production)
# =======================================================================
# Trading Configuration
PAPER_TRADING=false
MAX_POSITION_SIZE=${MAX_POSITION_SIZE:-0.05}
MAX_DAILY_LOSS=${MAX_DAILY_LOSS:-10000}
# Risk Management (Stricter for production)
RISK_MAX_POSITION_SIZE=${RISK_MAX_POSITION_SIZE:-0.10}
RISK_MAX_LEVERAGE=${RISK_MAX_LEVERAGE:-1.5}
RISK_DEFAULT_STOP_LOSS=${RISK_DEFAULT_STOP_LOSS:-0.015}
RISK_DEFAULT_TAKE_PROFIT=${RISK_DEFAULT_TAKE_PROFIT:-0.045}
RISK_MAX_DRAWDOWN=${RISK_MAX_DRAWDOWN:-0.05}
RISK_MAX_CONSECUTIVE_LOSSES=${RISK_MAX_CONSECUTIVE_LOSSES:-3}
RISK_POSITION_SIZING_METHOD=volatility_adjusted
RISK_CIRCUIT_BREAKER_ENABLED=true
RISK_CIRCUIT_BREAKER_THRESHOLD=0.02
RISK_CIRCUIT_BREAKER_COOLDOWN=7200000
RISK_ALLOW_WEEKEND_TRADING=false
RISK_MARKET_HOURS_ONLY=true
# =======================================================================
# FEATURE FLAGS (Production)
# =======================================================================
ENABLE_ML_SIGNALS=${ENABLE_ML_SIGNALS:-false}
ENABLE_SENTIMENT_ANALYSIS=${ENABLE_SENTIMENT_ANALYSIS:-false}
ENABLE_SOCIAL_SIGNALS=${ENABLE_SOCIAL_SIGNALS:-false}
ENABLE_OPTIONS_TRADING=${ENABLE_OPTIONS_TRADING:-false}
ENABLE_CRYPTO_TRADING=${ENABLE_CRYPTO_TRADING:-false}
ENABLE_BACKTESTING=true
ENABLE_PAPER_TRADING=false
ENABLE_LIVE_TRADING=true
# =======================================================================
# PRODUCTION SETTINGS
# =======================================================================
DEBUG_MODE=false
VERBOSE_LOGGING=false
MOCK_DATA_PROVIDERS=false
ENABLE_API_RATE_LIMITING=true
ENABLE_REQUEST_LOGGING=false

135
.env.production Normal file
View file

@@ -0,0 +1,135 @@
# Production Environment Configuration
NODE_ENV=production
PORT=3001
# =============================================================================
# DATABASE CONFIGURATIONS
# =============================================================================
# PostgreSQL - Operational data (orders, positions, strategies)
DB_HOST=postgres
DB_PORT=5432
DB_NAME=trading_bot
DB_USER=trading_user
DB_PASSWORD=${POSTGRES_PASSWORD}
DB_POOL_MIN=5
DB_POOL_MAX=20
DB_SSL=true
DB_SSL_REJECT_UNAUTHORIZED=true
# QuestDB - Time-series data (OHLCV, indicators, performance)
QUESTDB_HOST=questdb
QUESTDB_HTTP_PORT=9000
QUESTDB_PG_PORT=8812
QUESTDB_INFLUX_PORT=9009
QUESTDB_DEFAULT_DATABASE=qdb
QUESTDB_TELEMETRY_ENABLED=false
QUESTDB_TLS_ENABLED=true
# MongoDB - Document storage (sentiment, raw docs, unstructured data)
MONGODB_HOST=mongodb
MONGODB_PORT=27017
MONGODB_DATABASE=trading_documents
MONGODB_USERNAME=${MONGODB_ROOT_USERNAME}
MONGODB_PASSWORD=${MONGODB_ROOT_PASSWORD}
MONGODB_AUTH_SOURCE=admin
MONGODB_TLS=true
MONGODB_RETRY_WRITES=true
# Dragonfly - Redis replacement for caching and events
DRAGONFLY_HOST=dragonfly
DRAGONFLY_PORT=6379
DRAGONFLY_PASSWORD=${DRAGONFLY_PASSWORD}
DRAGONFLY_DATABASE=0
DRAGONFLY_MAX_MEMORY=4gb
DRAGONFLY_CACHE_MODE=true
DRAGONFLY_TLS=true
# =============================================================================
# MONITORING & OBSERVABILITY
# =============================================================================
# Prometheus - Metrics collection
PROMETHEUS_HOST=prometheus
PROMETHEUS_PORT=9090
PROMETHEUS_SCRAPE_INTERVAL=30s
PROMETHEUS_RETENTION_TIME=90d
PROMETHEUS_TLS_ENABLED=true
# Grafana - Visualization
GRAFANA_HOST=grafana
GRAFANA_PORT=3000
GRAFANA_ADMIN_USER=${GRAFANA_ADMIN_USER}
GRAFANA_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD}
GRAFANA_ALLOW_SIGN_UP=false
GRAFANA_SECRET_KEY=${GRAFANA_SECRET_KEY}
GRAFANA_DATABASE_TYPE=postgres
GRAFANA_DISABLE_GRAVATAR=true
# Loki - Log aggregation
LOKI_HOST=loki
LOKI_PORT=3100
LOKI_RETENTION_PERIOD=90d
LOKI_BATCH_SIZE=2048
LOKI_TLS_ENABLED=true
# =============================================================================
# ADMIN INTERFACES (Disabled in production)
# =============================================================================
# PgAdmin - PostgreSQL GUI (disabled in production)
PGADMIN_HOST=pgadmin
PGADMIN_PORT=8080
PGADMIN_DEFAULT_EMAIL=${PGADMIN_EMAIL}
PGADMIN_DEFAULT_PASSWORD=${PGADMIN_PASSWORD}
PGADMIN_SERVER_MODE=true
PGADMIN_MASTER_PASSWORD_REQUIRED=true
# Mongo Express - MongoDB GUI (disabled in production)
MONGO_EXPRESS_HOST=mongo-express
MONGO_EXPRESS_PORT=8081
MONGO_EXPRESS_MONGODB_SERVER=mongodb
MONGO_EXPRESS_BASICAUTH_USERNAME=${MONGO_EXPRESS_USER}
MONGO_EXPRESS_BASICAUTH_PASSWORD=${MONGO_EXPRESS_PASSWORD}
# Redis Insight - Dragonfly/Redis GUI (disabled in production)
REDIS_INSIGHT_HOST=redis-insight
REDIS_INSIGHT_PORT=8001
REDIS_INSIGHT_REDIS_HOSTS=production:dragonfly:6379
# =============================================================================
# DATA PROVIDERS & TRADING
# =============================================================================
# API Keys (Set from environment variables)
ALPHA_VANTAGE_API_KEY=${ALPHA_VANTAGE_API_KEY}
ALPACA_API_KEY=${ALPACA_API_KEY}
ALPACA_SECRET_KEY=${ALPACA_SECRET_KEY}
POLYGON_API_KEY=${POLYGON_API_KEY}
IEX_API_KEY=${IEX_API_KEY}
YAHOO_FINANCE_API_KEY=${YAHOO_FINANCE_API_KEY}
# Trading Configuration
PAPER_TRADING=false
MAX_POSITION_SIZE=0.05
MAX_DAILY_LOSS=5000
RISK_MANAGEMENT_ENABLED=true
# =============================================================================
# APPLICATION SETTINGS
# =============================================================================
# Logging
LOG_LEVEL=info
LOG_FORMAT=json
# Feature Flags
ENABLE_ML_SIGNALS=true
ENABLE_SENTIMENT_ANALYSIS=true
ENABLE_RISK_MONITORING=true
ENABLE_PERFORMANCE_TRACKING=true
# Security
CORS_ALLOWED_ORIGINS=${CORS_ALLOWED_ORIGINS}
JWT_SECRET=${JWT_SECRET}
API_RATE_LIMIT=1000

View file

@@ -3,6 +3,9 @@
"workspaces": {
"": {
"name": "stock-bot",
"dependencies": {
"valibot": "^1.1.0",
},
"devDependencies": {
"@types/node": "^20.12.12",
"turbo": "^2.5.4",
@@ -243,6 +246,7 @@
"version": "1.0.0",
"dependencies": {
"dotenv": "^16.3.1",
"envalid": "^8.0.0",
"zod": "^3.22.4",
},
"devDependencies": {
@@ -1135,6 +1139,8 @@
"env-paths": ["env-paths@2.2.1", "", {}, "sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A=="],
"envalid": ["envalid@8.0.0", "", { "dependencies": { "tslib": "2.6.2" } }, "sha512-PGeYJnJB5naN0ME6SH8nFcDj9HVbLpYIfg1p5lAyM9T4cH2lwtu2fLbozC/bq+HUUOIFxhX/LP0/GmlqPHT4tQ=="],
"environment": ["environment@1.1.0", "", {}, "sha512-xUtoPkMggbz0MPyPiIWr1Kp4aeWJjDZ6SMvURhimjdZgsRuDplF5/s9hcgGhyXMhs+6vpnuoiZ2kFiu3FMnS8Q=="],
"err-code": ["err-code@2.0.3", "", {}, "sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA=="],
@@ -1985,6 +1991,8 @@
"v8-to-istanbul": ["v8-to-istanbul@9.3.0", "", { "dependencies": { "@jridgewell/trace-mapping": "^0.3.12", "@types/istanbul-lib-coverage": "^2.0.1", "convert-source-map": "^2.0.0" } }, "sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA=="],
"valibot": ["valibot@1.1.0", "", { "peerDependencies": { "typescript": ">=5" }, "optionalPeers": ["typescript"] }, "sha512-Nk8lX30Qhu+9txPYTwM0cFlWLdPFsFr6LblzqIySfbZph9+BFsAHsNvHOymEviUepeIW6KFHzpX8TKhbptBXXw=="],
"validate-npm-package-license": ["validate-npm-package-license@3.0.4", "", { "dependencies": { "spdx-correct": "^3.0.0", "spdx-expression-parse": "^3.0.0" } }, "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew=="],
"validate-npm-package-name": ["validate-npm-package-name@6.0.0", "", {}, "sha512-d7KLgL1LD3U3fgnvWEY1cQXoO/q6EQ1BSz48Sa149V/5zVTAbgmZIpyI8TRi6U9/JNyeYLlTKsEMPtLC27RFUg=="],
@@ -2141,6 +2149,8 @@
"ent/punycode": ["punycode@1.4.1", "", {}, "sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ=="],
"envalid/tslib": ["tslib@2.6.2", "", {}, "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q=="],
"execa/signal-exit": ["signal-exit@3.0.7", "", {}, "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ=="],
"external-editor/tmp": ["tmp@0.0.33", "", { "dependencies": { "os-tmpdir": "~1.0.2" } }, "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw=="],

View file

@@ -1,46 +0,0 @@
version: '3.8'
services:
# Loki - Log aggregation
loki:
image: grafana/loki:2.9.2
container_name: trading-bot-loki
ports:
- "3100:3100"
volumes:
- loki_data:/loki
- ./monitoring/loki:/etc/loki
command: -config.file=/etc/loki/loki-config.yaml
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://localhost:3100/ready"]
interval: 30s
timeout: 10s
retries: 3
networks:
- trading-bot-network
# Grafana - Visualization for logs and metrics
grafana:
image: grafana/grafana:10.2.0
container_name: trading-bot-grafana
ports:
- "3000:3000"
environment:
- GF_SECURITY_ADMIN_PASSWORD=admin
- GF_SECURITY_ADMIN_USER=admin
- GF_PATHS_PROVISIONING=/etc/grafana/provisioning
volumes:
- grafana_data:/var/lib/grafana
- ./monitoring/grafana/provisioning:/etc/grafana/provisioning
depends_on:
- loki
networks:
- trading-bot-network
volumes:
loki_data:
grafana_data:
networks:
trading-bot-network:
external: true

View file

@@ -1,46 +0,0 @@
version: '3.8'
services:
# Loki - Log aggregation
loki:
image: grafana/loki:2.9.2
container_name: trading-bot-loki
ports:
- "3100:3100"
volumes:
- loki_data:/loki
- ./monitoring/loki:/etc/loki
command: -config.file=/etc/loki/loki-config.yaml
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://localhost:3100/ready"]
interval: 30s
timeout: 10s
retries: 3
networks:
- trading-bot-network
# Grafana - Visualization for logs and metrics
grafana:
image: grafana/grafana:10.2.0
container_name: trading-bot-grafana
ports:
- "3000:3000"
environment:
- GF_SECURITY_ADMIN_PASSWORD=admin
- GF_SECURITY_ADMIN_USER=admin
- GF_PATHS_PROVISIONING=/etc/grafana/provisioning
volumes:
- grafana_data:/var/lib/grafana
- ./monitoring/grafana/provisioning:/etc/grafana/provisioning
depends_on:
- loki
networks:
- trading-bot-network
volumes:
loki_data:
grafana_data:
networks:
trading-bot-network:
external: true

View file

@@ -1,244 +0,0 @@
services:
# Dragonfly - Redis replacement for caching and events
dragonfly:
image: docker.dragonflydb.io/dragonflydb/dragonfly:latest
container_name: trading-bot-dragonfly
ports:
- "6379:6379"
command:
- dragonfly
- --logtostderr
- --cache_mode=true
- --maxmemory=2gb
- --proactor_threads=8
- --bind=0.0.0.0
volumes:
- dragonfly_data:/data
restart: unless-stopped
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 30s
timeout: 10s
retries: 3
networks:
- trading-bot-network
# PostgreSQL - Operational data (orders, positions, strategies)
postgres:
image: postgres:16-alpine
container_name: trading-bot-postgres
environment:
POSTGRES_DB: trading_bot
POSTGRES_USER: trading_user
POSTGRES_PASSWORD: trading_pass_dev
POSTGRES_INITDB_ARGS: "--encoding=UTF-8"
ports:
- "5432:5432"
volumes:
- postgres_data:/var/lib/postgresql/data
- ./database/postgres/init:/docker-entrypoint-initdb.d
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "pg_isready -U trading_user -d trading_bot"]
interval: 30s
timeout: 10s
retries: 3
networks:
- trading-bot-network
# QuestDB - Time-series data (OHLCV, indicators, performance)
questdb:
image: questdb/questdb:latest
container_name: trading-bot-questdb
ports:
- "9000:9000" # Web console
- "8812:8812" # PostgreSQL wire protocol
- "9009:9009" # InfluxDB line protocol
volumes:
- questdb_data:/var/lib/questdb
environment:
- QDB_TELEMETRY_ENABLED=false
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/status"]
interval: 30s
timeout: 10s
retries: 3
networks:
- trading-bot-network
# MongoDB - Document storage (sentiment, raw docs, unstructured data)
mongodb:
image: mongo:7-jammy
container_name: trading-bot-mongodb
environment:
MONGO_INITDB_ROOT_USERNAME: trading_admin
MONGO_INITDB_ROOT_PASSWORD: trading_mongo_dev
MONGO_INITDB_DATABASE: trading_documents
ports:
- "27017:27017"
volumes:
- mongodb_data:/data/db
- ./database/mongodb/init:/docker-entrypoint-initdb.d
restart: unless-stopped
healthcheck:
test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"]
interval: 30s
timeout: 10s
retries: 3
networks:
- trading-bot-network
# Redis Insight - GUI for Dragonfly debugging
redis-insight:
image: redislabs/redisinsight:latest
container_name: trading-bot-redis-insight
ports:
- "8001:8001"
environment:
- REDIS_HOSTS=local:dragonfly:6379
depends_on:
- dragonfly
restart: unless-stopped
networks:
- trading-bot-network
# PgAdmin - PostgreSQL GUI
pgadmin:
image: dpage/pgadmin4:latest
container_name: trading-bot-pgadmin
environment:
PGADMIN_DEFAULT_EMAIL: boki@stare.gg
PGADMIN_DEFAULT_PASSWORD: admin123
PGADMIN_CONFIG_SERVER_MODE: 'False'
PGADMIN_DISABLE_POSTFIX: 'true'
ports:
- "8080:80"
volumes:
- pgadmin_data:/var/lib/pgadmin
depends_on:
- postgres
restart: unless-stopped
networks:
- trading-bot-network
# Mongo Express - MongoDB GUI
mongo-express:
image: mongo-express:latest
container_name: trading-bot-mongo-express
environment:
ME_CONFIG_MONGODB_ADMINUSERNAME: trading_admin
ME_CONFIG_MONGODB_ADMINPASSWORD: trading_mongo_dev
ME_CONFIG_MONGODB_SERVER: mongodb
ME_CONFIG_MONGODB_PORT: 27017
ME_CONFIG_BASICAUTH_USERNAME: boki
ME_CONFIG_BASICAUTH_PASSWORD: admin123
ports:
- "8081:8081"
depends_on:
- mongodb
restart: unless-stopped
networks:
- trading-bot-network
# Prometheus - Metrics collection (optional)
prometheus:
image: prom/prometheus:latest
container_name: trading-bot-prometheus
ports:
- "9090:9090"
volumes:
- ./monitoring/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus_data:/prometheus
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--web.console.libraries=/etc/prometheus/console_libraries'
- '--web.console.templates=/etc/prometheus/consoles'
- '--web.enable-lifecycle'
restart: unless-stopped
networks:
- trading-bot-network
# Loki - Log aggregation
loki:
image: grafana/loki:2.9.2
container_name: trading-bot-loki
ports:
- "3100:3100"
volumes:
- loki_data:/loki
- ./monitoring/loki:/etc/loki
command: -config.file=/etc/loki/loki-config.yaml
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://localhost:3100/ready"]
interval: 30s
timeout: 10s
retries: 3
restart: unless-stopped
networks:
- trading-bot-network
# Grafana - Visualization for logs and metrics
grafana:
image: grafana/grafana:10.2.0
container_name: trading-bot-grafana
ports:
- "3000:3000"
environment:
- GF_SECURITY_ADMIN_PASSWORD=admin
- GF_SECURITY_ADMIN_USER=admin
- GF_PATHS_PROVISIONING=/etc/grafana/provisioning
- GF_USERS_ALLOW_SIGN_UP=false
volumes:
- grafana_data:/var/lib/grafana
- ./monitoring/grafana/provisioning:/etc/grafana/provisioning
depends_on:
- prometheus
- loki
restart: unless-stopped
networks:
- trading-bot-network
volumes:
postgres_data:
questdb_data:
dragonfly_data:
mongodb_data:
pgadmin_data:
prometheus_data:
grafana_data:
loki_data:
- "3100:3100"
volumes:
- loki_data:/loki
- ./monitoring/loki:/etc/loki
command: -config.file=/etc/loki/loki-config.yaml
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://localhost:3100/ready"]
interval: 30s
timeout: 10s
retries: 3
networks:
- trading-bot-network
# Grafana - Visualization for logs and metrics
grafana:
image: grafana/grafana:10.2.0
container_name: trading-bot-grafana
ports:
- "3000:3000"
environment:
- GF_SECURITY_ADMIN_PASSWORD=admin
- GF_SECURITY_ADMIN_USER=admin
- GF_PATHS_PROVISIONING=/etc/grafana/provisioning
volumes:
- grafana_data:/var/lib/grafana
- ./monitoring/grafana/provisioning:/etc/grafana/provisioning
depends_on:
- loki
networks:
- trading-bot-network
networks:
trading-bot-network:
driver: bridge

View file

@ -1,217 +0,0 @@
# Local development stack for the trading-bot platform: datastores
# (Dragonfly, PostgreSQL, QuestDB, MongoDB), admin GUIs, and the
# monitoring trio (Prometheus, Loki, Grafana), all on one bridge network.
# NOTE(review): credentials below (admin/admin, admin123, trading_pass_dev)
# are development defaults only — never reuse them outside localhost.
# NOTE(review): several images are pinned to `latest`, so rebuilds are not
# reproducible — consider pinning versions; confirm with the team.
version: '3.8'
services:
  # Dragonfly - Redis replacement for caching and events
  dragonfly:
    image: docker.dragonflydb.io/dragonflydb/dragonfly:latest
    container_name: trading-bot-dragonfly
    ports:
      - "6379:6379"
    command:
      - dragonfly
      - --logtostderr
      - --cache_mode=true
      - --maxmemory=2gb
      - --proactor_threads=8
      - --bind=0.0.0.0
    volumes:
      - dragonfly_data:/data
    restart: unless-stopped
    healthcheck:
      # Dragonfly speaks the Redis protocol, so redis-cli works as a probe
      test: ["CMD", "redis-cli", "ping"]
      interval: 30s
      timeout: 10s
      retries: 3
    networks:
      - trading-bot-network
  # PostgreSQL - Operational data (orders, positions, strategies)
  postgres:
    image: postgres:16-alpine
    container_name: trading-bot-postgres
    environment:
      POSTGRES_DB: trading_bot
      POSTGRES_USER: trading_user
      POSTGRES_PASSWORD: trading_pass_dev
      POSTGRES_INITDB_ARGS: "--encoding=UTF-8"
    ports:
      - "5432:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
      # SQL/shell scripts here run once on first container init
      - ./database/postgres/init:/docker-entrypoint-initdb.d
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U trading_user -d trading_bot"]
      interval: 30s
      timeout: 10s
      retries: 3
    networks:
      - trading-bot-network
  # QuestDB - Time-series data (OHLCV, indicators, performance)
  questdb:
    image: questdb/questdb:latest
    container_name: trading-bot-questdb
    ports:
      - "9000:9000" # Web console
      - "8812:8812" # PostgreSQL wire protocol
      - "9009:9009" # InfluxDB line protocol
    volumes:
      - questdb_data:/var/lib/questdb
    environment:
      - QDB_TELEMETRY_ENABLED=false
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/status"]
      interval: 30s
      timeout: 10s
      retries: 3
    networks:
      - trading-bot-network
  # MongoDB - Document storage (sentiment, raw docs, unstructured data)
  mongodb:
    image: mongo:7-jammy
    container_name: trading-bot-mongodb
    environment:
      MONGO_INITDB_ROOT_USERNAME: trading_admin
      MONGO_INITDB_ROOT_PASSWORD: trading_mongo_dev
      MONGO_INITDB_DATABASE: trading_documents
    ports:
      - "27017:27017"
    volumes:
      - mongodb_data:/data/db
      # JS init scripts here run once on first container init
      - ./database/mongodb/init:/docker-entrypoint-initdb.d
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"]
      interval: 30s
      timeout: 10s
      retries: 3
    networks:
      - trading-bot-network
  # Redis Insight - GUI for Dragonfly debugging
  redis-insight:
    image: redislabs/redisinsight:latest
    container_name: trading-bot-redis-insight
    ports:
      - "8001:8001"
    environment:
      # Format is alias:host:port; resolves via the compose network
      - REDIS_HOSTS=local:dragonfly:6379
    depends_on:
      - dragonfly
    restart: unless-stopped
    networks:
      - trading-bot-network
  # PgAdmin - PostgreSQL GUI
  pgadmin:
    image: dpage/pgadmin4:latest
    container_name: trading-bot-pgadmin
    environment:
      PGADMIN_DEFAULT_EMAIL: admin@tradingbot.local
      # NOTE(review): dev-only default password
      PGADMIN_DEFAULT_PASSWORD: admin123
      # Desktop (single-user) mode; no login screen for extra users
      PGADMIN_CONFIG_SERVER_MODE: 'False'
      PGADMIN_DISABLE_POSTFIX: 'true'
    ports:
      - "8080:80"
    volumes:
      - pgadmin_data:/var/lib/pgadmin
    depends_on:
      - postgres
    restart: unless-stopped
    networks:
      - trading-bot-network
  # Mongo Express - MongoDB GUI
  mongo-express:
    image: mongo-express:latest
    container_name: trading-bot-mongo-express
    environment:
      # Must match the MongoDB root credentials above
      ME_CONFIG_MONGODB_ADMINUSERNAME: trading_admin
      ME_CONFIG_MONGODB_ADMINPASSWORD: trading_mongo_dev
      ME_CONFIG_MONGODB_SERVER: mongodb
      ME_CONFIG_MONGODB_PORT: 27017
      # NOTE(review): dev-only basic-auth credentials
      ME_CONFIG_BASICAUTH_USERNAME: admin
      ME_CONFIG_BASICAUTH_PASSWORD: admin123
    ports:
      - "8081:8081"
    depends_on:
      - mongodb
    restart: unless-stopped
    networks:
      - trading-bot-network
  # Prometheus - Metrics collection
  prometheus:
    image: prom/prometheus:latest
    container_name: trading-bot-prometheus
    ports:
      - "9090:9090"
    volumes:
      - ./monitoring/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
      # Allows config reload via HTTP POST to /-/reload
      - '--web.enable-lifecycle'
    restart: unless-stopped
    networks:
      - trading-bot-network
  # Loki - Log aggregation
  loki:
    image: grafana/loki:2.9.2
    container_name: trading-bot-loki
    ports:
      - "3100:3100"
    volumes:
      - loki_data:/loki
      - ./monitoring/loki:/etc/loki
    command: -config.file=/etc/loki/loki-config.yaml
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:3100/ready"]
      interval: 30s
      timeout: 10s
      retries: 3
    restart: unless-stopped
    networks:
      - trading-bot-network
  # Grafana - Visualization for logs and metrics
  grafana:
    image: grafana/grafana:10.2.0
    container_name: trading-bot-grafana
    ports:
      - "3000:3000"
    environment:
      # NOTE(review): admin/admin is a dev-only default — change before exposing
      - GF_SECURITY_ADMIN_PASSWORD=admin
      - GF_SECURITY_ADMIN_USER=admin
      - GF_PATHS_PROVISIONING=/etc/grafana/provisioning
      - GF_USERS_ALLOW_SIGN_UP=false
    volumes:
      - grafana_data:/var/lib/grafana
      # Pre-provisioned datasources/dashboards (Prometheus + Loki)
      - ./monitoring/grafana/provisioning:/etc/grafana/provisioning
    depends_on:
      - prometheus
      - loki
    restart: unless-stopped
    networks:
      - trading-bot-network
# Named volumes keep data across container restarts/recreates
volumes:
  postgres_data:
  questdb_data:
  dragonfly_data:
  mongodb_data:
  pgadmin_data:
  prometheus_data:
  grafana_data:
  loki_data:
networks:
  trading-bot-network:
    driver: bridge

View file

@ -0,0 +1,238 @@
# Stock Bot Multi-Database Architecture Documentation
## Overview
The Stock Bot platform uses a sophisticated multi-database architecture designed to handle different types of data efficiently. This document outlines the configuration system, database choices, and monitoring setup.
## Configuration System
### Migration from Custom Config to Envalid
The platform has migrated from a complex Zod-based configuration system to a simpler, more maintainable **envalid** approach:
```typescript
// New configuration pattern used throughout
export const configName = cleanEnv(process.env, {
ENV_VAR: str({ default: 'value', desc: 'Description' }),
NUMERIC_VAR: num({ default: 3000, desc: 'Port number' }),
BOOLEAN_VAR: bool({ default: false, desc: 'Feature flag' }),
});
```
### Configuration Modules
| Module | Purpose | File |
|--------|---------|------|
| `database` | PostgreSQL operational data | `libs/config/src/database.ts` |
| `questdb` | Time-series data storage | `libs/config/src/questdb.ts` |
| `mongodb` | Document and unstructured data | `libs/config/src/mongodb.ts` |
| `dragonfly` | Caching and event streaming | `libs/config/src/dragonfly.ts` |
| `monitoring` | Prometheus and Grafana | `libs/config/src/monitoring.ts` |
| `loki` | Log aggregation | `libs/config/src/loki.ts` |
| `logging` | Application logging | `libs/config/src/logging.ts` |
## Database Architecture
### 1. PostgreSQL - Operational Data Store
**Purpose**: Primary relational database for structured operational data
- **Data Types**: Orders, positions, strategies, user accounts, trading rules
- **Strengths**: ACID compliance, complex queries, transactions
- **Configuration**: `libs/config/src/database.ts`
```typescript
// Example usage
import { databaseConfig } from '@trading-bot/config';
// Connects to operational PostgreSQL instance
```
### 2. QuestDB - Time-Series Database
**Purpose**: High-performance time-series data storage
- **Data Types**: OHLCV data, technical indicators, performance metrics, tick data
- **Strengths**: Fast ingestion, SQL queries on time-series, columnar storage
- **Configuration**: `libs/config/src/questdb.ts`
```typescript
// Example usage
import { questdbConfig } from '@trading-bot/config';
// Optimized for time-series queries and analytics
```
### 3. MongoDB - Document Store
**Purpose**: Flexible document storage for unstructured data
- **Data Types**: Market sentiment, news articles, research reports, ML model outputs
- **Strengths**: Schema flexibility, horizontal scaling, complex document queries
- **Configuration**: `libs/config/src/mongodb.ts`
```typescript
// Example usage
import { mongodbConfig } from '@trading-bot/config';
// Handles variable schema and complex nested data
```
### 4. Dragonfly - Cache & Event Store
**Purpose**: High-performance caching and real-time event streaming
- **Data Types**: Market data cache, session data, real-time events, pub/sub messages
- **Strengths**: Redis compatibility, better performance, memory efficiency
- **Configuration**: `libs/config/src/dragonfly.ts`
```typescript
// Example usage
import { dragonflyConfig } from '@trading-bot/config';
// Drop-in Redis replacement with better performance
```
## Monitoring & Observability Stack
### Prometheus - Metrics Collection
- **Purpose**: Time-series metrics and monitoring
- **Metrics**: System performance, trading metrics, database metrics
- **Configuration**: `libs/config/src/monitoring.ts`
### Grafana - Visualization
- **Purpose**: Dashboards and alerting
- **Dashboards**: Trading performance, system health, database monitoring
- **Configuration**: `libs/config/src/monitoring.ts`
### Loki - Log Aggregation
- **Purpose**: Centralized log collection and analysis
- **Logs**: Application logs, database logs, system logs
- **Configuration**: `libs/config/src/loki.ts`
### Application Logging
- **Purpose**: Structured application logging
- **Features**: Multiple formats, file rotation, log levels
- **Configuration**: `libs/config/src/logging.ts`
## Environment Files
### `.env` - Development (Local)
- **Purpose**: Local development with services on localhost
- **Databases**: All services running on localhost with standard ports
- **Logging**: Pretty-formatted console output
### `.env.docker` - Docker Compose
- **Purpose**: Container orchestration with Docker Compose
- **Databases**: Container names as hostnames (e.g., `postgres`, `mongodb`)
- **Features**: Health checks, resource limits, volume mounts
### `.env.complete` - Full Development
- **Purpose**: Complete feature set for development testing
- **Features**: All services enabled, verbose logging, debug mode
- **Use Case**: Testing the full platform locally
### `.env.prod` - Production
- **Purpose**: Production deployment configuration
- **Security**: Environment variable references, secure defaults
- **Features**: Optimized logging, monitoring enabled
## Admin Interfaces
### PgAdmin - PostgreSQL Management
- **URL**: http://localhost:8080 (development)
- **Purpose**: Database administration, query execution, monitoring
### Mongo Express - MongoDB Management
- **URL**: http://localhost:8081 (development)
- **Purpose**: Document browsing, collection management, query testing
### Redis Insight - Dragonfly/Redis Management
- **URL**: http://localhost:8001 (development)
- **Purpose**: Cache monitoring, key browsing, performance analysis
## Data Flow Architecture
```
┌─────────────────┐ ┌──────────────────┐ ┌─────────────────┐
│ Market Data │───▶│ Dragonfly │───▶│ QuestDB │
│ Feed │ │ (Cache/Events) │ │ (Time-Series) │
└─────────────────┘ └──────────────────┘ └─────────────────┘
┌─────────────────┐ ┌──────────────────┐ ┌─────────────────┐
│ Trading │◀──▶│ PostgreSQL │ │ MongoDB │
│ Engine │ │ (Operational) │ │ (Documents) │
└─────────────────┘ └──────────────────┘ └─────────────────┘
│ ▲
▼ │
┌─────────────────┐ ┌──────────────────┐ │
│ Monitoring │◀───│ Prometheus │ │
│ Dashboard │ │ (Metrics) │ │
└─────────────────┘ └──────────────────┘ │
┌─────────────────┐ ┌──────────────────┐ │
│ Log Analysis │◀───│ Loki │───────────┘
│ │ │ (Logs) │
└─────────────────┘ └──────────────────┘
```
## Best Practices
### Database Selection Guidelines
1. **PostgreSQL**: Use for transactional data requiring ACID properties
- Orders, positions, account balances, strategy configurations
2. **QuestDB**: Use for time-series data requiring fast analytics
- OHLCV data, technical indicators, performance metrics
3. **MongoDB**: Use for flexible, document-based data
- Market sentiment, news articles, ML model outputs
4. **Dragonfly**: Use for temporary data requiring fast access
- Real-time market data cache, session data, event streams
### Configuration Best Practices
1. **Environment Separation**: Use appropriate `.env` file for each environment
2. **Security**: Never commit sensitive credentials to version control
3. **Validation**: All configuration uses envalid for runtime validation
4. **Documentation**: Each config variable includes descriptive help text
### Monitoring Best Practices
1. **Metrics**: Monitor database performance, trading metrics, system health
2. **Logging**: Use structured logging with appropriate log levels
3. **Alerting**: Set up Grafana alerts for critical system metrics
4. **Log Retention**: Configure appropriate retention periods for each environment
## Migration Guide
If migrating from the old configuration system:
1. **Update Imports**: Change from custom config to new envalid-based modules
2. **Environment Variables**: Update `.env` files to include all new services
3. **Docker Setup**: Use `.env.docker` for container-based deployments
4. **Monitoring**: Enable Prometheus, Grafana, and Loki for observability
## Troubleshooting
### Common Issues
1. **Connection Failures**: Check container names in Docker environments
2. **Port Conflicts**: Verify port mappings in environment files
3. **Permission Errors**: Ensure proper database credentials and permissions
4. **Memory Issues**: Adjust resource limits in Docker configuration
### Debug Commands
```bash
# Check container status
docker-compose ps
# View container logs
docker-compose logs [service-name]
# Test database connections
docker-compose exec postgres pg_isready
docker-compose exec mongodb mongosh --eval "db.runCommand('ping')"
docker-compose exec questdb curl -f http://localhost:9000/status
docker-compose exec dragonfly redis-cli ping
```
## Future Enhancements
1. **Database Sharding**: Implement horizontal scaling for high-volume data
2. **Read Replicas**: Add read replicas for improved query performance
3. **Backup Strategy**: Implement automated backup and recovery procedures
4. **Security**: Add encryption at rest and in transit
5. **Performance**: Implement connection pooling and query optimization

131
libs/config/USAGE.md Normal file
View file

@ -0,0 +1,131 @@
# Stock Bot Configuration Library Usage Guide
This guide shows how to use the envalid-based configuration system in the Stock Bot platform.
## Quick Start
```typescript
import { databaseConfig, loggingConfig, riskConfig, dataProvidersConfig } from '@stock-bot/config';
// Access individual values
console.log(`Database: ${databaseConfig.DB_HOST}:${databaseConfig.DB_PORT}`);
console.log(`Log level: ${loggingConfig.LOG_LEVEL}`);
console.log(`Max position size: ${riskConfig.RISK_MAX_POSITION_SIZE}`);
```
## Environment Variables
All configuration is driven by environment variables. You can set them in:
- `.env` files
- System environment variables
- Docker environment variables
### Database Configuration
```bash
DB_HOST=localhost
DB_PORT=5432
DB_NAME=stockbot
DB_USER=stockbot
DB_PASSWORD=your_password
DB_SSL=false
DB_POOL_MAX=10
```
### Logging Configuration
```bash
LOG_LEVEL=info
LOG_CONSOLE=true
LOKI_HOST=localhost
LOKI_PORT=3100
LOKI_LABELS=service=market-data-gateway,version=1.0.0
```
### Risk Management Configuration
```bash
RISK_MAX_POSITION_SIZE=0.1
RISK_DEFAULT_STOP_LOSS=0.05
RISK_DEFAULT_TAKE_PROFIT=0.15
RISK_CIRCUIT_BREAKER_ENABLED=true
```
### Data Provider Configuration
```bash
DEFAULT_DATA_PROVIDER=alpaca
ALPACA_API_KEY=your_api_key
ALPACA_API_SECRET=your_api_secret
ALPACA_ENABLED=true
POLYGON_ENABLED=false
```
## Advanced Usage
### Type Safety
All configurations are fully typed:
```typescript
import type { DatabaseConfig, LoggingConfig, RiskConfig } from '@stock-bot/config';
function setupDatabase(config: DatabaseConfig) {
// TypeScript knows all the available properties
return {
host: config.DB_HOST,
port: config.DB_PORT, // number
ssl: config.DB_SSL, // boolean
};
}
```
### Environment Detection
```typescript
import { getEnvironment, Environment } from '@stock-bot/config';
const env = getEnvironment();
if (env === Environment.Production) {
// Production-specific logic
}
```
### Data Provider Helpers
```typescript
import { getProviderConfig, getEnabledProviders, getDefaultProvider } from '@stock-bot/config';
// Get specific provider
const alpaca = getProviderConfig('alpaca');
// Get all enabled providers
const providers = getEnabledProviders();
// Get default provider
const defaultProvider = getDefaultProvider();
```
## Configuration Files
The library consists of these modules:
- **core.ts** - Core utilities and environment detection
- **database.ts** - Database connection settings
- **logging.ts** - Logging and Loki configuration
- **risk.ts** - Risk management parameters
- **data-providers.ts** - Data provider settings
## Benefits of This Approach
1. **Zero Configuration Schema** - No complex schema definitions needed
2. **Automatic Type Inference** - TypeScript types are generated automatically
3. **Environment Variable Validation** - Invalid values are caught at startup
4. **Great Developer Experience** - IntelliSense works perfectly
5. **Production Ready** - Used by many large-scale applications
## Migration from Previous System
If you're migrating from the old Zod-based system:
```typescript
// Old way
const config = createConfigLoader('database', databaseSchema, defaultConfig)();
// New way
import { databaseConfig } from '@stock-bot/config';
// That's it! No separate schema or custom loader needed — envalid validates the environment automatically at startup.
```

View file

@ -12,6 +12,7 @@
},
"dependencies": {
"dotenv": "^16.3.1",
"envalid": "^8.0.0",
"zod": "^3.22.4"
},
"devDependencies": {

View file

@ -0,0 +1,119 @@
/**
 * Admin interfaces configuration using envalid
 * PgAdmin, Mongo Express, Redis Insight for database management
 *
 * Each `cleanEnv` call validates process.env at module load time and
 * returns a typed, frozen config object; invalid values fail fast.
 * NOTE(review): defaults such as 'admin123' are development-only —
 * override them in any shared or production environment.
 */
import { cleanEnv, str, port, bool, num } from 'envalid';

/**
 * PgAdmin configuration with validation and defaults
 */
export const pgAdminConfig = cleanEnv(process.env, {
  // PgAdmin Server
  PGADMIN_HOST: str({ default: 'localhost', desc: 'PgAdmin host' }),
  PGADMIN_PORT: port({ default: 8080, desc: 'PgAdmin port' }),
  // Authentication
  PGADMIN_DEFAULT_EMAIL: str({ default: 'admin@tradingbot.local', desc: 'PgAdmin default admin email' }),
  PGADMIN_DEFAULT_PASSWORD: str({ default: 'admin123', desc: 'PgAdmin default admin password' }),
  // Configuration
  PGADMIN_SERVER_MODE: bool({ default: false, desc: 'Enable server mode (multi-user)' }),
  PGADMIN_DISABLE_POSTFIX: bool({ default: true, desc: 'Disable postfix for email' }),
  PGADMIN_CONFIG_ENHANCED_COOKIE_PROTECTION: bool({ default: true, desc: 'Enhanced cookie protection' }),
  // Security
  PGADMIN_MASTER_PASSWORD_REQUIRED: bool({ default: false, desc: 'Require master password' }),
  // Validated as a number for consistency with the other numeric
  // settings in this library (was previously a raw string '60').
  PGADMIN_SESSION_TIMEOUT: num({ default: 60, desc: 'Session timeout in minutes' }),
});

/**
 * Mongo Express configuration with validation and defaults
 */
export const mongoExpressConfig = cleanEnv(process.env, {
  // Mongo Express Server
  MONGO_EXPRESS_HOST: str({ default: 'localhost', desc: 'Mongo Express host' }),
  MONGO_EXPRESS_PORT: port({ default: 8081, desc: 'Mongo Express port' }),
  // MongoDB Connection
  MONGO_EXPRESS_MONGODB_SERVER: str({ default: 'mongodb', desc: 'MongoDB server name/host' }),
  MONGO_EXPRESS_MONGODB_PORT: port({ default: 27017, desc: 'MongoDB port' }),
  MONGO_EXPRESS_MONGODB_ADMINUSERNAME: str({ default: 'trading_admin', desc: 'MongoDB admin username' }),
  MONGO_EXPRESS_MONGODB_ADMINPASSWORD: str({ default: '', desc: 'MongoDB admin password' }),
  // Basic Authentication for Mongo Express
  MONGO_EXPRESS_BASICAUTH_USERNAME: str({ default: 'admin', desc: 'Basic auth username for Mongo Express' }),
  MONGO_EXPRESS_BASICAUTH_PASSWORD: str({ default: 'admin123', desc: 'Basic auth password for Mongo Express' }),
  // Configuration
  MONGO_EXPRESS_ENABLE_ADMIN: bool({ default: true, desc: 'Enable admin features' }),
  MONGO_EXPRESS_OPTIONS_EDITOR_THEME: str({
    default: 'rubyblue',
    desc: 'Editor theme (rubyblue, 3024-night, etc.)'
  }),
  MONGO_EXPRESS_REQUEST_SIZE: str({ default: '100kb', desc: 'Maximum request size' }),
});

/**
 * Redis Insight configuration with validation and defaults
 */
export const redisInsightConfig = cleanEnv(process.env, {
  // Redis Insight Server
  REDIS_INSIGHT_HOST: str({ default: 'localhost', desc: 'Redis Insight host' }),
  REDIS_INSIGHT_PORT: port({ default: 8001, desc: 'Redis Insight port' }),
  // Redis Connection Settings
  REDIS_INSIGHT_REDIS_HOSTS: str({
    default: 'local:dragonfly:6379',
    desc: 'Redis hosts in format name:host:port,name:host:port'
  }),
  // Configuration
  REDIS_INSIGHT_LOG_LEVEL: str({
    default: 'info',
    choices: ['error', 'warn', 'info', 'verbose', 'debug'],
    desc: 'Redis Insight log level'
  }),
  REDIS_INSIGHT_DISABLE_ANALYTICS: bool({ default: true, desc: 'Disable analytics collection' }),
  REDIS_INSIGHT_BUILD_TYPE: str({ default: 'DOCKER', desc: 'Build type identifier' }),
});

// Export typed configuration objects
export type PgAdminConfig = typeof pgAdminConfig;
export type MongoExpressConfig = typeof mongoExpressConfig;
export type RedisInsightConfig = typeof redisInsightConfig;

// Export individual config values for convenience
export const {
  PGADMIN_HOST,
  PGADMIN_PORT,
  PGADMIN_DEFAULT_EMAIL,
  PGADMIN_DEFAULT_PASSWORD,
  PGADMIN_SERVER_MODE,
  PGADMIN_DISABLE_POSTFIX,
  PGADMIN_CONFIG_ENHANCED_COOKIE_PROTECTION,
  PGADMIN_MASTER_PASSWORD_REQUIRED,
  PGADMIN_SESSION_TIMEOUT,
} = pgAdminConfig;

export const {
  MONGO_EXPRESS_HOST,
  MONGO_EXPRESS_PORT,
  MONGO_EXPRESS_MONGODB_SERVER,
  MONGO_EXPRESS_MONGODB_PORT,
  MONGO_EXPRESS_MONGODB_ADMINUSERNAME,
  MONGO_EXPRESS_MONGODB_ADMINPASSWORD,
  MONGO_EXPRESS_BASICAUTH_USERNAME,
  MONGO_EXPRESS_BASICAUTH_PASSWORD,
  MONGO_EXPRESS_ENABLE_ADMIN,
  MONGO_EXPRESS_OPTIONS_EDITOR_THEME,
  MONGO_EXPRESS_REQUEST_SIZE,
} = mongoExpressConfig;

export const {
  REDIS_INSIGHT_HOST,
  REDIS_INSIGHT_PORT,
  REDIS_INSIGHT_REDIS_HOSTS,
  REDIS_INSIGHT_LOG_LEVEL,
  REDIS_INSIGHT_DISABLE_ANALYTICS,
  REDIS_INSIGHT_BUILD_TYPE,
} = redisInsightConfig;

View file

@ -1,136 +0,0 @@
/**
* Tests for the configuration library
*/
import { describe, expect, test, beforeAll, afterAll } from 'bun:test';
import {
getEnvironment,
validateConfig,
ConfigurationError,
loadEnvVariables,
getEnvVar,
getNumericEnvVar,
getBooleanEnvVar
} from './core';
import { Environment, databaseConfigSchema } from './types';
// Exercises the core env helpers against a mutated process.env;
// beforeAll/afterAll snapshot and restore the real environment.
describe('Core configuration', () => {
  // Save original environment variables
  const originalEnv = { ...process.env };
  // Setup test environment variables
  beforeAll(() => {
    process.env.NODE_ENV = 'testing';
    process.env.TEST_STRING = 'test-value';
    process.env.TEST_NUMBER = '42';
    process.env.TEST_BOOL_TRUE = 'true';
    process.env.TEST_BOOL_FALSE = 'false';
  });
  // Restore original environment variables
  afterAll(() => {
    process.env = { ...originalEnv };
  });
  test('getEnvironment returns correct environment', () => {
    expect(getEnvironment()).toBe(Environment.Testing);
    // Test different environments
    process.env.NODE_ENV = 'development';
    expect(getEnvironment()).toBe(Environment.Development);
    process.env.NODE_ENV = 'production';
    expect(getEnvironment()).toBe(Environment.Production);
    process.env.NODE_ENV = 'staging';
    expect(getEnvironment()).toBe(Environment.Staging);
    // Test default environment
    // (unrecognized NODE_ENV values fall back to Development)
    process.env.NODE_ENV = 'unknown';
    expect(getEnvironment()).toBe(Environment.Development);
  });
  test('getEnvVar retrieves environment variables', () => {
    expect(getEnvVar('TEST_STRING')).toBe('test-value');
    expect(getEnvVar('NON_EXISTENT')).toBeUndefined();
    expect(getEnvVar('NON_EXISTENT', false)).toBeUndefined();
    // Test required variables
    expect(() => getEnvVar('NON_EXISTENT', true)).toThrow(ConfigurationError);
  });
  test('getNumericEnvVar converts to number', () => {
    expect(getNumericEnvVar('TEST_NUMBER')).toBe(42);
    // Falls back to the provided default when the variable is absent
    expect(getNumericEnvVar('NON_EXISTENT', 100)).toBe(100);
    // Test invalid number
    process.env.INVALID_NUMBER = 'not-a-number';
    expect(() => getNumericEnvVar('INVALID_NUMBER')).toThrow(ConfigurationError);
  });
  test('getBooleanEnvVar converts to boolean', () => {
    expect(getBooleanEnvVar('TEST_BOOL_TRUE')).toBe(true);
    expect(getBooleanEnvVar('TEST_BOOL_FALSE')).toBe(false);
    expect(getBooleanEnvVar('NON_EXISTENT', true)).toBe(true);
  });
  test('validateConfig validates against schema', () => {
    // Valid config
    // (shape mirrors databaseConfigSchema: dragonfly/questDB/mongodb/postgres)
    const validConfig = {
      dragonfly: {
        host: 'localhost',
        port: 6379,
        maxRetriesPerRequest: 3
      },
      questDB: {
        host: 'localhost',
        port: 8812,
        database: 'stockbot',
        user: 'admin',
        httpPort: 9000
      },
      mongodb: {
        uri: 'mongodb://localhost:27017',
        database: 'stockbot'
      },
      postgres: {
        host: 'localhost',
        port: 5432,
        database: 'stockbot',
        user: 'postgres',
        poolSize: 10,
        ssl: false
      }
    };
    expect(() => validateConfig(validConfig, databaseConfigSchema)).not.toThrow();
    // Invalid config (missing required field)
    const invalidConfig = {
      dragonfly: {
        host: 'localhost',
        // missing port
        maxRetriesPerRequest: 3
      },
      questDB: {
        host: 'localhost',
        port: 8812,
        database: 'stockbot',
        user: 'admin',
        httpPort: 9000
      },
      mongodb: {
        uri: 'mongodb://localhost:27017',
        database: 'stockbot'
      },
      postgres: {
        host: 'localhost',
        port: 5432,
        database: 'stockbot',
        user: 'postgres',
        poolSize: 10,
        ssl: false
      }
    };
    expect(() => validateConfig(invalidConfig, databaseConfigSchema)).toThrow(ConfigurationError);
  });
});

View file

@ -1,10 +1,8 @@
/**
* Core configuration module for the Stock Bot platform
* Core configuration module for the Stock Bot platform using envalid
*/
import { config as dotenvConfig } from 'dotenv';
import path from 'node:path';
import { z } from 'zod';
import { Environment } from './types';
/**
* Represents an error related to configuration validation
@ -16,6 +14,16 @@ export class ConfigurationError extends Error {
}
}
/**
 * Environment types
 *
 * Deployment environments recognized by getEnvironment(); each member's
 * string value matches the NODE_ENV spelling it is parsed from.
 */
export enum Environment {
  Development = 'development',
  Testing = 'testing',
  Staging = 'staging',
  Production = 'production'
}
/**
* Loads environment variables from .env files based on the current environment
*/
@ -57,106 +65,3 @@ export function getEnvironment(): Environment {
return Environment.Development;
}
}
/**
 * Validates a raw configuration object against a Zod schema.
 *
 * @param config - Untyped candidate configuration.
 * @param schema - Zod schema describing the expected shape.
 * @returns The parsed, fully-typed configuration.
 * @throws ConfigurationError listing each failing path, or a generic
 *         ConfigurationError if parsing failed for a non-Zod reason.
 */
export function validateConfig<T>(config: unknown, schema: z.ZodSchema<T>): T {
  try {
    return schema.parse(config);
  } catch (err) {
    // Anything other than a Zod validation failure gets a generic message.
    if (!(err instanceof z.ZodError)) {
      throw new ConfigurationError('Invalid configuration');
    }
    const details = err.issues
      .map((issue) => `${issue.path.join('.')}: ${issue.message}`)
      .join('\n');
    throw new ConfigurationError(`Configuration validation failed:\n${details}`);
  }
}
/**
 * Reads an environment variable.
 *
 * @param key - Name to look up in process.env.
 * @param required - When true, a missing or empty value is an error.
 * @returns The raw string value, or undefined when unset and not required.
 * @throws ConfigurationError if required and the value is missing/empty.
 */
export function getEnvVar(key: string, required: boolean = false): string | undefined {
  const raw = process.env[key];
  const missing = raw === undefined || raw === '';
  if (missing && required) {
    throw new ConfigurationError(`Required environment variable ${key} is missing`);
  }
  return raw;
}
/**
 * Reads a numeric environment variable.
 *
 * @param key - Name to look up in process.env.
 * @param defaultValue - Returned when the variable is unset or blank.
 * @returns The parsed number.
 * @throws ConfigurationError when the variable is missing with no default,
 *         or when its value is not a valid number.
 */
export function getNumericEnvVar(key: string, defaultValue?: number): number {
  const value = process.env[key];
  // Whitespace-only values are treated like unset: Number('  ') would
  // silently coerce to 0, which is never an intended configuration.
  if (value === undefined || value.trim() === '') {
    if (defaultValue !== undefined) {
      return defaultValue;
    }
    throw new ConfigurationError(`Required numeric environment variable ${key} is missing`);
  }
  const numValue = Number(value);
  // Number.isNaN avoids the coercing global isNaN (modern idiom).
  if (Number.isNaN(numValue)) {
    throw new ConfigurationError(`Environment variable ${key} is not a valid number`);
  }
  return numValue;
}
/**
 * Reads a boolean environment variable.
 *
 * Truthy spellings are 'true' (any case) and '1'; everything else is false.
 *
 * @param key - Name to look up in process.env.
 * @param defaultValue - Returned when the variable is unset or empty.
 * @returns The parsed boolean.
 * @throws ConfigurationError when the variable is missing with no default.
 */
export function getBooleanEnvVar(key: string, defaultValue?: boolean): boolean {
  const raw = process.env[key];
  if (raw !== undefined && raw !== '') {
    return raw.toLowerCase() === 'true' || raw === '1';
  }
  if (defaultValue === undefined) {
    throw new ConfigurationError(`Required boolean environment variable ${key} is missing`);
  }
  return defaultValue;
}
/**
 * Builds a lazy, typed configuration loader for one service.
 *
 * The returned function loads .env files, overlays JSON taken from the
 * `<SERVICE>_CONFIG` environment variable (if present) onto defaultConfig,
 * and validates the merged result against the given schema.
 *
 * @param serviceName - Service identifier; uppercased to form the env var name.
 * @param schema - Zod schema the merged config must satisfy.
 * @param defaultConfig - Baseline values applied before env overrides.
 * @returns A zero-argument loader producing the validated config.
 * @throws ConfigurationError (from the loader) on bad JSON, failed
 *         validation, or any other load failure.
 */
export function createConfigLoader<T>(
  serviceName: string,
  schema: z.ZodSchema<T>,
  defaultConfig: Partial<T> = {}
): () => T {
  return (): T => {
    try {
      loadEnvVariables();
      const envVarName = `${serviceName.toUpperCase()}_CONFIG`;
      let merged = { ...defaultConfig } as unknown as T;
      // Optional JSON blob in the environment overrides the defaults.
      const rawJson = process.env[envVarName];
      if (rawJson) {
        let overrides: unknown;
        try {
          overrides = JSON.parse(rawJson);
        } catch {
          throw new ConfigurationError(`Invalid JSON in ${envVarName} environment variable`);
        }
        merged = { ...merged, ...(overrides as object) };
      }
      // Validate and return the config
      return validateConfig(merged, schema);
    } catch (error) {
      // Pass ConfigurationErrors through unchanged; wrap anything else.
      if (error instanceof ConfigurationError) {
        throw error;
      }
      throw new ConfigurationError(`Failed to load configuration for service ${serviceName}: ${error}`);
    }
  };
}

View file

@ -1,85 +1,157 @@
/**
* Data provider configurations for market data
* Data provider configurations using envalid
*/
import { getEnvVar, validateConfig, createConfigLoader } from './core';
import { dataProvidersConfigSchema, DataProvidersConfig, DataProviderConfig } from './types';
import { cleanEnv, str, num, bool } from 'envalid';
/**
* Default data provider configurations
* Data providers configuration with validation and defaults
*/
const defaultDataProviders: DataProviderConfig[] = [
{
name: 'alpaca',
type: 'rest',
baseUrl: 'https://data.alpaca.markets/v1beta1',
apiKey: '',
apiSecret: '',
rateLimits: {
maxRequestsPerMinute: 200
}
},
{
name: 'polygon',
type: 'rest',
baseUrl: 'https://api.polygon.io/v2',
apiKey: '',
rateLimits: {
maxRequestsPerMinute: 5
}
},
{
name: 'alpaca-websocket',
type: 'websocket',
wsUrl: 'wss://stream.data.alpaca.markets/v2/iex',
apiKey: '',
apiSecret: ''
export const dataProvidersConfig = cleanEnv(process.env, {
// Default Provider
DEFAULT_DATA_PROVIDER: str({
choices: ['alpaca', 'polygon', 'yahoo', 'iex'],
default: 'alpaca',
desc: 'Default data provider'
}),
// Alpaca Configuration
ALPACA_API_KEY: str({ default: '', desc: 'Alpaca API key' }),
ALPACA_API_SECRET: str({ default: '', desc: 'Alpaca API secret' }),
ALPACA_BASE_URL: str({ default: 'https://data.alpaca.markets/v1beta1', desc: 'Alpaca base URL' }),
ALPACA_RATE_LIMIT: num({ default: 200, desc: 'Alpaca rate limit per minute' }),
ALPACA_ENABLED: bool({ default: true, desc: 'Enable Alpaca provider' }),
// Polygon Configuration
POLYGON_API_KEY: str({ default: '', desc: 'Polygon API key' }),
POLYGON_BASE_URL: str({ default: 'https://api.polygon.io', desc: 'Polygon base URL' }),
POLYGON_RATE_LIMIT: num({ default: 5, desc: 'Polygon rate limit per minute' }),
POLYGON_ENABLED: bool({ default: false, desc: 'Enable Polygon provider' }),
// Yahoo Finance Configuration
YAHOO_BASE_URL: str({ default: 'https://query1.finance.yahoo.com', desc: 'Yahoo Finance base URL' }),
YAHOO_RATE_LIMIT: num({ default: 2000, desc: 'Yahoo Finance rate limit per hour' }),
YAHOO_ENABLED: bool({ default: true, desc: 'Enable Yahoo Finance provider' }),
// IEX Cloud Configuration
IEX_API_KEY: str({ default: '', desc: 'IEX Cloud API key' }),
IEX_BASE_URL: str({ default: 'https://cloud.iexapis.com/stable', desc: 'IEX Cloud base URL' }),
IEX_RATE_LIMIT: num({ default: 100, desc: 'IEX Cloud rate limit per second' }),
IEX_ENABLED: bool({ default: false, desc: 'Enable IEX Cloud provider' }),
// Connection Settings
DATA_PROVIDER_TIMEOUT: num({ default: 30000, desc: 'Request timeout in milliseconds' }),
DATA_PROVIDER_RETRIES: num({ default: 3, desc: 'Number of retry attempts' }),
DATA_PROVIDER_RETRY_DELAY: num({ default: 1000, desc: 'Retry delay in milliseconds' }),
// Cache Settings
DATA_CACHE_ENABLED: bool({ default: true, desc: 'Enable data caching' }),
DATA_CACHE_TTL: num({ default: 300000, desc: 'Cache TTL in milliseconds' }),
DATA_CACHE_MAX_SIZE: num({ default: 1000, desc: 'Maximum cache entries' }),
});
/**
* Helper function to get provider-specific configuration
*/
export function getProviderConfig(providerName: string) {
const name = providerName.toUpperCase();
switch (name) {
case 'ALPACA':
return {
name: 'alpaca',
type: 'rest' as const,
enabled: dataProvidersConfig.ALPACA_ENABLED,
baseUrl: dataProvidersConfig.ALPACA_BASE_URL,
apiKey: dataProvidersConfig.ALPACA_API_KEY,
apiSecret: dataProvidersConfig.ALPACA_API_SECRET,
rateLimits: {
maxRequestsPerMinute: dataProvidersConfig.ALPACA_RATE_LIMIT
}
};
case 'POLYGON':
return {
name: 'polygon',
type: 'rest' as const,
enabled: dataProvidersConfig.POLYGON_ENABLED,
baseUrl: dataProvidersConfig.POLYGON_BASE_URL,
apiKey: dataProvidersConfig.POLYGON_API_KEY,
rateLimits: {
maxRequestsPerMinute: dataProvidersConfig.POLYGON_RATE_LIMIT
}
};
case 'YAHOO':
return {
name: 'yahoo',
type: 'rest' as const,
enabled: dataProvidersConfig.YAHOO_ENABLED,
baseUrl: dataProvidersConfig.YAHOO_BASE_URL,
rateLimits: {
maxRequestsPerHour: dataProvidersConfig.YAHOO_RATE_LIMIT
}
};
case 'IEX':
return {
name: 'iex',
type: 'rest' as const,
enabled: dataProvidersConfig.IEX_ENABLED,
baseUrl: dataProvidersConfig.IEX_BASE_URL,
apiKey: dataProvidersConfig.IEX_API_KEY,
rateLimits: {
maxRequestsPerSecond: dataProvidersConfig.IEX_RATE_LIMIT
}
};
default:
throw new Error(`Unknown provider: ${providerName}`);
}
];
/**
* Load data provider configurations from environment variables
*/
export function loadDataProviderConfigs(): DataProvidersConfig {
// Get provider specific environment variables
const providers = defaultDataProviders.map(provider => {
const nameUpper = provider.name.toUpperCase().replace('-', '_');
const updatedProvider: DataProviderConfig = {
...provider,
apiKey: getEnvVar(`${nameUpper}_API_KEY`) || provider.apiKey || '',
};
if (provider.apiSecret !== undefined) {
updatedProvider.apiSecret = getEnvVar(`${nameUpper}_API_SECRET`) || provider.apiSecret || '';
}
return updatedProvider;
});
// Load default provider from environment
const defaultProvider = getEnvVar('DEFAULT_DATA_PROVIDER') || 'alpaca';
const config: DataProvidersConfig = {
providers,
defaultProvider
};
return validateConfig(config, dataProvidersConfigSchema);
}
/**
 * Creates a dynamic configuration loader for data providers.
 *
 * The loader is keyed as 'data-providers', validates against
 * dataProvidersConfigSchema, and falls back to the built-in provider
 * defaults with 'alpaca' as the default provider.
 */
export const createDataProvidersConfig = createConfigLoader<DataProvidersConfig>(
  'data-providers',
  dataProvidersConfigSchema,
  {
    providers: defaultDataProviders,
    defaultProvider: 'alpaca'
  }
);
/**
 * Get all enabled providers.
 *
 * Resolves each known provider via getProviderConfig and keeps only
 * those whose `enabled` flag is set.
 */
export function getEnabledProviders() {
  const knownProviders = ['alpaca', 'polygon', 'yahoo', 'iex'];
  const enabled = [];
  for (const name of knownProviders) {
    const cfg = getProviderConfig(name);
    if (cfg.enabled) {
      enabled.push(cfg);
    }
  }
  return enabled;
}
/**
 * Singleton data provider configurations, loaded once at module import time.
 */
export const dataProviderConfigs = loadDataProviderConfigs();
/**
 * Get the default provider configuration.
 *
 * @returns the provider descriptor selected by the DEFAULT_DATA_PROVIDER
 *          environment variable (via the envalid config object).
 */
export function getDefaultProvider() {
  return getProviderConfig(dataProvidersConfig.DEFAULT_DATA_PROVIDER);
}
// Export typed configuration object derived from the envalid cleanEnv result.
// NOTE(review): this name collides with the older `DataProvidersConfig` type
// used by loadDataProviderConfigs above (diff artifact) — confirm which one
// should survive the migration.
export type DataProvidersConfig = typeof dataProvidersConfig;
// Export individual config values for convenience, so callers can import
// e.g. `ALPACA_API_KEY` directly instead of going through the config object.
export const {
  DEFAULT_DATA_PROVIDER,
  ALPACA_API_KEY,
  ALPACA_API_SECRET,
  ALPACA_BASE_URL,
  ALPACA_RATE_LIMIT,
  ALPACA_ENABLED,
  POLYGON_API_KEY,
  POLYGON_BASE_URL,
  POLYGON_RATE_LIMIT,
  POLYGON_ENABLED,
  YAHOO_BASE_URL,
  YAHOO_RATE_LIMIT,
  YAHOO_ENABLED,
  IEX_API_KEY,
  IEX_BASE_URL,
  IEX_RATE_LIMIT,
  IEX_ENABLED,
  DATA_PROVIDER_TIMEOUT,
  DATA_PROVIDER_RETRIES,
  DATA_PROVIDER_RETRY_DELAY,
  DATA_CACHE_ENABLED,
  DATA_CACHE_TTL,
  DATA_CACHE_MAX_SIZE,
} = dataProvidersConfig;

View file

@ -1,91 +1,54 @@
/**
* Database configuration for Stock Bot services
* Database configuration using envalid
*/
import { z } from 'zod';
import { getEnvVar, getNumericEnvVar, validateConfig, createConfigLoader } from './core';
import { databaseConfigSchema, DatabaseConfig } from './types';
import { cleanEnv, str, port, bool, num } from 'envalid';
/**
 * Default database configuration.
 *
 * Fallback values used by loadDatabaseConfig when the corresponding
 * environment variables are not set.
 */
const defaultDatabaseConfig: DatabaseConfig = {
  // Dragonfly (Redis-compatible cache)
  dragonfly: {
    host: 'localhost',
    port: 6379,
    maxRetriesPerRequest: 3
  },
  // QuestDB time-series store: PG wire port 8812, HTTP console port 9000
  questDB: {
    host: 'localhost',
    port: 8812,
    database: 'stockbot',
    user: 'admin',
    httpPort: 9000
  },
  // MongoDB document store
  mongodb: {
    uri: 'mongodb://localhost:27017',
    database: 'stockbot'
  },
  // PostgreSQL operational store
  postgres: {
    host: 'localhost',
    port: 5432,
    database: 'stockbot',
    user: 'postgres',
    poolSize: 10,
    ssl: false
  }
};
// Envalid-validated PostgreSQL configuration read from process.env.
// NOTE(review): `databaseConfig` is declared a second time further down in
// this file (`export const databaseConfig = loadDatabaseConfig();`) — a diff
// artifact; only one declaration can remain.
export const databaseConfig = cleanEnv(process.env, {
  // PostgreSQL Configuration
  DB_HOST: str({ default: 'localhost', desc: 'Database host' }),
  DB_PORT: port({ default: 5432, desc: 'Database port' }),
  DB_NAME: str({ default: 'stockbot', desc: 'Database name' }),
  DB_USER: str({ default: 'stockbot', desc: 'Database user' }),
  DB_PASSWORD: str({ default: '', desc: 'Database password' }),
  // Connection Pool Settings
  DB_POOL_MIN: num({ default: 2, desc: 'Minimum pool connections' }),
  DB_POOL_MAX: num({ default: 10, desc: 'Maximum pool connections' }),
  DB_POOL_IDLE_TIMEOUT: num({ default: 30000, desc: 'Pool idle timeout in ms' }),
  // SSL Configuration
  DB_SSL: bool({ default: false, desc: 'Enable SSL for database connection' }),
  DB_SSL_REJECT_UNAUTHORIZED: bool({ default: true, desc: 'Reject unauthorized SSL certificates' }),
  // Additional Settings (all timeouts in milliseconds)
  DB_QUERY_TIMEOUT: num({ default: 30000, desc: 'Query timeout in ms' }),
  DB_CONNECTION_TIMEOUT: num({ default: 5000, desc: 'Connection timeout in ms' }),
  DB_STATEMENT_TIMEOUT: num({ default: 30000, desc: 'Statement timeout in ms' }),
  DB_LOCK_TIMEOUT: num({ default: 10000, desc: 'Lock timeout in ms' }),
  DB_IDLE_IN_TRANSACTION_SESSION_TIMEOUT: num({ default: 60000, desc: 'Idle in transaction timeout in ms' }),
});
/**
 * Load database configuration from environment variables.
 *
 * Reads Dragonfly, QuestDB, MongoDB and PostgreSQL settings, falling back to
 * `defaultDatabaseConfig` for anything unset, then validates the assembled
 * object against `databaseConfigSchema`.
 *
 * @returns the validated DatabaseConfig.
 * @throws whatever validateConfig throws on schema violations; also throws a
 *         SyntaxError if MONGODB_OPTIONS is set but is not valid JSON.
 */
export function loadDatabaseConfig(): DatabaseConfig {
  const config = {
    dragonfly: {
      host: getEnvVar('DRAGONFLY_HOST') || defaultDatabaseConfig.dragonfly.host,
      port: getNumericEnvVar('DRAGONFLY_PORT', defaultDatabaseConfig.dragonfly.port),
      password: getEnvVar('DRAGONFLY_PASSWORD'),
      maxRetriesPerRequest: getNumericEnvVar('DRAGONFLY_MAX_RETRIES_PER_REQUEST',
        defaultDatabaseConfig.dragonfly.maxRetriesPerRequest)
    },
    questDB: {
      host: getEnvVar('QUESTDB_HOST') || defaultDatabaseConfig.questDB.host,
      port: getNumericEnvVar('QUESTDB_PORT', defaultDatabaseConfig.questDB.port),
      database: getEnvVar('QUESTDB_DB') || defaultDatabaseConfig.questDB.database,
      user: getEnvVar('QUESTDB_USER') || defaultDatabaseConfig.questDB.user,
      password: getEnvVar('QUESTDB_PASSWORD'),
      httpPort: getNumericEnvVar('QUESTDB_HTTP_PORT', defaultDatabaseConfig.questDB.httpPort)
    },
    mongodb: {
      uri: getEnvVar('MONGODB_URI') || defaultDatabaseConfig.mongodb.uri,
      database: getEnvVar('MONGODB_DATABASE') || defaultDatabaseConfig.mongodb.database,
      username: getEnvVar('MONGODB_USERNAME'),
      password: getEnvVar('MONGODB_PASSWORD'),
      // Raw JSON passthrough; will throw on malformed MONGODB_OPTIONS.
      options: process.env.MONGODB_OPTIONS ? JSON.parse(process.env.MONGODB_OPTIONS) : undefined
    },
    postgres: {
      host: getEnvVar('POSTGRES_HOST') || defaultDatabaseConfig.postgres.host,
      port: getNumericEnvVar('POSTGRES_PORT', defaultDatabaseConfig.postgres.port),
      database: getEnvVar('POSTGRES_DB') || defaultDatabaseConfig.postgres.database,
      user: getEnvVar('POSTGRES_USER') || defaultDatabaseConfig.postgres.user,
      password: getEnvVar('POSTGRES_PASSWORD'),
      ssl: process.env.POSTGRES_SSL === 'true',
      poolSize: getNumericEnvVar('POSTGRES_POOL_SIZE', defaultDatabaseConfig.postgres.poolSize)
    }
  };
  return validateConfig(config, databaseConfigSchema);
}
// NOTE(review): a stray `export type DatabaseConfig = typeof databaseConfig;`
// line was interleaved INSIDE the function body above (diff artifact) — an
// `export` statement inside a function is a syntax error, and the type name
// would duplicate the `DatabaseConfig` imported from './types'. It has been
// removed here; reinstate it at module level only if the './types' import is
// dropped in the envalid migration.
/**
 * Creates a dynamic configuration loader for database config.
 *
 * Keyed as 'database'; validates against databaseConfigSchema and falls
 * back to defaultDatabaseConfig when no overrides are present.
 */
export const createDatabaseConfig = createConfigLoader<typeof defaultDatabaseConfig>(
  'database',
  databaseConfigSchema,
  defaultDatabaseConfig
);
/**
 * Singleton database configuration.
 *
 * NOTE(review): `databaseConfig` is already declared earlier in this file by
 * the envalid `cleanEnv` block (diff artifact — the old and new versions of
 * the file were merged). Two `const` declarations of the same name will not
 * compile; keep only one. The destructuring below assumes the envalid shape
 * (flat DB_* keys), not the nested shape returned by loadDatabaseConfig().
 */
export const databaseConfig = loadDatabaseConfig();
// Export individual config values for convenience
export const {
  DB_HOST,
  DB_PORT,
  DB_NAME,
  DB_USER,
  DB_PASSWORD,
  DB_POOL_MIN,
  DB_POOL_MAX,
  DB_POOL_IDLE_TIMEOUT,
  DB_SSL,
  DB_SSL_REJECT_UNAUTHORIZED,
  DB_QUERY_TIMEOUT,
  DB_CONNECTION_TIMEOUT,
  DB_STATEMENT_TIMEOUT,
  DB_LOCK_TIMEOUT,
  DB_IDLE_IN_TRANSACTION_SESSION_TIMEOUT,
} = databaseConfig;

View file

@ -0,0 +1,79 @@
/**
* Dragonfly (Redis replacement) configuration using envalid
* High-performance caching and event streaming
*/
import { cleanEnv, str, port, bool, num } from 'envalid';
/**
 * Dragonfly configuration with validation and defaults.
 *
 * All values are read from process.env and validated by envalid; every key
 * has a safe local-development default so the config loads without any
 * environment variables set.
 */
export const dragonflyConfig = cleanEnv(process.env, {
  // Dragonfly Connection
  DRAGONFLY_HOST: str({ default: 'localhost', desc: 'Dragonfly host' }),
  DRAGONFLY_PORT: port({ default: 6379, desc: 'Dragonfly port' }),
  DRAGONFLY_PASSWORD: str({ default: '', desc: 'Dragonfly password (if auth enabled)' }),
  DRAGONFLY_USERNAME: str({ default: '', desc: 'Dragonfly username (if ACL enabled)' }),
  // Database Selection
  DRAGONFLY_DATABASE: num({ default: 0, desc: 'Dragonfly database number (0-15)' }),
  // Connection Pool Settings (timeouts/delays in milliseconds)
  DRAGONFLY_MAX_RETRIES: num({ default: 3, desc: 'Maximum retry attempts' }),
  DRAGONFLY_RETRY_DELAY: num({ default: 50, desc: 'Retry delay in ms' }),
  DRAGONFLY_CONNECT_TIMEOUT: num({ default: 10000, desc: 'Connection timeout in ms' }),
  DRAGONFLY_COMMAND_TIMEOUT: num({ default: 5000, desc: 'Command timeout in ms' }),
  // Pool Configuration
  DRAGONFLY_POOL_SIZE: num({ default: 10, desc: 'Connection pool size' }),
  DRAGONFLY_POOL_MIN: num({ default: 1, desc: 'Minimum pool connections' }),
  DRAGONFLY_POOL_MAX: num({ default: 20, desc: 'Maximum pool connections' }),
  // TLS Settings (cert/key/CA paths are empty strings when TLS is disabled)
  DRAGONFLY_TLS: bool({ default: false, desc: 'Enable TLS for Dragonfly connection' }),
  DRAGONFLY_TLS_CERT_FILE: str({ default: '', desc: 'Path to TLS certificate file' }),
  DRAGONFLY_TLS_KEY_FILE: str({ default: '', desc: 'Path to TLS key file' }),
  DRAGONFLY_TLS_CA_FILE: str({ default: '', desc: 'Path to TLS CA certificate file' }),
  DRAGONFLY_TLS_SKIP_VERIFY: bool({ default: false, desc: 'Skip TLS certificate verification' }),
  // Performance Settings (keepalive interval in seconds, unlike the ms values above)
  DRAGONFLY_ENABLE_KEEPALIVE: bool({ default: true, desc: 'Enable TCP keepalive' }),
  DRAGONFLY_KEEPALIVE_INTERVAL: num({ default: 60, desc: 'Keepalive interval in seconds' }),
  // Clustering (if using cluster mode)
  DRAGONFLY_CLUSTER_MODE: bool({ default: false, desc: 'Enable cluster mode' }),
  DRAGONFLY_CLUSTER_NODES: str({ default: '', desc: 'Comma-separated list of cluster nodes (host:port)' }),
  // Memory and Cache Settings
  DRAGONFLY_MAX_MEMORY: str({ default: '2gb', desc: 'Maximum memory usage' }),
  DRAGONFLY_CACHE_MODE: bool({ default: true, desc: 'Enable cache mode' }),
});
// Export typed configuration object derived from the envalid cleanEnv result.
export type DragonflyConfig = typeof dragonflyConfig;
// Export individual config values for convenience, mirroring the pattern
// used by the other config modules in this library.
export const {
  DRAGONFLY_HOST,
  DRAGONFLY_PORT,
  DRAGONFLY_PASSWORD,
  DRAGONFLY_USERNAME,
  DRAGONFLY_DATABASE,
  DRAGONFLY_MAX_RETRIES,
  DRAGONFLY_RETRY_DELAY,
  DRAGONFLY_CONNECT_TIMEOUT,
  DRAGONFLY_COMMAND_TIMEOUT,
  DRAGONFLY_POOL_SIZE,
  DRAGONFLY_POOL_MIN,
  DRAGONFLY_POOL_MAX,
  DRAGONFLY_TLS,
  DRAGONFLY_TLS_CERT_FILE,
  DRAGONFLY_TLS_KEY_FILE,
  DRAGONFLY_TLS_CA_FILE,
  DRAGONFLY_TLS_SKIP_VERIFY,
  DRAGONFLY_ENABLE_KEEPALIVE,
  DRAGONFLY_KEEPALIVE_INTERVAL,
  DRAGONFLY_CLUSTER_MODE,
  DRAGONFLY_CLUSTER_NODES,
  DRAGONFLY_MAX_MEMORY,
  DRAGONFLY_CACHE_MODE,
} = dragonflyConfig;

View file

@ -1,81 +1,818 @@
/**
* Example usage of the @stock-bot/config library
* Example usage of the Stock Bot configuration library
*
* This file demonstrates how to use the envalid-based configuration
* system for various services in the Stock Bot platform.
*/
// Import all the configuration modules
import {
databaseConfig,
dataProviderConfigs,
riskConfig,
Environment,
// Core utilities
loadEnvVariables,
getEnvironment,
marketDataGatewayConfig,
riskGuardianConfig,
Environment,
ConfigurationError,
validateConfig
} from './index';
} from './core';
import {
// Database configuration
databaseConfig,
DatabaseConfig,
DB_HOST,
DB_PORT,
DB_NAME,
DB_USER,
DB_PASSWORD,
} from './database';
import {
// QuestDB configuration
questdbConfig,
QuestDbConfig,
QUESTDB_HOST,
QUESTDB_HTTP_PORT,
QUESTDB_PG_PORT,
} from './questdb';
import {
// MongoDB configuration
mongodbConfig,
MongoDbConfig,
MONGODB_HOST,
MONGODB_PORT,
MONGODB_DATABASE,
MONGODB_USERNAME,
} from './mongodb';
import {
// Dragonfly configuration
dragonflyConfig,
DragonflyConfig,
DRAGONFLY_HOST,
DRAGONFLY_PORT,
DRAGONFLY_DATABASE,
} from './dragonfly';
import {
// Monitoring configuration
prometheusConfig,
grafanaConfig,
PrometheusConfig,
GrafanaConfig,
PROMETHEUS_HOST,
PROMETHEUS_PORT,
GRAFANA_HOST,
GRAFANA_PORT,
} from './monitoring';
import {
// Loki configuration
lokiConfig,
LokiConfig,
LOKI_HOST,
LOKI_PORT,
LOKI_SERVICE_LABEL,
LOKI_BATCH_SIZE,
} from './loki';
import {
// Logging configuration
loggingConfig,
LoggingConfig,
LOG_LEVEL,
LOG_FORMAT,
LOG_CONSOLE,
LOG_FILE,
LOG_SERVICE_NAME,
} from './logging';
import {
// Risk management configuration
riskConfig,
RiskConfig,
RISK_MAX_POSITION_SIZE,
RISK_DEFAULT_STOP_LOSS,
RISK_CIRCUIT_BREAKER_ENABLED,
} from './risk';
import {
// Data provider configuration
dataProvidersConfig,
DataProvidersConfig,
getProviderConfig,
getEnabledProviders,
getDefaultProvider,
DEFAULT_DATA_PROVIDER,
ALPACA_API_KEY,
} from './data-providers';
/**
 * Display current configuration values
 * Example 1: Basic usage - Load environment variables and get configuration
 */
// NOTE(review): this whole region is a diff-merge artifact — the old
// `printCurrentConfig` and the new `basicUsageExample` bodies are interleaved,
// braces are unbalanced, and the code below does NOT compile as written.
// It is preserved byte-for-byte; untangle by keeping one of the two functions.
export function printCurrentConfig(): void {
console.log('\n=== Stock Bot Configuration ===');
// NOTE(review): nested function declaration starts here (diff artifact).
function basicUsageExample() {
console.log('=== Basic Usage Example ===');
console.log('\nEnvironment:', getEnvironment());
console.log('\n--- Database Config ---');
console.log('Dragonfly Host:', databaseConfig.dragonfly.host);
console.log('Dragonfly Port:', databaseConfig.dragonfly.port);
console.log('QuestDB Host:', databaseConfig.questDB.host);
console.log('QuestDB Database:', databaseConfig.questDB.database);
console.log('MongoDB URI:', databaseConfig.mongodb.uri);
console.log('MongoDB Database:', databaseConfig.mongodb.database);
console.log('PostgreSQL Host:', databaseConfig.postgres.host);
console.log('PostgreSQL Database:', databaseConfig.postgres.database);
console.log('\n--- Data Provider Config ---');
console.log('Default Provider:', dataProviderConfigs.defaultProvider);
console.log('Providers:');
dataProviderConfigs.providers.forEach((provider: {
name: string;
type: string;
baseUrl?: string;
wsUrl?: string;
}) => {
console.log(` - ${provider.name} (${provider.type})`);
if (provider.baseUrl) console.log(` URL: ${provider.baseUrl}`);
if (provider.wsUrl) console.log(` WebSocket: ${provider.wsUrl}`);
// Load environment variables (optional - they're loaded automatically)
loadEnvVariables();
// Get the current environment
const env = getEnvironment();
console.log(`Current environment: ${env}`);
// Access individual configuration values
console.log(`Database host: ${DB_HOST}`);
console.log(`Database port: ${DB_PORT}`);
console.log(`Log level: ${LOG_LEVEL}`);
// Access full configuration objects
console.log(`Full database config:`, {
host: databaseConfig.DB_HOST,
port: databaseConfig.DB_PORT,
name: databaseConfig.DB_NAME,
ssl: databaseConfig.DB_SSL,
});
console.log('\n--- Risk Config ---');
console.log('Max Drawdown:', riskConfig.maxDrawdown * 100, '%');
console.log('Max Position Size:', riskConfig.maxPositionSize * 100, '%');
console.log('Max Leverage:', riskConfig.maxLeverage, 'x');
console.log('Default Stop Loss:', riskConfig.stopLossDefault * 100, '%');
console.log('Default Take Profit:', riskConfig.takeProfitDefault * 100, '%');
console.log('\n--- Market Data Gateway Config ---');
console.log('Service Port:', marketDataGatewayConfig.service.port);
console.log('WebSocket Enabled:', marketDataGatewayConfig.websocket.enabled);
console.log('WebSocket Path:', marketDataGatewayConfig.websocket.path);
console.log('Caching Enabled:', marketDataGatewayConfig.caching.enabled);
console.log('Caching TTL:', marketDataGatewayConfig.caching.ttlSeconds, 'seconds');
console.log('\n--- Risk Guardian Config ---');
console.log('Service Port:', riskGuardianConfig.service.port);
console.log('Pre-Trade Validation:', riskGuardianConfig.riskChecks.preTradeValidation);
console.log('Portfolio Validation:', riskGuardianConfig.riskChecks.portfolioValidation);
console.log('Alerting Enabled:', riskGuardianConfig.alerting.enabled);
console.log('Critical Threshold:', riskGuardianConfig.alerting.criticalThreshold * 100, '%');
}
// Execute example if this file is run directly
// NOTE(review): this guard is missing its closing braces (diff artifact).
if (require.main === module) { try {
printCurrentConfig();
} catch (error: unknown) {
if (error instanceof ConfigurationError) {
console.error('Configuration Error:', error.message);
} else if (error instanceof Error) {
console.error('Error:', error.message);
} else {
console.error('Unknown error:', error);
}
process.exit(1);
/**
 * Example 2: Using configuration in a database connection.
 *
 * Demonstrates how to feed the envalid database config into a pg-style
 * connection pool. Only logs settings; does not actually connect.
 */
async function databaseConnectionExample() {
  console.log('=== Database Connection Example ===');
  try {
    // fixed: removed an unused `connectionString` local that interpolated
    // DB_PASSWORD into a string that was never used (dead code holding a secret).
    console.log('Database connection settings:');
    console.log(`- Host: ${databaseConfig.DB_HOST}`);
    console.log(`- Port: ${databaseConfig.DB_PORT}`);
    console.log(`- Database: ${databaseConfig.DB_NAME}`);
    console.log(`- SSL enabled: ${databaseConfig.DB_SSL}`);
    console.log(`- Pool max connections: ${databaseConfig.DB_POOL_MAX}`);
    console.log(`- Query timeout: ${databaseConfig.DB_QUERY_TIMEOUT}ms`);
    // Example pool configuration (shape matches node-postgres Pool options).
    const poolConfig = {
      host: databaseConfig.DB_HOST,
      port: databaseConfig.DB_PORT,
      database: databaseConfig.DB_NAME,
      user: databaseConfig.DB_USER,
      password: databaseConfig.DB_PASSWORD,
      ssl: databaseConfig.DB_SSL,
      min: databaseConfig.DB_POOL_MIN,
      max: databaseConfig.DB_POOL_MAX,
      idleTimeoutMillis: databaseConfig.DB_POOL_IDLE_TIMEOUT,
    };
    console.log('Pool configuration:', poolConfig);
  } catch (error) {
    console.error('Database configuration error:', error);
  }
}
/**
 * Example 3: Logging setup example.
 *
 * Builds a winston-style logger config object from the logging and Loki
 * config modules. Only logs the result; does not create a logger.
 */
function loggingSetupExample() {
  console.log('=== Logging Setup Example ===');
  // Access logging configuration
  console.log('Logging settings:');
  console.log(`- Level: ${loggingConfig.LOG_LEVEL}`);
  console.log(`- Format: ${loggingConfig.LOG_FORMAT}`);
  console.log(`- Console enabled: ${loggingConfig.LOG_CONSOLE}`);
  console.log(`- File logging: ${loggingConfig.LOG_FILE}`);
  console.log(`- Service name: ${loggingConfig.LOG_SERVICE_NAME}`);
  // Example logger configuration; transports are appended conditionally below.
  const loggerConfig = {
    level: loggingConfig.LOG_LEVEL,
    format: loggingConfig.LOG_FORMAT,
    transports: [] as any[],
    defaultMeta: {
      service: loggingConfig.LOG_SERVICE_NAME,
      version: loggingConfig.LOG_SERVICE_VERSION,
      environment: loggingConfig.LOG_ENVIRONMENT,
    },
  };
  if (loggingConfig.LOG_CONSOLE) {
    loggerConfig.transports.push({
      type: 'console',
      format: loggingConfig.LOG_FORMAT,
      timestamp: loggingConfig.LOG_TIMESTAMP,
    });
  }
  if (loggingConfig.LOG_FILE) {
    loggerConfig.transports.push({
      type: 'file',
      filename: `${loggingConfig.LOG_FILE_PATH}/application.log`,
      maxSize: loggingConfig.LOG_FILE_MAX_SIZE,
      maxFiles: loggingConfig.LOG_FILE_MAX_FILES,
      datePattern: loggingConfig.LOG_FILE_DATE_PATTERN,
    });
  }
  // Example Loki transport configuration
  // NOTE(review): LOKI_HOST has a non-empty default in most envalid setups,
  // so this branch is presumably always taken — confirm intent.
  if (lokiConfig.LOKI_HOST) {
    loggerConfig.transports.push({
      type: 'loki',
      host: lokiConfig.LOKI_HOST,
      port: lokiConfig.LOKI_PORT,
      batchSize: lokiConfig.LOKI_BATCH_SIZE,
      labels: {
        service: lokiConfig.LOKI_SERVICE_LABEL,
        environment: lokiConfig.LOKI_ENVIRONMENT_LABEL,
      },
    });
  }
  console.log('Logger configuration:', loggerConfig);
}
/**
 * Example 4: Risk management configuration.
 *
 * Prints the key risk limits and demonstrates a toy position-size
 * calculation driven by the configured fractions.
 */
function riskManagementExample() {
  console.log('=== Risk Management Example ===');
  // Access risk configuration (fractions are converted to % for display)
  console.log('Risk management settings:');
  console.log(`- Max position size: ${RISK_MAX_POSITION_SIZE * 100}%`);
  console.log(`- Default stop loss: ${RISK_DEFAULT_STOP_LOSS * 100}%`);
  console.log(`- Circuit breaker enabled: ${RISK_CIRCUIT_BREAKER_ENABLED}`);
  console.log(`- Max leverage: ${riskConfig.RISK_MAX_LEVERAGE}x`);
  // Example risk calculator: caps position value and risk per trade by the
  // configured fractions of the portfolio.
  function calculatePositionSize(portfolioValue: number, riskPerTrade: number = RISK_DEFAULT_STOP_LOSS) {
    const maxPositionValue = portfolioValue * RISK_MAX_POSITION_SIZE;
    const riskAmount = portfolioValue * riskPerTrade;
    return {
      maxPositionValue,
      riskAmount,
      maxShares: Math.floor(maxPositionValue / 100), // Assuming $100 per share
    };
  }
  const portfolioValue = 100000; // $100k portfolio
  const position = calculatePositionSize(portfolioValue);
  console.log(`Position sizing for $${portfolioValue} portfolio:`, position);
}
/**
 * Example 5: Data provider configuration.
 *
 * Shows the default/enabled providers and how to derive per-provider
 * HTTP client settings from the shared connection config.
 */
function dataProviderExample() {
  console.log('=== Data Provider Example ===');
  // Get the default provider
  const defaultProvider = getDefaultProvider();
  console.log('Default provider:', defaultProvider);
  // Get all enabled providers
  const enabledProviders = getEnabledProviders();
  console.log('Enabled providers:', enabledProviders.map(p => p.name));
  // Get specific provider configuration (throws for unknown names, hence try/catch)
  try {
    const alpacaConfig = getProviderConfig('alpaca');
    console.log('Alpaca configuration:', {
      enabled: alpacaConfig.enabled,
      baseUrl: alpacaConfig.baseUrl,
      hasApiKey: !!alpacaConfig.apiKey,
      rateLimit: alpacaConfig.rateLimits,
    });
  } catch (error) {
    console.error('Error getting Alpaca config:', error);
  }
  // Example API client setup: one client descriptor per enabled provider,
  // sharing timeout/retry settings, with a bearer header when a key exists.
  const apiClients = enabledProviders.map(provider => ({
    name: provider.name,
    client: {
      baseURL: provider.baseUrl,
      timeout: dataProvidersConfig.DATA_PROVIDER_TIMEOUT,
      retries: dataProvidersConfig.DATA_PROVIDER_RETRIES,
      retryDelay: dataProvidersConfig.DATA_PROVIDER_RETRY_DELAY,
      headers: provider.apiKey ? {
        'Authorization': `Bearer ${provider.apiKey}`
      } : {},
    }
  }));
  console.log('API clients configuration:', apiClients);
}
/**
 * Example 6: Environment-specific configuration.
 *
 * Prints a short description of the detected environment and derives
 * environment-dependent trading/logging settings from it.
 */
function environmentSpecificExample() {
  console.log('=== Environment-Specific Example ===');
  const env = getEnvironment();
  // Per-environment banner lines, printed in declaration order.
  const banners = new Map<Environment, string[]>([
    [Environment.Development, [
      'Development environment detected',
      '- Using local database',
      '- Verbose logging enabled',
      '- Paper trading mode',
    ]],
    [Environment.Testing, [
      'Testing environment detected',
      '- Using test database',
      '- Structured logging',
      '- Mock data providers',
    ]],
    [Environment.Staging, [
      'Staging environment detected',
      '- Using staging database',
      '- Production-like settings',
      '- Real data providers (limited)',
    ]],
    [Environment.Production, [
      'Production environment detected',
      '- Using production database',
      '- Optimized logging',
      '- Live trading enabled',
    ]],
  ]);
  for (const line of banners.get(env) ?? []) {
    console.log(line);
  }
  // Example of environment-specific behavior derived from the environment.
  const isProduction = env === Environment.Production;
  const tradingMode = isProduction ? 'live' : 'paper';
  const logLevel = isProduction ? 'info' : 'debug';
  console.log(`Trading mode: ${tradingMode}`);
  console.log(`Recommended log level: ${logLevel}`);
}
/**
 * Example 7: Configuration validation and error handling.
 *
 * Runs a handful of cross-field sanity checks and shows how to distinguish
 * ConfigurationError from unexpected failures.
 */
function configurationValidationExample() {
  console.log('=== Configuration Validation Example ===');
  try {
    // Check required configurations: a default provider must have credentials.
    if (!ALPACA_API_KEY && DEFAULT_DATA_PROVIDER === 'alpaca') {
      throw new ConfigurationError('Alpaca API key is required when using Alpaca as default provider');
    }
    // Validate risk settings (fractions, so 1.0 == 100%)
    if (RISK_MAX_POSITION_SIZE > 1.0) {
      throw new ConfigurationError('Maximum position size cannot exceed 100%');
    }
    // Suspicious-but-legal combination: warn rather than fail.
    if (riskConfig.RISK_DEFAULT_STOP_LOSS > riskConfig.RISK_DEFAULT_TAKE_PROFIT) {
      console.warn('Warning: Stop loss is greater than take profit - check your risk settings');
    }
    // Validate database connection settings
    if (databaseConfig.DB_POOL_MAX < databaseConfig.DB_POOL_MIN) {
      throw new ConfigurationError('Database max pool size must be greater than min pool size');
    }
    console.log('✅ All configuration validations passed');
  } catch (error) {
    if (error instanceof ConfigurationError) {
      console.error('❌ Configuration error:', error.message);
    } else {
      console.error('❌ Unexpected error:', error);
    }
  }
}
/**
 * Example 8: QuestDB time-series database configuration.
 *
 * Shows the three QuestDB access protocols (HTTP, PG wire, InfluxDB line)
 * and an example partitioned table DDL for OHLCV data.
 */
function questdbConfigurationExample() {
  console.log('=== QuestDB Configuration Example ===');
  // Access QuestDB configuration
  console.log('QuestDB settings:');
  console.log(`- Host: ${questdbConfig.QUESTDB_HOST}`);
  console.log(`- HTTP port (web console): ${questdbConfig.QUESTDB_HTTP_PORT}`);
  console.log(`- PostgreSQL port: ${questdbConfig.QUESTDB_PG_PORT}`);
  console.log(`- InfluxDB port: ${questdbConfig.QUESTDB_INFLUX_PORT}`);
  console.log(`- TLS enabled: ${questdbConfig.QUESTDB_TLS_ENABLED}`);
  // Example QuestDB client configuration: one section per protocol.
  const questdbClientConfig = {
    http: {
      host: questdbConfig.QUESTDB_HOST,
      port: questdbConfig.QUESTDB_HTTP_PORT,
      tls: questdbConfig.QUESTDB_TLS_ENABLED,
      timeout: questdbConfig.QUESTDB_REQUEST_TIMEOUT,
    },
    postgresql: {
      host: questdbConfig.QUESTDB_HOST,
      port: questdbConfig.QUESTDB_PG_PORT,
      database: questdbConfig.QUESTDB_DEFAULT_DATABASE,
      user: questdbConfig.QUESTDB_USER,
      password: questdbConfig.QUESTDB_PASSWORD,
    },
    influxdb: {
      host: questdbConfig.QUESTDB_HOST,
      port: questdbConfig.QUESTDB_INFLUX_PORT,
    }
  };
  console.log('QuestDB client configuration:', questdbClientConfig);
  // Example time-series table creation, partitioned by day on the timestamp.
  const createTableQuery = `
CREATE TABLE IF NOT EXISTS ohlcv_data (
timestamp TIMESTAMP,
symbol SYMBOL,
open DOUBLE,
high DOUBLE,
low DOUBLE,
close DOUBLE,
volume LONG
) timestamp(timestamp) PARTITION BY DAY;
`;
  console.log('Example table creation query:', createTableQuery);
}
/**
 * Example 9: MongoDB document database configuration.
 *
 * Builds a connection URI (preferring an explicit MONGODB_URI override),
 * assembles driver options, and lists example collections.
 */
function mongodbConfigurationExample() {
  console.log('=== MongoDB Configuration Example ===');
  // Access MongoDB configuration
  console.log('MongoDB settings:');
  console.log(`- Host: ${mongodbConfig.MONGODB_HOST}`);
  console.log(`- Port: ${mongodbConfig.MONGODB_PORT}`);
  console.log(`- Database: ${mongodbConfig.MONGODB_DATABASE}`);
  console.log(`- Username: ${mongodbConfig.MONGODB_USERNAME}`);
  console.log(`- TLS enabled: ${mongodbConfig.MONGODB_TLS}`);
  console.log(`- Max pool size: ${mongodbConfig.MONGODB_MAX_POOL_SIZE}`);
  // Build connection URI; an explicit MONGODB_URI wins over the parts.
  // NOTE(review): the built URI omits authSource even though the env file
  // defines MONGODB_AUTH_SOURCE — confirm whether it should be appended.
  const buildMongoUri = () => {
    if (mongodbConfig.MONGODB_URI) {
      return mongodbConfig.MONGODB_URI;
    }
    const auth = mongodbConfig.MONGODB_USERNAME && mongodbConfig.MONGODB_PASSWORD
      ? `${mongodbConfig.MONGODB_USERNAME}:${mongodbConfig.MONGODB_PASSWORD}@`
      : '';
    const tls = mongodbConfig.MONGODB_TLS ? '?tls=true' : '';
    return `mongodb://${auth}${mongodbConfig.MONGODB_HOST}:${mongodbConfig.MONGODB_PORT}/${mongodbConfig.MONGODB_DATABASE}${tls}`;
  };
  const mongoUri = buildMongoUri();
  console.log('MongoDB connection URI:', mongoUri.replace(/:[^:@]*@/, ':***@')); // Hide password
  // Example MongoDB client configuration (MongoClient options shape)
  const mongoClientConfig = {
    maxPoolSize: mongodbConfig.MONGODB_MAX_POOL_SIZE,
    minPoolSize: mongodbConfig.MONGODB_MIN_POOL_SIZE,
    maxIdleTimeMS: mongodbConfig.MONGODB_MAX_IDLE_TIME,
    connectTimeoutMS: mongodbConfig.MONGODB_CONNECT_TIMEOUT,
    socketTimeoutMS: mongodbConfig.MONGODB_SOCKET_TIMEOUT,
    serverSelectionTimeoutMS: mongodbConfig.MONGODB_SERVER_SELECTION_TIMEOUT,
    retryWrites: mongodbConfig.MONGODB_RETRY_WRITES,
    w: mongodbConfig.MONGODB_WRITE_CONCERN,
    readPreference: mongodbConfig.MONGODB_READ_PREFERENCE,
  };
  console.log('MongoDB client configuration:', mongoClientConfig);
  // Example collections structure
  const collections = [
    'sentiment_data', // News sentiment analysis
    'market_news', // Raw news articles
    'social_signals', // Social media signals
    'earnings_reports', // Earnings data
    'analyst_ratings', // Analyst recommendations
  ];
  console.log('Example collections:', collections);
}
/**
 * Example 10: Dragonfly (Redis replacement) configuration.
 *
 * Shows an ioredis-style client config plus example cache key patterns
 * and TTL policies for the trading platform.
 */
function dragonflyConfigurationExample() {
  console.log('=== Dragonfly Configuration Example ===');
  // Access Dragonfly configuration
  console.log('Dragonfly settings:');
  console.log(`- Host: ${dragonflyConfig.DRAGONFLY_HOST}`);
  console.log(`- Port: ${dragonflyConfig.DRAGONFLY_PORT}`);
  console.log(`- Database: ${dragonflyConfig.DRAGONFLY_DATABASE}`);
  console.log(`- Cache mode: ${dragonflyConfig.DRAGONFLY_CACHE_MODE}`);
  console.log(`- Max memory: ${dragonflyConfig.DRAGONFLY_MAX_MEMORY}`);
  console.log(`- Pool size: ${dragonflyConfig.DRAGONFLY_POOL_SIZE}`);
  // Example Dragonfly client configuration. Empty-string credentials are
  // mapped to undefined so the client does not attempt auth needlessly.
  const dragonflyClientConfig = {
    host: dragonflyConfig.DRAGONFLY_HOST,
    port: dragonflyConfig.DRAGONFLY_PORT,
    db: dragonflyConfig.DRAGONFLY_DATABASE,
    password: dragonflyConfig.DRAGONFLY_PASSWORD || undefined,
    username: dragonflyConfig.DRAGONFLY_USERNAME || undefined,
    retryDelayOnFailover: dragonflyConfig.DRAGONFLY_RETRY_DELAY,
    maxRetriesPerRequest: dragonflyConfig.DRAGONFLY_MAX_RETRIES,
    connectTimeout: dragonflyConfig.DRAGONFLY_CONNECT_TIMEOUT,
    commandTimeout: dragonflyConfig.DRAGONFLY_COMMAND_TIMEOUT,
    enableAutoPipelining: true,
  };
  console.log('Dragonfly client configuration:', dragonflyClientConfig);
  // Example cache key patterns ({placeholders} are filled in at call sites)
  const cachePatterns = {
    marketData: 'market:{symbol}:{timeframe}',
    indicators: 'indicators:{symbol}:{indicator}:{period}',
    positions: 'positions:{account_id}',
    orders: 'orders:{order_id}',
    rateLimit: 'rate_limit:{provider}:{endpoint}',
    sessions: 'session:{user_id}',
  };
  console.log('Example cache key patterns:', cachePatterns);
  // Example TTL configurations (shorter TTLs for fast-moving data)
  const ttlConfigs = {
    marketData: 60, // 1 minute for real-time data
    indicators: 300, // 5 minutes for calculated indicators
    positions: 30, // 30 seconds for positions
    orders: 86400, // 1 day for order history
    rateLimit: 3600, // 1 hour for rate limiting
    sessions: 1800, // 30 minutes for user sessions
  };
  console.log('Example TTL configurations (seconds):', ttlConfigs);
}
/**
 * Example 11: Monitoring stack configuration (Prometheus, Grafana, Loki).
 *
 * Prints the settings of each monitoring component, derives their HTTP
 * endpoints, and shows an example metrics registry configuration.
 */
function monitoringConfigurationExample() {
  console.log('=== Monitoring Configuration Example ===');
  // Prometheus configuration
  console.log('Prometheus settings:');
  console.log(`- Host: ${prometheusConfig.PROMETHEUS_HOST}`);
  console.log(`- Port: ${prometheusConfig.PROMETHEUS_PORT}`);
  console.log(`- Scrape interval: ${prometheusConfig.PROMETHEUS_SCRAPE_INTERVAL}`);
  console.log(`- Retention time: ${prometheusConfig.PROMETHEUS_RETENTION_TIME}`);
  // Grafana configuration
  console.log('\nGrafana settings:');
  console.log(`- Host: ${grafanaConfig.GRAFANA_HOST}`);
  console.log(`- Port: ${grafanaConfig.GRAFANA_PORT}`);
  console.log(`- Admin user: ${grafanaConfig.GRAFANA_ADMIN_USER}`);
  console.log(`- Allow sign up: ${grafanaConfig.GRAFANA_ALLOW_SIGN_UP}`);
  console.log(`- Database type: ${grafanaConfig.GRAFANA_DATABASE_TYPE}`);
  // Loki configuration
  console.log('\nLoki settings:');
  console.log(`- Host: ${lokiConfig.LOKI_HOST}`);
  console.log(`- Port: ${lokiConfig.LOKI_PORT}`);
  console.log(`- Batch size: ${lokiConfig.LOKI_BATCH_SIZE}`);
  console.log(`- Retention period: ${lokiConfig.LOKI_RETENTION_PERIOD}`);
  // Example monitoring endpoints (plain HTTP; TLS not handled here)
  const monitoringEndpoints = {
    prometheus: `http://${prometheusConfig.PROMETHEUS_HOST}:${prometheusConfig.PROMETHEUS_PORT}`,
    grafana: `http://${grafanaConfig.GRAFANA_HOST}:${grafanaConfig.GRAFANA_PORT}`,
    loki: `http://${lokiConfig.LOKI_HOST}:${lokiConfig.LOKI_PORT}`,
  };
  console.log('\nMonitoring endpoints:', monitoringEndpoints);
  // Example metrics configuration (prom-client-style registry options)
  const metricsConfig = {
    defaultLabels: {
      service: 'stock-bot',
      environment: getEnvironment(),
      version: process.env.npm_package_version || '1.0.0',
    },
    collectDefaultMetrics: true,
    prefix: 'stockbot_',
    buckets: [0.1, 0.5, 1, 2, 5, 10, 30, 60], // Response time buckets in seconds
  };
  console.log('Example metrics configuration:', metricsConfig);
}
/**
 * Example 12: Multi-database service configuration
 *
 * Assembles one consolidated settings object for a microservice that talks
 * to every data store on the platform (PostgreSQL, QuestDB, MongoDB,
 * Dragonfly) plus the monitoring stack, then prints it as formatted JSON
 * together with the platform's high-level data-flow patterns.
 */
function multiDatabaseServiceExample() {
  console.log('=== Multi-Database Service Example ===');

  // Identity of the service this configuration describes.
  const serviceIdentity = {
    name: 'market-data-processor',
    version: '1.0.0',
    environment: getEnvironment(),
  };

  // PostgreSQL: operational data (credentials and pool sizing from env).
  const postgresSettings = {
    host: databaseConfig.DB_HOST,
    port: databaseConfig.DB_PORT,
    database: databaseConfig.DB_NAME,
    username: databaseConfig.DB_USER,
    password: databaseConfig.DB_PASSWORD,
    ssl: databaseConfig.DB_SSL,
    pool: {
      min: databaseConfig.DB_POOL_MIN,
      max: databaseConfig.DB_POOL_MAX,
      idleTimeout: databaseConfig.DB_POOL_IDLE_TIMEOUT,
    },
  };

  // QuestDB: time-series data.
  const questdbSettings = {
    host: questdbConfig.QUESTDB_HOST,
    httpPort: questdbConfig.QUESTDB_HTTP_PORT,
    pgPort: questdbConfig.QUESTDB_PG_PORT,
    database: questdbConfig.QUESTDB_DEFAULT_DATABASE,
    timeout: questdbConfig.QUESTDB_REQUEST_TIMEOUT,
  };

  // MongoDB: document storage.
  const mongoSettings = {
    host: mongodbConfig.MONGODB_HOST,
    port: mongodbConfig.MONGODB_PORT,
    database: mongodbConfig.MONGODB_DATABASE,
    username: mongodbConfig.MONGODB_USERNAME,
    maxPoolSize: mongodbConfig.MONGODB_MAX_POOL_SIZE,
    readPreference: mongodbConfig.MONGODB_READ_PREFERENCE,
  };

  // Dragonfly: caching layer.
  const dragonflySettings = {
    host: dragonflyConfig.DRAGONFLY_HOST,
    port: dragonflyConfig.DRAGONFLY_PORT,
    database: dragonflyConfig.DRAGONFLY_DATABASE,
    poolSize: dragonflyConfig.DRAGONFLY_POOL_SIZE,
    commandTimeout: dragonflyConfig.DRAGONFLY_COMMAND_TIMEOUT,
  };

  // Monitoring: Prometheus push endpoint plus Loki log shipping.
  const monitoringSettings = {
    prometheus: {
      pushGateway: `http://${prometheusConfig.PROMETHEUS_HOST}:${prometheusConfig.PROMETHEUS_PORT}`,
      scrapeInterval: prometheusConfig.PROMETHEUS_SCRAPE_INTERVAL,
    },
    loki: {
      host: lokiConfig.LOKI_HOST,
      port: lokiConfig.LOKI_PORT,
      batchSize: lokiConfig.LOKI_BATCH_SIZE,
      labels: {
        service: 'market-data-processor',
        environment: getEnvironment(),
      },
    },
  };

  // Compose in the same key order consumers of this example expect;
  // JSON.stringify below prints keys in insertion order.
  const serviceConfig = {
    service: serviceIdentity,
    postgresql: postgresSettings,
    questdb: questdbSettings,
    mongodb: mongoSettings,
    dragonfly: dragonflySettings,
    monitoring: monitoringSettings,
  };
  console.log('Complete service configuration:', JSON.stringify(serviceConfig, null, 2));

  // How data moves between the stores configured above.
  const dataFlow = {
    ingestion: 'Market data → Dragonfly (cache) → QuestDB (storage)',
    processing: 'QuestDB → Analysis → PostgreSQL (results) → MongoDB (metadata)',
    serving: 'Dragonfly (cache) ← PostgreSQL/QuestDB ← API requests',
    monitoring: 'All services → Prometheus → Grafana dashboards',
    logging: 'All services → Loki → Grafana log viewer',
  };
  console.log('\nData flow patterns:', dataFlow);
}
/**
 * Example 13: Creating a service configuration object
 *
 * Builds the configuration for the Market Data Gateway service from the
 * shared config singletons (database, logging, Loki, data providers) and
 * prints it as formatted JSON.
 *
 * NOTE: previously labeled "Example 8", which clashed with the file's
 * ordering (this example runs after Example 12 and is invoked last by
 * runAllExamples); renumbered for consistency.
 */
function serviceConfigurationExample() {
  console.log('=== Service Configuration Example ===');
  // Example: Market Data Gateway service configuration
  const marketDataGatewayConfig = {
    service: {
      name: 'market-data-gateway',
      port: 3001,
      environment: getEnvironment(),
    },
    // Subset of the PostgreSQL settings this service needs.
    database: {
      host: databaseConfig.DB_HOST,
      port: databaseConfig.DB_PORT,
      name: databaseConfig.DB_NAME,
      ssl: databaseConfig.DB_SSL,
    },
    // Console level/flags plus the Loki shipping target and per-service labels.
    logging: {
      level: loggingConfig.LOG_LEVEL,
      console: loggingConfig.LOG_CONSOLE,
      loki: {
        host: lokiConfig.LOKI_HOST,
        port: lokiConfig.LOKI_PORT,
        labels: {
          service: 'market-data-gateway',
          environment: getEnvironment(),
        },
      },
    },
    // Which market-data providers are active and how requests are retried.
    dataProviders: {
      default: DEFAULT_DATA_PROVIDER,
      enabled: getEnabledProviders(),
      timeout: dataProvidersConfig.DATA_PROVIDER_TIMEOUT,
      retries: dataProvidersConfig.DATA_PROVIDER_RETRIES,
    },
    cache: {
      enabled: dataProvidersConfig.DATA_CACHE_ENABLED,
      ttl: dataProvidersConfig.DATA_CACHE_TTL,
      maxSize: dataProvidersConfig.DATA_CACHE_MAX_SIZE,
    },
  };
  console.log('Market Data Gateway configuration:', JSON.stringify(marketDataGatewayConfig, null, 2));
}
/**
 * Main example runner
 *
 * Executes every example in sequence, printing a blank-line separator
 * between examples (after each one except the last). Any error thrown by
 * an example aborts the remaining run and is reported to stderr.
 */
function runAllExamples() {
  console.log('🚀 Stock Bot Configuration Examples\n');
  // Registry of all examples, in execution order.
  const examples = [
    basicUsageExample,
    databaseConnectionExample,
    loggingSetupExample,
    riskManagementExample,
    dataProviderExample,
    environmentSpecificExample,
    configurationValidationExample,
    questdbConfigurationExample,
    mongodbConfigurationExample,
    dragonflyConfigurationExample,
    monitoringConfigurationExample,
    multiDatabaseServiceExample,
    serviceConfigurationExample,
  ];
  try {
    examples.forEach((example, index) => {
      example();
      // Separator after every example except the final one.
      if (index < examples.length - 1) {
        console.log('\n');
      }
    });
  } catch (error) {
    console.error('Example execution error:', error);
  }
}
// Export each example individually, plus the aggregate runner, so other
// modules can execute a single scenario or the whole suite.
export {
  basicUsageExample,
  databaseConnectionExample,
  loggingSetupExample,
  riskManagementExample,
  dataProviderExample,
  environmentSpecificExample,
  configurationValidationExample,
  questdbConfigurationExample,
  mongodbConfigurationExample,
  dragonflyConfigurationExample,
  monitoringConfigurationExample,
  multiDatabaseServiceExample,
  serviceConfigurationExample,
  runAllExamples,
};
// Run examples if this file is executed directly.
// FIX: this module uses ES-module `export` syntax, so `require` is not
// defined at runtime and `require.main === module` would throw a
// ReferenceError. Compare this module's URL with the executed script
// path instead.
// NOTE(review): the template-literal comparison assumes a POSIX path with
// no characters needing URL-encoding; on Windows prefer
// url.pathToFileURL(process.argv[1]).href — confirm target platforms.
if (import.meta.url === `file://${process.argv[1]}`) {
  runAllExamples();
}

View file

@ -1,24 +1,25 @@
/**
* @stock-bot/config
*
* Configuration management library for Stock Bot platform
* Configuration management library for Stock Bot platform using envalid
*/
// Core configuration functionality
export * from './core';
export * from './types';
// Database configurations
export * from './database';
export * from './questdb';
export * from './mongodb';
export * from './dragonfly';
// Logging and monitoring configurations
export * from './logging';
export * from './loki';
export * from './monitoring';
// Data provider configurations
export * from './data-providers';
// Risk management configurations
export * from './risk';
// Logging configurations
export * from './logging';
// Service-specific configurations
export * from './services/index';

View file

@ -1,102 +1,81 @@
/**
* Loki logging configuration for Stock Bot platform
* Logging configuration using envalid
* Application logging settings without Loki (Loki config is in monitoring.ts)
*/
import { z } from 'zod';
import { getEnvVar, getNumericEnvVar, getBooleanEnvVar, createConfigLoader, validateConfig } from './core';
import { cleanEnv, str, bool, num } from 'envalid';
/**
* Loki configuration schema
* Logging configuration with validation and defaults
*/
export const lokiConfigSchema = z.object({
host: z.string().default('localhost'),
port: z.number().default(3100),
username: z.string().optional(),
password: z.string().optional(),
retentionDays: z.number().default(30),
labels: z.record(z.string()).default({}),
batchSize: z.number().default(100),
flushIntervalMs: z.number().default(5000)
export const loggingConfig = cleanEnv(process.env, {
// Basic Logging Settings
LOG_LEVEL: str({
default: 'info',
choices: ['error', 'warn', 'info', 'http', 'verbose', 'debug', 'silly'],
desc: 'Logging level'
}),
LOG_FORMAT: str({
default: 'json',
choices: ['json', 'simple', 'combined'],
desc: 'Log output format'
}),
LOG_CONSOLE: bool({ default: true, desc: 'Enable console logging' }),
LOG_FILE: bool({ default: false, desc: 'Enable file logging' }),
// File Logging Settings
LOG_FILE_PATH: str({ default: 'logs', desc: 'Log file directory path' }),
LOG_FILE_MAX_SIZE: str({ default: '20m', desc: 'Maximum log file size' }),
LOG_FILE_MAX_FILES: num({ default: 14, desc: 'Maximum number of log files to keep' }),
LOG_FILE_DATE_PATTERN: str({ default: 'YYYY-MM-DD', desc: 'Log file date pattern' }),
// Error Logging
LOG_ERROR_FILE: bool({ default: true, desc: 'Enable separate error log file' }),
LOG_ERROR_STACK: bool({ default: true, desc: 'Include stack traces in error logs' }),
// Performance Logging
LOG_PERFORMANCE: bool({ default: false, desc: 'Enable performance logging' }),
LOG_SQL_QUERIES: bool({ default: false, desc: 'Log SQL queries' }),
LOG_HTTP_REQUESTS: bool({ default: true, desc: 'Log HTTP requests' }),
// Structured Logging
LOG_STRUCTURED: bool({ default: true, desc: 'Use structured logging format' }),
LOG_TIMESTAMP: bool({ default: true, desc: 'Include timestamps in logs' }),
LOG_CALLER_INFO: bool({ default: false, desc: 'Include caller information in logs' }),
// Log Filtering
LOG_SILENT_MODULES: str({ default: '', desc: 'Comma-separated list of modules to silence' }),
LOG_VERBOSE_MODULES: str({ default: '', desc: 'Comma-separated list of modules for verbose logging' }),
// Application Context
LOG_SERVICE_NAME: str({ default: 'stock-bot', desc: 'Service name for log context' }),
LOG_SERVICE_VERSION: str({ default: '1.0.0', desc: 'Service version for log context' }),
LOG_ENVIRONMENT: str({ default: 'development', desc: 'Environment for log context' }),
});
export type LokiConfig = z.infer<typeof lokiConfigSchema>;
// Export typed configuration object
export type LoggingConfig = typeof loggingConfig;
/**
* Logging configuration schema
*/
export const loggingConfigSchema = z.object({
level: z.enum(['debug', 'info', 'warn', 'error']).default('info'),
console: z.boolean().default(true),
loki: lokiConfigSchema
});
export type LoggingConfig = z.infer<typeof loggingConfigSchema>;
/**
* Parse labels from environment variable string
* Format: key1=value1,key2=value2
*/
function parseLabels(labelsStr?: string): Record<string, string> {
if (!labelsStr) return {};
const labels: Record<string, string> = {};
labelsStr.split(',').forEach(labelPair => {
const [key, value] = labelPair.trim().split('=');
if (key && value) {
labels[key] = value;
}
});
return labels;
}
/**
* Default logging configuration
*/
const defaultLoggingConfig: LoggingConfig = {
level: 'info',
console: true,
loki: {
host: 'localhost',
port: 3100,
retentionDays: 30,
labels: {},
batchSize: 100,
flushIntervalMs: 5000
}
};
/**
* Load logging configuration from environment variables
*/
export function loadLoggingConfig(): LoggingConfig {
const config = {
level: (getEnvVar('LOG_LEVEL') || 'info') as 'debug' | 'info' | 'warn' | 'error',
console: getBooleanEnvVar('LOG_CONSOLE', true),
loki: {
host: getEnvVar('LOKI_HOST') || 'localhost',
port: getNumericEnvVar('LOKI_PORT', 3100),
username: getEnvVar('LOKI_USERNAME'),
password: getEnvVar('LOKI_PASSWORD'),
retentionDays: getNumericEnvVar('LOKI_RETENTION_DAYS', 30),
labels: parseLabels(getEnvVar('LOKI_LABELS')),
batchSize: getNumericEnvVar('LOKI_BATCH_SIZE', 100),
flushIntervalMs: getNumericEnvVar('LOKI_FLUSH_INTERVAL_MS', 5000)
}
};
return validateConfig(config, loggingConfigSchema);
}
/**
* Creates a dynamic configuration loader for logging
*/
export const createLoggingConfig = createConfigLoader<typeof defaultLoggingConfig>(
'logging',
loggingConfigSchema,
defaultLoggingConfig
);
/**
* Singleton logging configuration
*/
export const loggingConfig = loadLoggingConfig();
// Export individual config values for convenience
export const {
LOG_LEVEL,
LOG_FORMAT,
LOG_CONSOLE,
LOG_FILE,
LOG_FILE_PATH,
LOG_FILE_MAX_SIZE,
LOG_FILE_MAX_FILES,
LOG_FILE_DATE_PATTERN,
LOG_ERROR_FILE,
LOG_ERROR_STACK,
LOG_PERFORMANCE,
LOG_SQL_QUERIES,
LOG_HTTP_REQUESTS,
LOG_STRUCTURED,
LOG_TIMESTAMP,
LOG_CALLER_INFO,
LOG_SILENT_MODULES,
LOG_VERBOSE_MODULES,
LOG_SERVICE_NAME,
LOG_SERVICE_VERSION,
LOG_ENVIRONMENT,
} = loggingConfig;

61
libs/config/src/loki.ts Normal file
View file

@ -0,0 +1,61 @@
/**
 * Loki log aggregation configuration using envalid.
 *
 * Centralized log-shipping settings for the Stock Bot platform. Values are
 * read from process.env once at module load and validated by envalid's
 * cleanEnv; missing values fall back to the defaults declared below.
 */
import { cleanEnv, str, port, bool, num } from 'envalid';
/**
 * Loki configuration with validation and defaults.
 * Defaults target a local development setup (localhost:3100, no auth, no TLS).
 */
export const lokiConfig = cleanEnv(process.env, {
  // Loki Server
  LOKI_HOST: str({ default: 'localhost', desc: 'Loki host' }),
  LOKI_PORT: port({ default: 3100, desc: 'Loki port' }),
  // NOTE(review): consumers are presumably expected to prefer LOKI_URL over
  // host/port when it is non-empty — confirm call sites honor this.
  LOKI_URL: str({ default: '', desc: 'Complete Loki URL (overrides host/port)' }),
  // Authentication (empty string means "no auth configured")
  LOKI_USERNAME: str({ default: '', desc: 'Loki username (if auth enabled)' }),
  LOKI_PASSWORD: str({ default: '', desc: 'Loki password (if auth enabled)' }),
  LOKI_TENANT_ID: str({ default: '', desc: 'Loki tenant ID (for multi-tenancy)' }),
  // Push Configuration
  LOKI_PUSH_TIMEOUT: num({ default: 10000, desc: 'Push timeout in ms' }),
  LOKI_BATCH_SIZE: num({ default: 1024, desc: 'Batch size for log entries' }),
  LOKI_BATCH_WAIT: num({ default: 1000, desc: 'Batch wait time in ms' }),
  // Retention Settings (duration strings, e.g. '30d', '1h')
  LOKI_RETENTION_PERIOD: str({ default: '30d', desc: 'Log retention period' }),
  LOKI_MAX_CHUNK_AGE: str({ default: '1h', desc: 'Maximum chunk age' }),
  // TLS Settings
  LOKI_TLS_ENABLED: bool({ default: false, desc: 'Enable TLS for Loki' }),
  LOKI_TLS_INSECURE: bool({ default: false, desc: 'Skip TLS verification' }),
  // Log Labels attached to shipped entries
  LOKI_DEFAULT_LABELS: str({ default: '', desc: 'Default labels for all log entries (JSON format)' }),
  LOKI_SERVICE_LABEL: str({ default: 'stock-bot', desc: 'Service label for log entries' }),
  LOKI_ENVIRONMENT_LABEL: str({ default: 'development', desc: 'Environment label for log entries' }),
});
// Export typed configuration object (shape inferred from the cleanEnv spec)
export type LokiConfig = typeof lokiConfig;
// Export individual config values for convenience
export const {
  LOKI_HOST,
  LOKI_PORT,
  LOKI_URL,
  LOKI_USERNAME,
  LOKI_PASSWORD,
  LOKI_TENANT_ID,
  LOKI_PUSH_TIMEOUT,
  LOKI_BATCH_SIZE,
  LOKI_BATCH_WAIT,
  LOKI_RETENTION_PERIOD,
  LOKI_MAX_CHUNK_AGE,
  LOKI_TLS_ENABLED,
  LOKI_TLS_INSECURE,
  LOKI_DEFAULT_LABELS,
  LOKI_SERVICE_LABEL,
  LOKI_ENVIRONMENT_LABEL,
} = lokiConfig;

View file

@ -0,0 +1,75 @@
/**
 * MongoDB configuration using envalid.
 *
 * Document storage for sentiment data, raw documents, and unstructured data.
 * Values are read from process.env once at module load and validated by
 * envalid's cleanEnv; missing values fall back to the defaults below.
 */
import { cleanEnv, str, port, bool, num } from 'envalid';
/**
 * MongoDB configuration with validation and defaults.
 */
export const mongodbConfig = cleanEnv(process.env, {
  // MongoDB Connection
  MONGODB_HOST: str({ default: 'localhost', desc: 'MongoDB host' }),
  MONGODB_PORT: port({ default: 27017, desc: 'MongoDB port' }),
  MONGODB_DATABASE: str({ default: 'trading_documents', desc: 'MongoDB database name' }),
  // Authentication
  // NOTE(review): default username with an empty default password looks like
  // a development-only convenience — confirm production deployments override
  // both via environment.
  MONGODB_USERNAME: str({ default: 'trading_admin', desc: 'MongoDB username' }),
  MONGODB_PASSWORD: str({ default: '', desc: 'MongoDB password' }),
  MONGODB_AUTH_SOURCE: str({ default: 'admin', desc: 'MongoDB authentication database' }),
  // Connection URI (alternative to individual settings)
  MONGODB_URI: str({ default: '', desc: 'Complete MongoDB connection URI (overrides individual settings)' }),
  // Connection Pool Settings
  MONGODB_MAX_POOL_SIZE: num({ default: 10, desc: 'Maximum connection pool size' }),
  MONGODB_MIN_POOL_SIZE: num({ default: 0, desc: 'Minimum connection pool size' }),
  MONGODB_MAX_IDLE_TIME: num({ default: 30000, desc: 'Maximum idle time for connections in ms' }),
  // Timeouts (all in milliseconds)
  MONGODB_CONNECT_TIMEOUT: num({ default: 10000, desc: 'Connection timeout in ms' }),
  MONGODB_SOCKET_TIMEOUT: num({ default: 30000, desc: 'Socket timeout in ms' }),
  MONGODB_SERVER_SELECTION_TIMEOUT: num({ default: 5000, desc: 'Server selection timeout in ms' }),
  // SSL/TLS Settings
  MONGODB_TLS: bool({ default: false, desc: 'Enable TLS for MongoDB connection' }),
  MONGODB_TLS_INSECURE: bool({ default: false, desc: 'Allow invalid certificates in TLS mode' }),
  MONGODB_TLS_CA_FILE: str({ default: '', desc: 'Path to TLS CA certificate file' }),
  // Additional Settings
  MONGODB_RETRY_WRITES: bool({ default: true, desc: 'Enable retryable writes' }),
  MONGODB_JOURNAL: bool({ default: true, desc: 'Enable write concern journal' }),
  MONGODB_READ_PREFERENCE: str({
    default: 'primary',
    choices: ['primary', 'primaryPreferred', 'secondary', 'secondaryPreferred', 'nearest'],
    desc: 'MongoDB read preference'
  }),
  MONGODB_WRITE_CONCERN: str({ default: 'majority', desc: 'Write concern level' }),
});
// Export typed configuration object (shape inferred from the cleanEnv spec)
export type MongoDbConfig = typeof mongodbConfig;
// Export individual config values for convenience
export const {
  MONGODB_HOST,
  MONGODB_PORT,
  MONGODB_DATABASE,
  MONGODB_USERNAME,
  MONGODB_PASSWORD,
  MONGODB_AUTH_SOURCE,
  MONGODB_URI,
  MONGODB_MAX_POOL_SIZE,
  MONGODB_MIN_POOL_SIZE,
  MONGODB_MAX_IDLE_TIME,
  MONGODB_CONNECT_TIMEOUT,
  MONGODB_SOCKET_TIMEOUT,
  MONGODB_SERVER_SELECTION_TIMEOUT,
  MONGODB_TLS,
  MONGODB_TLS_INSECURE,
  MONGODB_TLS_CA_FILE,
  MONGODB_RETRY_WRITES,
  MONGODB_JOURNAL,
  MONGODB_READ_PREFERENCE,
  MONGODB_WRITE_CONCERN,
} = mongodbConfig;

View file

@ -0,0 +1,90 @@
/**
 * Monitoring configuration using envalid.
 *
 * Prometheus metrics and Grafana visualization settings. Values are read
 * from process.env at module load and validated by envalid's cleanEnv.
 */
// NOTE(review): `num` appears unused in the visible portion of this file —
// confirm before removing it from the import.
import { cleanEnv, str, port, bool, num } from 'envalid';
/**
 * Prometheus configuration with validation and defaults.
 */
export const prometheusConfig = cleanEnv(process.env, {
  // Prometheus Server
  PROMETHEUS_HOST: str({ default: 'localhost', desc: 'Prometheus host' }),
  PROMETHEUS_PORT: port({ default: 9090, desc: 'Prometheus port' }),
  PROMETHEUS_URL: str({ default: '', desc: 'Complete Prometheus URL (overrides host/port)' }),
  // Authentication (empty string means "no auth configured")
  PROMETHEUS_USERNAME: str({ default: '', desc: 'Prometheus username (if auth enabled)' }),
  PROMETHEUS_PASSWORD: str({ default: '', desc: 'Prometheus password (if auth enabled)' }),
  // Metrics Collection (Prometheus duration strings, e.g. '15s', '15d')
  PROMETHEUS_SCRAPE_INTERVAL: str({ default: '15s', desc: 'Default scrape interval' }),
  PROMETHEUS_EVALUATION_INTERVAL: str({ default: '15s', desc: 'Rule evaluation interval' }),
  PROMETHEUS_RETENTION_TIME: str({ default: '15d', desc: 'Data retention time' }),
  // TLS Settings
  PROMETHEUS_TLS_ENABLED: bool({ default: false, desc: 'Enable TLS for Prometheus' }),
  PROMETHEUS_TLS_INSECURE: bool({ default: false, desc: 'Skip TLS verification' }),
});
/**
 * Grafana configuration with validation and defaults.
 */
export const grafanaConfig = cleanEnv(process.env, {
  // Grafana Server
  GRAFANA_HOST: str({ default: 'localhost', desc: 'Grafana host' }),
  GRAFANA_PORT: port({ default: 3000, desc: 'Grafana port' }),
  GRAFANA_URL: str({ default: '', desc: 'Complete Grafana URL (overrides host/port)' }),
  // Authentication
  // NOTE(review): admin/admin defaults are development-only — confirm these
  // are always overridden (and GRAFANA_SECRET_KEY set) in production.
  GRAFANA_ADMIN_USER: str({ default: 'admin', desc: 'Grafana admin username' }),
  GRAFANA_ADMIN_PASSWORD: str({ default: 'admin', desc: 'Grafana admin password' }),
  // Security Settings
  GRAFANA_ALLOW_SIGN_UP: bool({ default: false, desc: 'Allow user sign up' }),
  GRAFANA_SECRET_KEY: str({ default: '', desc: 'Grafana secret key for encryption' }),
  // Database Settings
  GRAFANA_DATABASE_TYPE: str({
    default: 'sqlite3',
    choices: ['mysql', 'postgres', 'sqlite3'],
    desc: 'Grafana database type'
  }),
  GRAFANA_DATABASE_URL: str({ default: '', desc: 'Grafana database URL' }),
  // Feature Flags
  GRAFANA_DISABLE_GRAVATAR: bool({ default: true, desc: 'Disable Gravatar avatars' }),
  GRAFANA_ENABLE_GZIP: bool({ default: true, desc: 'Enable gzip compression' }),
});
// Export typed configuration objects (shapes inferred from the cleanEnv specs)
export type PrometheusConfig = typeof prometheusConfig;
export type GrafanaConfig = typeof grafanaConfig;
// Export individual config values for convenience
export const {
  PROMETHEUS_HOST,
  PROMETHEUS_PORT,
  PROMETHEUS_URL,
  PROMETHEUS_USERNAME,
  PROMETHEUS_PASSWORD,
  PROMETHEUS_SCRAPE_INTERVAL,
  PROMETHEUS_EVALUATION_INTERVAL,
  PROMETHEUS_RETENTION_TIME,
  PROMETHEUS_TLS_ENABLED,
  PROMETHEUS_TLS_INSECURE,
} = prometheusConfig;
export const {
  GRAFANA_HOST,
  GRAFANA_PORT,
  GRAFANA_URL,
  GRAFANA_ADMIN_USER,
  GRAFANA_ADMIN_PASSWORD,
  GRAFANA_ALLOW_SIGN_UP,
  GRAFANA_SECRET_KEY,
  GRAFANA_DATABASE_TYPE,
  GRAFANA_DATABASE_URL,
  GRAFANA_DISABLE_GRAVATAR,
  GRAFANA_ENABLE_GZIP,
} = grafanaConfig;

View file

@ -0,0 +1,53 @@
/**
 * QuestDB configuration using envalid.
 *
 * Time-series database for OHLCV data, indicators, and performance metrics.
 * Values are read from process.env at module load and validated by envalid's
 * cleanEnv; missing values fall back to the defaults declared below.
 */
import { cleanEnv, str, port, bool, num } from 'envalid';
/**
 * QuestDB configuration with validation and defaults.
 * QuestDB exposes three protocols, each on its own port (HTTP, PG wire,
 * InfluxDB line protocol).
 */
export const questdbConfig = cleanEnv(process.env, {
  // QuestDB Connection
  QUESTDB_HOST: str({ default: 'localhost', desc: 'QuestDB host' }),
  QUESTDB_HTTP_PORT: port({ default: 9000, desc: 'QuestDB HTTP port (web console)' }),
  QUESTDB_PG_PORT: port({ default: 8812, desc: 'QuestDB PostgreSQL wire protocol port' }),
  QUESTDB_INFLUX_PORT: port({ default: 9009, desc: 'QuestDB InfluxDB line protocol port' }),
  // Authentication (if enabled) — empty string means "no auth configured"
  QUESTDB_USER: str({ default: '', desc: 'QuestDB username (if auth enabled)' }),
  QUESTDB_PASSWORD: str({ default: '', desc: 'QuestDB password (if auth enabled)' }),
  // Connection Settings (timeouts in milliseconds)
  QUESTDB_CONNECTION_TIMEOUT: num({ default: 5000, desc: 'Connection timeout in ms' }),
  QUESTDB_REQUEST_TIMEOUT: num({ default: 30000, desc: 'Request timeout in ms' }),
  QUESTDB_RETRY_ATTEMPTS: num({ default: 3, desc: 'Number of retry attempts' }),
  // TLS Settings
  QUESTDB_TLS_ENABLED: bool({ default: false, desc: 'Enable TLS for QuestDB connection' }),
  QUESTDB_TLS_VERIFY_SERVER_CERT: bool({ default: true, desc: 'Verify server certificate' }),
  // Database Settings
  QUESTDB_DEFAULT_DATABASE: str({ default: 'qdb', desc: 'Default database name' }),
  QUESTDB_TELEMETRY_ENABLED: bool({ default: false, desc: 'Enable telemetry' }),
});
// Export typed configuration object (shape inferred from the cleanEnv spec)
export type QuestDbConfig = typeof questdbConfig;
// Export individual config values for convenience
export const {
  QUESTDB_HOST,
  QUESTDB_HTTP_PORT,
  QUESTDB_PG_PORT,
  QUESTDB_INFLUX_PORT,
  QUESTDB_USER,
  QUESTDB_PASSWORD,
  QUESTDB_CONNECTION_TIMEOUT,
  QUESTDB_REQUEST_TIMEOUT,
  QUESTDB_RETRY_ATTEMPTS,
  QUESTDB_TLS_ENABLED,
  QUESTDB_TLS_VERIFY_SERVER_CERT,
  QUESTDB_DEFAULT_DATABASE,
  QUESTDB_TELEMETRY_ENABLED,
} = questdbConfig;

View file

@ -1,45 +1,82 @@
/**
* Risk management configuration for trading operations
* Risk management configuration using envalid
*/
import { getNumericEnvVar, validateConfig, createConfigLoader } from './core';
import { riskConfigSchema, RiskConfig } from './types';
import { cleanEnv, str, num, bool } from 'envalid';
/**
* Default risk configuration
* Risk configuration with validation and defaults
*/
const defaultRiskConfig: RiskConfig = {
maxDrawdown: 0.05,
maxPositionSize: 0.1,
maxLeverage: 1,
stopLossDefault: 0.02,
takeProfitDefault: 0.05
};
export const riskConfig = cleanEnv(process.env, {
// Position Sizing
RISK_MAX_POSITION_SIZE: num({ default: 0.1, desc: 'Maximum position size as percentage of portfolio' }),
RISK_MAX_PORTFOLIO_EXPOSURE: num({ default: 0.8, desc: 'Maximum portfolio exposure percentage' }),
RISK_MAX_SINGLE_ASSET_EXPOSURE: num({ default: 0.2, desc: 'Maximum exposure to single asset' }),
RISK_MAX_SECTOR_EXPOSURE: num({ default: 0.3, desc: 'Maximum exposure to single sector' }),
// Stop Loss and Take Profit
RISK_DEFAULT_STOP_LOSS: num({ default: 0.05, desc: 'Default stop loss percentage' }),
RISK_DEFAULT_TAKE_PROFIT: num({ default: 0.15, desc: 'Default take profit percentage' }),
RISK_TRAILING_STOP_ENABLED: bool({ default: true, desc: 'Enable trailing stop losses' }),
RISK_TRAILING_STOP_DISTANCE: num({ default: 0.03, desc: 'Trailing stop distance percentage' }),
// Risk Limits
RISK_MAX_DAILY_LOSS: num({ default: 0.05, desc: 'Maximum daily loss percentage' }),
RISK_MAX_WEEKLY_LOSS: num({ default: 0.1, desc: 'Maximum weekly loss percentage' }),
RISK_MAX_MONTHLY_LOSS: num({ default: 0.2, desc: 'Maximum monthly loss percentage' }),
// Volatility Controls
RISK_MAX_VOLATILITY_THRESHOLD: num({ default: 0.4, desc: 'Maximum volatility threshold' }),
RISK_VOLATILITY_LOOKBACK_DAYS: num({ default: 20, desc: 'Volatility calculation lookback period' }),
// Correlation Controls
RISK_MAX_CORRELATION_THRESHOLD: num({ default: 0.7, desc: 'Maximum correlation between positions' }),
RISK_CORRELATION_LOOKBACK_DAYS: num({ default: 60, desc: 'Correlation calculation lookback period' }),
// Leverage Controls
RISK_MAX_LEVERAGE: num({ default: 2.0, desc: 'Maximum leverage allowed' }),
RISK_MARGIN_CALL_THRESHOLD: num({ default: 0.3, desc: 'Margin call threshold' }),
// Circuit Breakers
RISK_CIRCUIT_BREAKER_ENABLED: bool({ default: true, desc: 'Enable circuit breakers' }),
RISK_CIRCUIT_BREAKER_LOSS_THRESHOLD: num({ default: 0.1, desc: 'Circuit breaker loss threshold' }),
RISK_CIRCUIT_BREAKER_COOLDOWN_MINUTES: num({ default: 60, desc: 'Circuit breaker cooldown period' }),
// Risk Model
RISK_MODEL_TYPE: str({
choices: ['var', 'cvar', 'expected_shortfall'],
default: 'var',
desc: 'Risk model type'
}),
RISK_CONFIDENCE_LEVEL: num({ default: 0.95, desc: 'Risk model confidence level' }),
RISK_TIME_HORIZON_DAYS: num({ default: 1, desc: 'Risk time horizon in days' }),
});
/**
* Load risk configuration from environment variables
*/
export function loadRiskConfig(): RiskConfig {
const config: RiskConfig = {
maxDrawdown: getNumericEnvVar('RISK_MAX_DRAWDOWN', defaultRiskConfig.maxDrawdown),
maxPositionSize: getNumericEnvVar('RISK_MAX_POSITION_SIZE', defaultRiskConfig.maxPositionSize),
maxLeverage: getNumericEnvVar('RISK_MAX_LEVERAGE', defaultRiskConfig.maxLeverage),
stopLossDefault: getNumericEnvVar('RISK_STOP_LOSS_DEFAULT', defaultRiskConfig.stopLossDefault),
takeProfitDefault: getNumericEnvVar('RISK_TAKE_PROFIT_DEFAULT', defaultRiskConfig.takeProfitDefault)
};
// Export typed configuration object
export type RiskConfig = typeof riskConfig;
return validateConfig(config, riskConfigSchema);
}
/**
* Creates a dynamic configuration loader for risk management
*/
export const createRiskConfig = createConfigLoader<typeof defaultRiskConfig>(
'risk',
riskConfigSchema,
defaultRiskConfig
);
/**
* Singleton risk configuration
*/
export const riskConfig = loadRiskConfig();
// Export individual config values for convenience
export const {
RISK_MAX_POSITION_SIZE,
RISK_MAX_PORTFOLIO_EXPOSURE,
RISK_MAX_SINGLE_ASSET_EXPOSURE,
RISK_MAX_SECTOR_EXPOSURE,
RISK_DEFAULT_STOP_LOSS,
RISK_DEFAULT_TAKE_PROFIT,
RISK_TRAILING_STOP_ENABLED,
RISK_TRAILING_STOP_DISTANCE,
RISK_MAX_DAILY_LOSS,
RISK_MAX_WEEKLY_LOSS,
RISK_MAX_MONTHLY_LOSS,
RISK_MAX_VOLATILITY_THRESHOLD,
RISK_VOLATILITY_LOOKBACK_DAYS,
RISK_MAX_CORRELATION_THRESHOLD,
RISK_CORRELATION_LOOKBACK_DAYS,
RISK_MAX_LEVERAGE,
RISK_MARGIN_CALL_THRESHOLD,
RISK_CIRCUIT_BREAKER_ENABLED,
RISK_CIRCUIT_BREAKER_LOSS_THRESHOLD,
RISK_CIRCUIT_BREAKER_COOLDOWN_MINUTES,
RISK_MODEL_TYPE,
RISK_CONFIDENCE_LEVEL,
RISK_TIME_HORIZON_DAYS,
} = riskConfig;

View file

@ -1,5 +0,0 @@
/**
* Export all service-specific configurations
*/
export * from './market-data-gateway';
export * from './risk-guardian';

View file

@ -1,106 +0,0 @@
/**
* Market Data Gateway service configuration
*/
import { z } from 'zod';
import { getEnvVar, getNumericEnvVar, getBooleanEnvVar, createConfigLoader } from '../core';
import { Environment, BaseConfig } from '../types';
import { getEnvironment } from '../core';
/**
* Market Data Gateway specific configuration schema
*/
export const marketDataGatewayConfigSchema = z.object({
environment: z.nativeEnum(Environment),
logLevel: z.enum(['debug', 'info', 'warn', 'error']).default('info'),
service: z.object({
name: z.string().default('market-data-gateway'),
version: z.string().default('1.0.0'),
port: z.number().default(4000)
}),
websocket: z.object({
enabled: z.boolean().default(true),
path: z.string().default('/ws/market-data'),
heartbeatInterval: z.number().default(30000)
}),
throttling: z.object({
maxRequestsPerMinute: z.number().default(300),
maxConnectionsPerIP: z.number().default(5)
}),
caching: z.object({
enabled: z.boolean().default(true),
ttlSeconds: z.number().default(60)
})
});
/**
* Market Data Gateway configuration type
*/
export type MarketDataGatewayConfig = z.infer<typeof marketDataGatewayConfigSchema>;
/**
* Default Market Data Gateway configuration
*/
const defaultConfig: Partial<MarketDataGatewayConfig> = {
environment: getEnvironment(),
logLevel: 'info',
service: {
name: 'market-data-gateway',
version: '1.0.0',
port: 4000
},
websocket: {
enabled: true,
path: '/ws/market-data',
heartbeatInterval: 30000 // 30 seconds
},
throttling: {
maxRequestsPerMinute: 300,
maxConnectionsPerIP: 5
},
caching: {
enabled: true,
ttlSeconds: 60
}
};
/**
* Load Market Data Gateway configuration
*/
export function loadMarketDataGatewayConfig(): MarketDataGatewayConfig {
return {
environment: getEnvironment(),
logLevel: (getEnvVar('LOG_LEVEL') || defaultConfig.logLevel) as 'debug' | 'info' | 'warn' | 'error',
service: {
name: getEnvVar('SERVICE_NAME') || defaultConfig.service!.name,
version: getEnvVar('SERVICE_VERSION') || defaultConfig.service!.version,
port: getNumericEnvVar('SERVICE_PORT', defaultConfig.service!.port)
},
websocket: {
enabled: getBooleanEnvVar('WEBSOCKET_ENABLED', defaultConfig.websocket!.enabled),
path: getEnvVar('WEBSOCKET_PATH') || defaultConfig.websocket!.path,
heartbeatInterval: getNumericEnvVar('WEBSOCKET_HEARTBEAT_INTERVAL', defaultConfig.websocket!.heartbeatInterval)
},
throttling: {
maxRequestsPerMinute: getNumericEnvVar('THROTTLING_MAX_REQUESTS', defaultConfig.throttling!.maxRequestsPerMinute),
maxConnectionsPerIP: getNumericEnvVar('THROTTLING_MAX_CONNECTIONS', defaultConfig.throttling!.maxConnectionsPerIP)
},
caching: {
enabled: getBooleanEnvVar('CACHING_ENABLED', defaultConfig.caching!.enabled),
ttlSeconds: getNumericEnvVar('CACHING_TTL_SECONDS', defaultConfig.caching!.ttlSeconds)
}
};
}
/**
* Creates a dynamic configuration loader for the Market Data Gateway
*/
export const createMarketDataGatewayConfig = createConfigLoader<MarketDataGatewayConfig>(
'market-data-gateway',
marketDataGatewayConfigSchema,
defaultConfig
);
/**
* Singleton Market Data Gateway configuration
*/
export const marketDataGatewayConfig = loadMarketDataGatewayConfig();

View file

@ -1,112 +0,0 @@
/**
* Risk Guardian service configuration
*/
import { z } from 'zod';
import { getEnvVar, getNumericEnvVar, getBooleanEnvVar, createConfigLoader } from '../core';
import { Environment, BaseConfig } from '../types';
import { getEnvironment } from '../core';
/**
* Risk Guardian specific configuration schema
*/
export const riskGuardianConfigSchema = z.object({
environment: z.nativeEnum(Environment),
logLevel: z.enum(['debug', 'info', 'warn', 'error']).default('info'),
service: z.object({
name: z.string().default('risk-guardian'),
version: z.string().default('1.0.0'),
port: z.number().default(4001)
}),
riskChecks: z.object({
preTradeValidation: z.boolean().default(true),
portfolioValidation: z.boolean().default(true),
leverageValidation: z.boolean().default(true),
concentrationValidation: z.boolean().default(true)
}),
alerting: z.object({
enabled: z.boolean().default(true),
criticalThreshold: z.number().default(0.8),
warningThreshold: z.number().default(0.6)
}),
watchdog: z.object({
enabled: z.boolean().default(true),
checkIntervalSeconds: z.number().default(60)
})
});
/**
* Risk Guardian configuration type
*/
export type RiskGuardianConfig = z.infer<typeof riskGuardianConfigSchema>;
/**
* Default Risk Guardian configuration
*/
const defaultConfig: Partial<RiskGuardianConfig> = {
environment: getEnvironment(),
logLevel: 'info',
service: {
name: 'risk-guardian',
version: '1.0.0',
port: 4001
},
riskChecks: {
preTradeValidation: true,
portfolioValidation: true,
leverageValidation: true,
concentrationValidation: true
},
alerting: {
enabled: true,
criticalThreshold: 0.8,
warningThreshold: 0.6
},
watchdog: {
enabled: true,
checkIntervalSeconds: 60
}
};
/**
* Load Risk Guardian configuration
*/
export function loadRiskGuardianConfig(): RiskGuardianConfig {
return {
environment: getEnvironment(),
logLevel: (getEnvVar('LOG_LEVEL') || defaultConfig.logLevel) as 'debug' | 'info' | 'warn' | 'error',
service: {
name: getEnvVar('SERVICE_NAME') || defaultConfig.service!.name,
version: getEnvVar('SERVICE_VERSION') || defaultConfig.service!.version,
port: getNumericEnvVar('SERVICE_PORT', defaultConfig.service!.port)
},
riskChecks: {
preTradeValidation: getBooleanEnvVar('RISK_CHECKS_PRE_TRADE', defaultConfig.riskChecks!.preTradeValidation),
portfolioValidation: getBooleanEnvVar('RISK_CHECKS_PORTFOLIO', defaultConfig.riskChecks!.portfolioValidation),
leverageValidation: getBooleanEnvVar('RISK_CHECKS_LEVERAGE', defaultConfig.riskChecks!.leverageValidation),
concentrationValidation: getBooleanEnvVar('RISK_CHECKS_CONCENTRATION', defaultConfig.riskChecks!.concentrationValidation)
},
alerting: {
enabled: getBooleanEnvVar('ALERTING_ENABLED', defaultConfig.alerting!.enabled),
criticalThreshold: getNumericEnvVar('ALERTING_CRITICAL_THRESHOLD', defaultConfig.alerting!.criticalThreshold),
warningThreshold: getNumericEnvVar('ALERTING_WARNING_THRESHOLD', defaultConfig.alerting!.warningThreshold)
},
watchdog: {
enabled: getBooleanEnvVar('WATCHDOG_ENABLED', defaultConfig.watchdog!.enabled),
checkIntervalSeconds: getNumericEnvVar('WATCHDOG_CHECK_INTERVAL', defaultConfig.watchdog!.checkIntervalSeconds)
}
};
}
/**
* Creates a dynamic configuration loader for the Risk Guardian
*/
export const createRiskGuardianConfig = createConfigLoader<RiskGuardianConfig>(
'risk-guardian',
riskGuardianConfigSchema,
defaultConfig
);
/**
 * Singleton Risk Guardian configuration
 *
 * Resolved eagerly, once, at module import time via
 * loadRiskGuardianConfig(); environment changes made after import are
 * not reflected in this instance.
 */
export const riskGuardianConfig = loadRiskGuardianConfig();

View file

@ -1,104 +0,0 @@
/**
* Configuration type definitions for the Stock Bot platform
*/
import { z } from 'zod';
/**
 * Environment enum for different deployment environments
 *
 * String-valued so members compare directly against NODE_ENV-style
 * strings (e.g. env === Environment.Production).
 */
export enum Environment {
  Development = 'development', // local development
  Testing = 'testing',         // automated test runs
  Staging = 'staging',         // pre-production staging
  Production = 'production'    // live production
}
/**
 * Common configuration interface for all service configs
 *
 * Every per-service configuration shape is expected to carry at least
 * these fields.
 */
export interface BaseConfig {
  environment: Environment;                      // active deployment environment
  logLevel: 'debug' | 'info' | 'warn' | 'error'; // minimum log severity emitted
  service: {
    name: string;    // service identifier
    version: string; // service version string
    port: number;    // TCP port the service listens on
  };
}
/**
 * Database configuration schema
 *
 * Connection settings for every datastore used by the platform:
 * Dragonfly (Redis-compatible), QuestDB (time series), MongoDB
 * (documents) and PostgreSQL (operational data).
 */

// Dragonfly (Redis-compatible) connection settings.
const dragonflySchema = z.object({
  host: z.string().default('localhost'),
  port: z.number().default(6379),
  password: z.string().optional(),
  maxRetriesPerRequest: z.number().default(3)
});

// QuestDB: `port` is the Postgres wire port, `httpPort` the HTTP API.
const questDBSchema = z.object({
  host: z.string().default('localhost'),
  port: z.number().default(8812),
  database: z.string().default('stockbot'),
  user: z.string().default('admin'),
  password: z.string().optional(),
  httpPort: z.number().default(9000)
});

// MongoDB document store; extra driver options ride in `options`.
const mongodbSchema = z.object({
  uri: z.string().default('mongodb://localhost:27017'),
  database: z.string().default('stockbot'),
  username: z.string().optional(),
  password: z.string().optional(),
  options: z.record(z.string(), z.any()).optional()
});

// PostgreSQL operational database.
const postgresSchema = z.object({
  host: z.string().default('localhost'),
  port: z.number().default(5432),
  database: z.string().default('stockbot'),
  user: z.string().default('postgres'),
  password: z.string().optional(),
  ssl: z.boolean().default(false),
  poolSize: z.number().default(10)
});

export const databaseConfigSchema = z.object({
  dragonfly: dragonflySchema,
  questDB: questDBSchema,
  mongodb: mongodbSchema,
  postgres: postgresSchema
});
/**
 * Data provider configuration schema
 *
 * Describes a single data source (REST, websocket or file based)
 * together with its endpoint, credentials and optional rate limits.
 */

// Optional throttling limits for a provider's API.
const rateLimitsSchema = z
  .object({
    maxRequestsPerMinute: z.number().optional(),
    maxRequestsPerSecond: z.number().optional()
  })
  .optional();

export const dataProviderSchema = z.object({
  name: z.string(),
  type: z.enum(['rest', 'websocket', 'file']),
  baseUrl: z.string().url().optional(),
  wsUrl: z.string().url().optional(),
  apiKey: z.string().optional(),
  apiSecret: z.string().optional(),
  refreshInterval: z.number().optional(),
  rateLimits: rateLimitsSchema
});
/**
 * Collection of configured data providers.
 *
 * NOTE(review): `defaultProvider` presumably names one entry in
 * `providers`, but the schema does not enforce that — consider a
 * `.refine()` check if that invariant matters to consumers.
 */
export const dataProvidersConfigSchema = z.object({
  providers: z.array(dataProviderSchema),
  defaultProvider: z.string()
});
/**
 * Risk management configuration schema
 *
 * Platform-wide trading risk limits. The 0.xx defaults are presumably
 * fractions (0.05 = 5%) and maxLeverage a multiplier — confirm units
 * against the risk-engine consumers.
 */
export const riskConfigSchema = z.object({
  maxDrawdown: z.number().default(0.05),      // max tolerated drawdown
  maxPositionSize: z.number().default(0.1),   // max size of a single position
  maxLeverage: z.number().default(1),         // leverage cap (1 = unleveraged)
  stopLossDefault: z.number().default(0.02),  // default stop-loss level
  takeProfitDefault: z.number().default(0.05) // default take-profit level
});
/**
 * Type definitions based on schemas
 *
 * Each alias is inferred from its zod schema above, so the static
 * types always track the runtime validation rules.
 */
export type DatabaseConfig = z.infer<typeof databaseConfigSchema>;
export type DataProviderConfig = z.infer<typeof dataProviderSchema>;
export type DataProvidersConfig = z.infer<typeof dataProvidersConfigSchema>;
export type RiskConfig = z.infer<typeof riskConfigSchema>;

File diff suppressed because one or more lines are too long

View file

@ -0,0 +1,118 @@
#!/usr/bin/env node
/**
 * Configuration Validation Script
 *
 * Smoke-tests the compiled configuration modules under ./dist: seeds
 * the process environment with harmless test values, loads each module,
 * and checks that it exports at least one object-valued configuration.
 * Exits 0 when every module (and the index barrel) loads, 1 otherwise.
 */

// Set test environment variables so each config module can resolve
// without a real .env file.
process.env.NODE_ENV = 'test';
process.env.PORT = '3001';

// Database configs
process.env.DB_HOST = 'localhost';
process.env.DB_PORT = '5432';
process.env.DB_NAME = 'test_db';
process.env.DB_USER = 'test_user';
process.env.DB_PASSWORD = 'test_pass';

// QuestDB configs
process.env.QUESTDB_HOST = 'localhost';
process.env.QUESTDB_HTTP_PORT = '9000';
process.env.QUESTDB_PG_PORT = '8812';

// MongoDB configs
process.env.MONGODB_HOST = 'localhost';
process.env.MONGODB_PORT = '27017';
process.env.MONGODB_DATABASE = 'test_db';

// Dragonfly configs
process.env.DRAGONFLY_HOST = 'localhost';
process.env.DRAGONFLY_PORT = '6379';

// Monitoring configs
process.env.PROMETHEUS_HOST = 'localhost';
process.env.PROMETHEUS_PORT = '9090';
process.env.GRAFANA_HOST = 'localhost';
process.env.GRAFANA_PORT = '3000';

// Loki configs
process.env.LOKI_HOST = 'localhost';
process.env.LOKI_PORT = '3100';

// Logging configs
process.env.LOG_LEVEL = 'info';
process.env.LOG_FORMAT = 'json';

try {
  console.log('🔍 Validating configuration modules...\n');

  // Each compiled configuration module that must load cleanly.
  const modules = [
    { name: 'Database', path: './dist/database.js' },
    { name: 'QuestDB', path: './dist/questdb.js' },
    { name: 'MongoDB', path: './dist/mongodb.js' },
    { name: 'Dragonfly', path: './dist/dragonfly.js' },
    { name: 'Monitoring', path: './dist/monitoring.js' },
    { name: 'Loki', path: './dist/loki.js' },
    { name: 'Logging', path: './dist/logging.js' },
  ];

  const results = [];

  // Loop variable renamed from `module` to `mod`: `module` shadows the
  // CommonJS module object, which ESLint flags and which can mislead.
  for (const mod of modules) {
    try {
      const config = require(mod.path);
      const configKeys = Object.keys(config);

      if (configKeys.length === 0) {
        throw new Error('No exported configuration found');
      }

      // Try to access the main config object
      const mainConfig = config[configKeys[0]];
      if (!mainConfig || typeof mainConfig !== 'object') {
        throw new Error('Invalid configuration object');
      }

      // ✅/❌ prefixes added so per-module success and failure lines are
      // distinguishable, matching the style of the other log lines.
      console.log(`✅ ${mod.name}: ${configKeys.length} config(s) loaded`);
      results.push({ name: mod.name, status: 'success', configs: configKeys });
    } catch (error) {
      console.log(`❌ ${mod.name}: ${error.message}`);
      results.push({ name: mod.name, status: 'error', error: error.message });
    }
  }

  // Test main index exports
  try {
    const indexExports = require('./dist/index.js');
    const exportCount = Object.keys(indexExports).length;
    console.log(`\n✅ Index exports: ${exportCount} modules exported`);
    results.push({ name: 'Index', status: 'success', exports: exportCount });
  } catch (error) {
    console.log(`\n❌ Index exports: ${error.message}`);
    results.push({ name: 'Index', status: 'error', error: error.message });
  }

  // Summary
  const successful = results.filter((r) => r.status === 'success').length;
  const total = results.length;

  console.log(`\n📊 Validation Summary:`);
  console.log(`  Total modules: ${total}`);
  console.log(`  Successful: ${successful}`);
  console.log(`  Failed: ${total - successful}`);

  if (successful === total) {
    console.log('\n🎉 All configuration modules validated successfully!');
    process.exit(0);
  } else {
    console.log('\n⚠ Some configuration modules failed validation.');
    process.exit(1);
  }
} catch (error) {
  console.error('❌ Validation script failed:', error.message);
  process.exit(1);
}

View file

@ -39,5 +39,8 @@
"engines": {
"node": ">=18.0.0",
"bun": ">=1.1.0"
},
"dependencies": {
"valibot": "^1.1.0"
}
}