format
This commit is contained in:
parent
d858222af7
commit
7d9044ab29
202 changed files with 10755 additions and 10972 deletions
|
|
@@ -1,148 +0,0 @@
|
|||
# Enhanced Cache Provider Usage
|
||||
|
||||
The Redis cache provider now supports advanced TTL handling and conditional operations.
|
||||
|
||||
## Basic Usage (Backward Compatible)
|
||||
|
||||
```typescript
|
||||
import { RedisCache } from '@stock-bot/cache';
|
||||
|
||||
const cache = new RedisCache({
|
||||
keyPrefix: 'trading:',
|
||||
defaultTTL: 3600 // 1 hour
|
||||
});
|
||||
|
||||
// Simple set with TTL (old way - still works)
|
||||
await cache.set('user:123', userData, 1800); // 30 minutes
|
||||
|
||||
// Simple get
|
||||
const user = await cache.get<UserData>('user:123');
|
||||
```
|
||||
|
||||
## Enhanced Set Options
|
||||
|
||||
```typescript
|
||||
// Preserve existing TTL when updating
|
||||
await cache.set('user:123', updatedUserData, { preserveTTL: true });
|
||||
|
||||
// Only set if key exists (update operation)
|
||||
const oldValue = await cache.set('user:123', newData, {
|
||||
onlyIfExists: true,
|
||||
getOldValue: true
|
||||
});
|
||||
|
||||
// Only set if key doesn't exist (create operation)
|
||||
await cache.set('user:456', newUser, {
|
||||
onlyIfNotExists: true,
|
||||
ttl: 7200 // 2 hours
|
||||
});
|
||||
|
||||
// Get old value when setting new one
|
||||
const previousData = await cache.set('session:abc', sessionData, {
|
||||
getOldValue: true,
|
||||
ttl: 1800
|
||||
});
|
||||
```
|
||||
|
||||
## Convenience Methods
|
||||
|
||||
```typescript
|
||||
// Update value preserving TTL
|
||||
await cache.update('user:123', updatedUserData);
|
||||
|
||||
// Set only if exists
|
||||
const updated = await cache.setIfExists('user:123', newData, 3600);
|
||||
|
||||
// Set only if not exists (returns true if created)
|
||||
const created = await cache.setIfNotExists('user:456', userData);
|
||||
|
||||
// Replace existing key with new TTL
|
||||
const oldData = await cache.replace('user:123', newData, 7200);
|
||||
|
||||
// Atomic field updates
|
||||
await cache.updateField('counter:views', (current) => (current || 0) + 1);
|
||||
|
||||
await cache.updateField('user:123', (user) => ({
|
||||
...user,
|
||||
lastSeen: new Date().toISOString(),
|
||||
loginCount: (user?.loginCount || 0) + 1
|
||||
}));
|
||||
```
|
||||
|
||||
## Stock Bot Use Cases
|
||||
|
||||
### 1. Rate Limiting
|
||||
```typescript
|
||||
// Try to create the rate-limit entry; setIfNotExists returns true only if the key was created
const created = await cache.setIfNotExists(
  `ratelimit:${userId}:${endpoint}`,
  { count: 1, resetTime: Date.now() + 60000 },
  60 // 1 minute
);

if (!created) {
  // Key already existed — increment the existing counter
  await cache.updateField(`ratelimit:${userId}:${endpoint}`, (data) => ({
    ...data,
    count: data.count + 1
  }));
}
|
||||
```
|
||||
|
||||
### 2. Session Management
|
||||
```typescript
|
||||
// Update session data without changing expiration
|
||||
await cache.update(`session:${sessionId}`, {
|
||||
...sessionData,
|
||||
lastActivity: Date.now()
|
||||
});
|
||||
```
|
||||
|
||||
### 3. Cache Warming
|
||||
```typescript
|
||||
// Only update existing cached data, don't create new entries
|
||||
const warmed = await cache.setIfExists(`stock:${symbol}:price`, latestPrice);
|
||||
if (warmed) {
|
||||
console.log(`Warmed cache for ${symbol}`);
|
||||
}
|
||||
```
|
||||
|
||||
### 4. Atomic Counters
|
||||
```typescript
|
||||
// Thread-safe counter increments
|
||||
await cache.updateField('metrics:api:calls', (count) => (count || 0) + 1);
|
||||
await cache.updateField('metrics:errors:500', (count) => (count || 0) + 1);
|
||||
```
|
||||
|
||||
### 5. TTL Preservation for Frequently Updated Data
|
||||
```typescript
|
||||
// Keep original expiration when updating frequently changing data
|
||||
await cache.set(`portfolio:${userId}:positions`, positions, { preserveTTL: true });
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
The cache provider includes robust error handling:
|
||||
|
||||
```typescript
|
||||
try {
|
||||
await cache.set('key', value);
|
||||
} catch (error) {
|
||||
// Errors are logged and fallback values returned
|
||||
// The cache operations are non-blocking
|
||||
}
|
||||
|
||||
// Check cache health
|
||||
const isHealthy = await cache.health();
|
||||
|
||||
// Wait for cache to be ready
|
||||
await cache.waitForReady(10000); // 10 second timeout
|
||||
```
|
||||
|
||||
## Performance Benefits
|
||||
|
||||
1. **Atomic Operations**: `updateField` uses Lua scripts to prevent race conditions
|
||||
2. **TTL Preservation**: Avoids unnecessary TTL resets on updates
|
||||
3. **Conditional Operations**: Reduces network round trips
|
||||
4. **Shared Connections**: Efficient connection pooling
|
||||
5. **Error Recovery**: Graceful degradation when Redis is unavailable
|
||||
|
|
@@ -1,169 +0,0 @@
|
|||
# Loki Logging for Stock Bot
|
||||
|
||||
This document outlines how to use the Loki logging system integrated with the Stock Bot platform (Updated June 2025).
|
||||
|
||||
## Overview
|
||||
|
||||
Loki provides centralized logging for all Stock Bot services with:
|
||||
|
||||
1. **Centralized logging** for all microservices
|
||||
2. **Log aggregation** and filtering by service, level, and custom labels
|
||||
3. **Grafana integration** for visualization and dashboards
|
||||
4. **Query capabilities** using LogQL for log analysis
|
||||
5. **Alert capabilities** for critical issues
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Starting the Logging Stack
|
||||
|
||||
```cmd
|
||||
# Start the monitoring stack (includes Loki and Grafana)
|
||||
scripts\docker.ps1 monitoring
|
||||
```
|
||||
|
||||
Or start services individually:
|
||||
|
||||
```cmd
|
||||
# Start Loki service only
|
||||
docker-compose up -d loki
|
||||
|
||||
# Start Loki and Grafana
|
||||
docker-compose up -d loki grafana
|
||||
```
|
||||
|
||||
### Viewing Logs
|
||||
|
||||
Once started:
|
||||
|
||||
1. Access Grafana at http://localhost:3000 (login with admin/admin)
|
||||
2. Navigate to the "Stock Bot Logs" dashboard
|
||||
3. View and query your logs
|
||||
|
||||
## Using the Logger in Your Services
|
||||
|
||||
The Stock Bot logger automatically sends logs to Loki using the updated pattern:
|
||||
|
||||
```typescript
|
||||
import { getLogger } from '@stock-bot/logger';
|
||||
|
||||
// Create a logger for your service
|
||||
const logger = getLogger('your-service-name');
|
||||
|
||||
// Log at different levels
|
||||
logger.debug('Detailed information for debugging');
|
||||
logger.info('General information about operations');
|
||||
logger.warn('Potential issues that don\'t affect operation');
|
||||
logger.error('Critical errors that require attention');
|
||||
|
||||
// Log with structured data (searchable in Loki)
|
||||
logger.info('Processing trade', {
|
||||
symbol: 'MSFT',
|
||||
price: 410.75,
|
||||
quantity: 50
|
||||
});
|
||||
```
|
||||
|
||||
## Configuration Options
|
||||
|
||||
Logger configuration is managed through the `@stock-bot/config` package and can be set in your `.env` file:
|
||||
|
||||
```bash
|
||||
# Logging configuration
|
||||
LOG_LEVEL=debug # debug, info, warn, error
|
||||
LOG_CONSOLE=true # Log to console in addition to Loki
|
||||
LOKI_HOST=localhost # Loki server hostname
|
||||
LOKI_PORT=3100 # Loki server port
|
||||
LOKI_RETENTION_DAYS=30 # Days to retain logs
|
||||
LOKI_LABELS=environment=development,service=stock-bot # Default labels
|
||||
LOKI_BATCH_SIZE=100 # Number of logs to batch before sending
|
||||
LOKI_BATCH_WAIT=5 # Max time to wait before sending logs
|
||||
```
|
||||
|
||||
## Useful Loki Queries
|
||||
|
||||
Inside Grafana, you can use these LogQL queries to analyze your logs:
|
||||
|
||||
1. **All logs from a specific service**:
|
||||
```
|
||||
{service="market-data-gateway"}
|
||||
```
|
||||
|
||||
2. **All error logs across all services**:
|
||||
```
|
||||
{level="error"}
|
||||
```
|
||||
|
||||
3. **Logs containing specific text**:
|
||||
```
|
||||
{service="market-data-gateway"} |= "trade"
|
||||
```
|
||||
|
||||
4. **Count of error logs by service over time**:
|
||||
```
|
||||
sum by(service) (count_over_time({level="error"}[5m]))
|
||||
```
|
||||
|
||||
## Testing the Logging Integration
|
||||
|
||||
Test the logging integration using Bun:
|
||||
|
||||
```cmd
|
||||
# Run from project root using Bun (current runtime)
|
||||
bun run tools/test-loki-logging.ts
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
Our logging implementation follows this architecture:
|
||||
|
||||
```
|
||||
┌─────────────────┐ ┌─────────────────┐
|
||||
│ Trading Services│────►│ @stock-bot/logger│
|
||||
└─────────────────┘ │ getLogger() │
|
||||
└────────┬────────┘
|
||||
│
|
||||
▼
|
||||
┌────────────────────────────────────────┐
|
||||
│ Loki │
|
||||
└────────────────┬───────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌────────────────────────────────────────┐
|
||||
│ Grafana │
|
||||
└────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Adding New Dashboards
|
||||
|
||||
To create new Grafana dashboards for log visualization:
|
||||
|
||||
1. Build your dashboard in the Grafana UI
|
||||
2. Export it to JSON
|
||||
3. Add it to `monitoring/grafana/provisioning/dashboards/json/`
|
||||
4. Restart the monitoring stack
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If logs aren't appearing in Grafana:
|
||||
|
||||
1. Run the status check script to verify Loki and Grafana are working:
|
||||
```cmd
|
||||
tools\check-loki-status.bat
|
||||
```
|
||||
|
||||
2. Check that Loki and Grafana containers are running:
|
||||
```cmd
|
||||
docker ps | findstr "loki grafana"
|
||||
```
|
||||
|
||||
3. Verify .env configuration for Loki host and port:
|
||||
```cmd
|
||||
type .env | findstr "LOKI_"
|
||||
```
|
||||
|
||||
4. Ensure your service has the latest @stock-bot/logger package
|
||||
|
||||
5. Check for errors in the Loki container logs:
|
||||
```cmd
|
||||
docker logs stock-bot-loki
|
||||
```
|
||||
|
|
@@ -1,212 +0,0 @@
|
|||
# MongoDB Client Multi-Database Migration Guide
|
||||
|
||||
## Overview
|
||||
Your MongoDB client has been enhanced to support multiple databases dynamically while maintaining full backward compatibility.
|
||||
|
||||
## Key Features Added
|
||||
|
||||
### 1. **Dynamic Database Switching**
|
||||
```typescript
|
||||
// Set default database (all operations will use this unless overridden)
|
||||
client.setDefaultDatabase('analytics');
|
||||
|
||||
// Get current default database
|
||||
const currentDb = client.getDefaultDatabase(); // Returns: 'analytics'
|
||||
```
|
||||
|
||||
### 2. **Database Parameter in Methods**
|
||||
All methods now accept an optional `database` parameter:
|
||||
|
||||
```typescript
|
||||
// Old way (still works - uses default database)
|
||||
await client.batchUpsert('symbols', data, 'symbol');
|
||||
|
||||
// New way (specify database explicitly)
|
||||
await client.batchUpsert('symbols', data, 'symbol', { database: 'stock' });
|
||||
```
|
||||
|
||||
### 3. **Convenience Methods**
|
||||
Pre-configured methods for common databases:
|
||||
|
||||
```typescript
|
||||
// Stock database operations
|
||||
await client.batchUpsertStock('symbols', data, 'symbol');
|
||||
|
||||
// Analytics database operations
|
||||
await client.batchUpsertAnalytics('metrics', data, 'metric_name');
|
||||
|
||||
// Trading documents database operations
|
||||
await client.batchUpsertTrading('orders', data, 'order_id');
|
||||
```
|
||||
|
||||
### 4. **Direct Database Access**
|
||||
```typescript
|
||||
// Get specific database instances
|
||||
const stockDb = client.getDatabase('stock');
|
||||
const analyticsDb = client.getDatabase('analytics');
|
||||
|
||||
// Get collections with database override
|
||||
const collection = client.getCollection('symbols', 'stock');
|
||||
```
|
||||
|
||||
## Migration Steps
|
||||
|
||||
### Step 1: No Changes Required (Backward Compatible)
|
||||
Your existing code continues to work unchanged:
|
||||
|
||||
```typescript
|
||||
// This still works exactly as before
|
||||
const client = MongoDBClient.getInstance();
|
||||
await client.connect();
|
||||
await client.batchUpsert('exchanges', exchangeData, 'exchange_id');
|
||||
```
|
||||
|
||||
### Step 2: Organize Data by Database (Recommended)
|
||||
Update your data service to use appropriate databases:
|
||||
|
||||
```typescript
|
||||
// In your data service initialization
|
||||
export class DataService {
|
||||
private mongoClient = MongoDBClient.getInstance();
|
||||
|
||||
async initialize() {
|
||||
await this.mongoClient.connect();
|
||||
|
||||
// Set stock as default for most operations
|
||||
this.mongoClient.setDefaultDatabase('stock');
|
||||
}
|
||||
|
||||
async saveInteractiveBrokersData(exchanges: any[], symbols: any[]) {
|
||||
// Stock market data goes to 'stock' database (default)
|
||||
await this.mongoClient.batchUpsert('exchanges', exchanges, 'exchange_id');
|
||||
await this.mongoClient.batchUpsert('symbols', symbols, 'symbol');
|
||||
}
|
||||
|
||||
async saveAnalyticsData(performance: any[]) {
|
||||
// Analytics data goes to 'analytics' database
|
||||
await this.mongoClient.batchUpsert(
|
||||
'performance',
|
||||
performance,
|
||||
'date',
|
||||
{ database: 'analytics' }
|
||||
);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Step 3: Use Convenience Methods (Optional)
|
||||
Replace explicit database parameters with convenience methods:
|
||||
|
||||
```typescript
|
||||
// Instead of:
|
||||
await client.batchUpsert('symbols', data, 'symbol', { database: 'stock' });
|
||||
|
||||
// Use:
|
||||
await client.batchUpsertStock('symbols', data, 'symbol');
|
||||
```
|
||||
|
||||
## Factory Functions
|
||||
New factory functions are available for easier database management:
|
||||
|
||||
```typescript
|
||||
import {
|
||||
connectMongoDB,
|
||||
setDefaultDatabase,
|
||||
getCurrentDatabase,
|
||||
getDatabase
|
||||
} from '@stock-bot/mongodb-client';
|
||||
|
||||
// Set default database globally
|
||||
setDefaultDatabase('analytics');
|
||||
|
||||
// Get current default
|
||||
const current = getCurrentDatabase();
|
||||
|
||||
// Get specific database
|
||||
const stockDb = getDatabase('stock');
|
||||
```
|
||||
|
||||
## Database Recommendations
|
||||
|
||||
### Stock Database (`stock`)
|
||||
- Market data (symbols, exchanges, prices)
|
||||
- Financial instruments
|
||||
- Market events
|
||||
- Real-time data
|
||||
|
||||
### Analytics Database (`analytics`)
|
||||
- Performance metrics
|
||||
- Calculated indicators
|
||||
- Reports and dashboards
|
||||
- Aggregated data
|
||||
|
||||
### Trading Documents Database (`trading_documents`)
|
||||
- Trade orders and executions
|
||||
- User portfolios
|
||||
- Transaction logs
|
||||
- Audit trails
|
||||
|
||||
## Example: Updating Your Data Service
|
||||
|
||||
```typescript
|
||||
// Before (still works)
|
||||
export class DataService {
|
||||
async saveExchanges(exchanges: any[]) {
|
||||
const client = MongoDBClient.getInstance();
|
||||
await client.batchUpsert('exchanges', exchanges, 'exchange_id');
|
||||
}
|
||||
}
|
||||
|
||||
// After (recommended)
|
||||
export class DataService {
|
||||
private mongoClient = MongoDBClient.getInstance();
|
||||
|
||||
async initialize() {
|
||||
await this.mongoClient.connect();
|
||||
this.mongoClient.setDefaultDatabase('stock'); // Set appropriate default
|
||||
}
|
||||
|
||||
async saveExchanges(exchanges: any[]) {
|
||||
// Uses default 'stock' database
|
||||
await this.mongoClient.batchUpsert('exchanges', exchanges, 'exchange_id');
|
||||
|
||||
// Or use convenience method
|
||||
await this.mongoClient.batchUpsertStock('exchanges', exchanges, 'exchange_id');
|
||||
}
|
||||
|
||||
async savePerformanceMetrics(metrics: any[]) {
|
||||
// Save to analytics database
|
||||
await this.mongoClient.batchUpsertAnalytics('metrics', metrics, 'metric_name');
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Testing
|
||||
Your existing tests continue to work. For new multi-database features:
|
||||
|
||||
```typescript
|
||||
import { MongoDBClient } from '@stock-bot/mongodb-client';
|
||||
|
||||
const client = MongoDBClient.getInstance();
|
||||
await client.connect();
|
||||
|
||||
// Test database switching
|
||||
client.setDefaultDatabase('test_db');
|
||||
expect(client.getDefaultDatabase()).toBe('test_db');
|
||||
|
||||
// Test explicit database parameter
|
||||
await client.batchUpsert('test_collection', data, 'id', { database: 'other_db' });
|
||||
```
|
||||
|
||||
## Benefits
|
||||
1. **Organized Data**: Separate databases for different data types
|
||||
2. **Better Performance**: Smaller, focused databases
|
||||
3. **Easier Maintenance**: Clear data boundaries
|
||||
4. **Scalability**: Can scale databases independently
|
||||
5. **Backward Compatibility**: No breaking changes
|
||||
|
||||
## Next Steps
|
||||
1. Update your data service to use appropriate default database
|
||||
2. Gradually migrate to using specific databases for different data types
|
||||
3. Consider using convenience methods for cleaner code
|
||||
4. Update tests to cover multi-database scenarios
|
||||
Loading…
Add table
Add a link
Reference in a new issue