added initial integration tests with bun
This commit is contained in:
parent
3e451558ac
commit
fb22815450
52 changed files with 7588 additions and 364 deletions
472
libs/questdb-client/src/client.ts
Normal file
472
libs/questdb-client/src/client.ts
Normal file
|
|
@ -0,0 +1,472 @@
|
|||
import { Pool } from 'pg';
|
||||
import { questdbConfig } from '@stock-bot/config';
|
||||
import { Logger } from '@stock-bot/logger';
|
||||
import type {
|
||||
QuestDBClientConfig,
|
||||
QuestDBConnectionOptions,
|
||||
QueryResult,
|
||||
InsertResult,
|
||||
BaseTimeSeriesData,
|
||||
TableNames
|
||||
} from './types';
|
||||
import { QuestDBHealthMonitor } from './health';
|
||||
import { QuestDBQueryBuilder } from './query-builder';
|
||||
import { QuestDBInfluxWriter } from './influx-writer';
|
||||
import { QuestDBSchemaManager } from './schema';
|
||||
|
||||
/**
|
||||
* QuestDB Client for Stock Bot
|
||||
*
|
||||
* Provides high-performance time-series data access with support for
|
||||
* multiple protocols (HTTP, PostgreSQL, InfluxDB Line Protocol).
|
||||
*/
|
||||
export class QuestDBClient {
|
||||
private pgPool: Pool | null = null;
|
||||
private readonly config: QuestDBClientConfig;
|
||||
private readonly options: QuestDBConnectionOptions;
|
||||
private readonly logger: Logger;
|
||||
private readonly healthMonitor: QuestDBHealthMonitor;
|
||||
private readonly influxWriter: QuestDBInfluxWriter;
|
||||
private readonly schemaManager: QuestDBSchemaManager;
|
||||
private isConnected = false;
|
||||
|
||||
constructor(
|
||||
config?: Partial<QuestDBClientConfig>,
|
||||
options?: QuestDBConnectionOptions
|
||||
) {
|
||||
this.config = this.buildConfig(config);
|
||||
this.options = {
|
||||
protocol: 'pg',
|
||||
retryAttempts: 3,
|
||||
retryDelay: 1000,
|
||||
healthCheckInterval: 30000,
|
||||
...options
|
||||
};
|
||||
|
||||
this.logger = new Logger('QuestDBClient');
|
||||
this.healthMonitor = new QuestDBHealthMonitor(this);
|
||||
this.influxWriter = new QuestDBInfluxWriter(this);
|
||||
this.schemaManager = new QuestDBSchemaManager(this);
|
||||
}
|
||||
|
||||
/**
|
||||
* Connect to QuestDB
|
||||
*/
|
||||
async connect(): Promise<void> {
|
||||
if (this.isConnected) {
|
||||
return;
|
||||
}
|
||||
|
||||
let lastError: Error | null = null;
|
||||
|
||||
for (let attempt = 1; attempt <= this.options.retryAttempts!; attempt++) {
|
||||
try {
|
||||
this.logger.info(`Connecting to QuestDB (attempt ${attempt}/${this.options.retryAttempts})...`);
|
||||
|
||||
// Connect via PostgreSQL wire protocol
|
||||
this.pgPool = new Pool(this.buildPgPoolConfig());
|
||||
|
||||
// Test the connection
|
||||
const client = await this.pgPool.connect();
|
||||
await client.query('SELECT 1');
|
||||
client.release();
|
||||
|
||||
this.isConnected = true;
|
||||
this.logger.info('Successfully connected to QuestDB');
|
||||
// Initialize schema
|
||||
await this.schemaManager.initializeDatabase();
|
||||
|
||||
// Start health monitoring
|
||||
this.healthMonitor.startMonitoring();
|
||||
|
||||
return;
|
||||
} catch (error) {
|
||||
lastError = error as Error;
|
||||
this.logger.error(`QuestDB connection attempt ${attempt} failed:`, error);
|
||||
|
||||
if (this.pgPool) {
|
||||
await this.pgPool.end();
|
||||
this.pgPool = null;
|
||||
}
|
||||
|
||||
if (attempt < this.options.retryAttempts!) {
|
||||
await this.delay(this.options.retryDelay! * attempt);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
throw new Error(`Failed to connect to QuestDB after ${this.options.retryAttempts} attempts: ${lastError?.message}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Disconnect from QuestDB
|
||||
*/
|
||||
async disconnect(): Promise<void> {
|
||||
if (!this.isConnected) {
|
||||
return;
|
||||
} try {
|
||||
this.healthMonitor.stopMonitoring();
|
||||
|
||||
if (this.pgPool) {
|
||||
await this.pgPool.end();
|
||||
this.pgPool = null;
|
||||
}
|
||||
|
||||
this.isConnected = false;
|
||||
this.logger.info('Disconnected from QuestDB');
|
||||
} catch (error) {
|
||||
this.logger.error('Error disconnecting from QuestDB:', error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a SQL query
|
||||
*/
|
||||
async query<T = any>(sql: string, params?: any[]): Promise<QueryResult<T>> {
|
||||
if (!this.pgPool) {
|
||||
throw new Error('QuestDB client not connected');
|
||||
}
|
||||
|
||||
const startTime = Date.now();
|
||||
|
||||
try {
|
||||
const result = await this.pgPool.query(sql, params);
|
||||
const executionTime = Date.now() - startTime;
|
||||
|
||||
this.logger.debug(`Query executed in ${executionTime}ms`, {
|
||||
query: sql.substring(0, 100),
|
||||
rowCount: result.rowCount
|
||||
});
|
||||
|
||||
return {
|
||||
rows: result.rows,
|
||||
rowCount: result.rowCount || 0,
|
||||
executionTime, metadata: {
|
||||
columns: result.fields?.map((field: any) => ({
|
||||
name: field.name,
|
||||
type: this.mapDataType(field.dataTypeID)
|
||||
})) || []
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
const executionTime = Date.now() - startTime;
|
||||
this.logger.error(`Query failed after ${executionTime}ms:`, {
|
||||
error: (error as Error).message,
|
||||
query: sql,
|
||||
params
|
||||
});
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Write OHLCV data using InfluxDB Line Protocol
|
||||
*/
|
||||
async writeOHLCV(
|
||||
symbol: string,
|
||||
exchange: string,
|
||||
data: Array<{
|
||||
timestamp: Date;
|
||||
open: number;
|
||||
high: number;
|
||||
low: number;
|
||||
close: number;
|
||||
volume: number;
|
||||
}>
|
||||
): Promise<void> {
|
||||
return await this.influxWriter.writeOHLCV(symbol, exchange, data);
|
||||
}
|
||||
|
||||
/**
|
||||
* Write market analytics data
|
||||
*/
|
||||
async writeMarketAnalytics(
|
||||
symbol: string,
|
||||
exchange: string,
|
||||
analytics: {
|
||||
timestamp: Date;
|
||||
rsi?: number;
|
||||
macd?: number;
|
||||
signal?: number;
|
||||
histogram?: number;
|
||||
bollinger_upper?: number;
|
||||
bollinger_lower?: number;
|
||||
volume_sma?: number;
|
||||
}
|
||||
): Promise<void> {
|
||||
return await this.influxWriter.writeMarketAnalytics(symbol, exchange, analytics);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a query builder instance
|
||||
*/
|
||||
queryBuilder(): QuestDBQueryBuilder {
|
||||
return new QuestDBQueryBuilder(this);
|
||||
}
|
||||
/**
|
||||
* Create a SELECT query builder
|
||||
*/
|
||||
select(...columns: string[]): QuestDBQueryBuilder {
|
||||
return this.queryBuilder().select(...columns);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create an aggregation query builder
|
||||
*/
|
||||
aggregate(table: TableNames): QuestDBQueryBuilder {
|
||||
return this.queryBuilder().from(table);
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a time-series specific query with SAMPLE BY
|
||||
*/
|
||||
async sampleBy<T = any>(
|
||||
table: TableNames,
|
||||
columns: string[],
|
||||
interval: string,
|
||||
timeColumn: string = 'timestamp',
|
||||
where?: string,
|
||||
params?: any[]
|
||||
): Promise<QueryResult<T>> {
|
||||
const columnsStr = columns.join(', ');
|
||||
const whereClause = where ? `WHERE ${where}` : '';
|
||||
|
||||
const sql = `
|
||||
SELECT ${columnsStr}
|
||||
FROM ${table}
|
||||
${whereClause}
|
||||
SAMPLE BY ${interval}
|
||||
ALIGN TO CALENDAR
|
||||
`;
|
||||
|
||||
return await this.query<T>(sql, params);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get latest values by symbol using LATEST BY
|
||||
*/
|
||||
async latestBy<T = any>(
|
||||
table: TableNames,
|
||||
columns: string | string[] = '*',
|
||||
keyColumns: string | string[] = 'symbol'
|
||||
): Promise<QueryResult<T>> {
|
||||
const columnsStr = Array.isArray(columns) ? columns.join(', ') : columns;
|
||||
const keyColumnsStr = Array.isArray(keyColumns) ? keyColumns.join(', ') : keyColumns;
|
||||
|
||||
const sql = `
|
||||
SELECT ${columnsStr}
|
||||
FROM ${table}
|
||||
LATEST BY ${keyColumnsStr}
|
||||
`;
|
||||
|
||||
return await this.query<T>(sql);
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute ASOF JOIN for time-series correlation
|
||||
*/
|
||||
async asofJoin<T = any>(
|
||||
leftTable: TableNames,
|
||||
rightTable: TableNames,
|
||||
joinCondition: string,
|
||||
columns?: string[],
|
||||
where?: string,
|
||||
params?: any[]
|
||||
): Promise<QueryResult<T>> {
|
||||
const columnsStr = columns ? columns.join(', ') : '*';
|
||||
const whereClause = where ? `WHERE ${where}` : '';
|
||||
|
||||
const sql = `
|
||||
SELECT ${columnsStr}
|
||||
FROM ${leftTable}
|
||||
ASOF JOIN ${rightTable} ON ${joinCondition}
|
||||
${whereClause}
|
||||
`;
|
||||
|
||||
return await this.query<T>(sql, params);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get database statistics
|
||||
*/
|
||||
async getStats(): Promise<any> {
|
||||
const result = await this.query(`
|
||||
SELECT
|
||||
table_name,
|
||||
row_count,
|
||||
partition_count,
|
||||
size_bytes
|
||||
FROM tables()
|
||||
WHERE table_name NOT LIKE 'sys.%'
|
||||
ORDER BY row_count DESC
|
||||
`);
|
||||
return result.rows;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get table information
|
||||
*/
|
||||
async getTableInfo(tableName: string): Promise<any> {
|
||||
const result = await this.query(
|
||||
`SELECT * FROM table_columns WHERE table_name = ?`,
|
||||
[tableName]
|
||||
);
|
||||
return result.rows;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if PostgreSQL pool is healthy
|
||||
*/
|
||||
isPgPoolHealthy(): boolean {
|
||||
return this.pgPool !== null && !this.pgPool.ended;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get HTTP endpoint URL
|
||||
*/
|
||||
getHttpUrl(): string {
|
||||
const protocol = this.config.tls?.enabled ? 'https' : 'http';
|
||||
return `${protocol}://${this.config.host}:${this.config.httpPort}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get InfluxDB endpoint URL
|
||||
*/
|
||||
getInfluxUrl(): string {
|
||||
const protocol = this.config.tls?.enabled ? 'https' : 'http';
|
||||
return `${protocol}://${this.config.host}:${this.config.influxPort}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get health monitor instance
|
||||
*/
|
||||
getHealthMonitor(): QuestDBHealthMonitor {
|
||||
return this.healthMonitor;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get schema manager instance
|
||||
*/
|
||||
getSchemaManager(): QuestDBSchemaManager {
|
||||
return this.schemaManager;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get InfluxDB writer instance
|
||||
*/
|
||||
getInfluxWriter(): QuestDBInfluxWriter {
|
||||
return this.influxWriter;
|
||||
}
|
||||
|
||||
/**
|
||||
* Optimize table by rebuilding partitions
|
||||
*/
|
||||
async optimizeTable(tableName: string): Promise<void> {
|
||||
await this.query(`VACUUM TABLE ${tableName}`);
|
||||
this.logger.info(`Optimized table: ${tableName}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a table with time-series optimizations
|
||||
*/
|
||||
async createTable(
|
||||
tableName: string,
|
||||
columns: string,
|
||||
partitionBy: string = 'DAY',
|
||||
timestampColumn: string = 'timestamp'
|
||||
): Promise<void> {
|
||||
const sql = `
|
||||
CREATE TABLE IF NOT EXISTS ${tableName} (
|
||||
${columns}
|
||||
) TIMESTAMP(${timestampColumn}) PARTITION BY ${partitionBy}
|
||||
`;
|
||||
|
||||
await this.query(sql);
|
||||
this.logger.info(`Created table: ${tableName}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if client is connected
|
||||
*/
|
||||
get connected(): boolean {
|
||||
return this.isConnected && !!this.pgPool;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the PostgreSQL connection pool
|
||||
*/
|
||||
get connectionPool(): Pool | null {
|
||||
return this.pgPool;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get configuration
|
||||
*/
|
||||
get configuration(): QuestDBClientConfig {
|
||||
return { ...this.config };
|
||||
}
|
||||
|
||||
private buildConfig(config?: Partial<QuestDBClientConfig>): QuestDBClientConfig {
|
||||
return {
|
||||
host: config?.host || questdbConfig.QUESTDB_HOST,
|
||||
httpPort: config?.httpPort || questdbConfig.QUESTDB_HTTP_PORT,
|
||||
pgPort: config?.pgPort || questdbConfig.QUESTDB_PG_PORT,
|
||||
influxPort: config?.influxPort || questdbConfig.QUESTDB_INFLUX_PORT,
|
||||
user: config?.user || questdbConfig.QUESTDB_USER,
|
||||
password: config?.password || questdbConfig.QUESTDB_PASSWORD,
|
||||
database: config?.database || questdbConfig.QUESTDB_DEFAULT_DATABASE,
|
||||
tls: {
|
||||
enabled: questdbConfig.QUESTDB_TLS_ENABLED,
|
||||
verifyServerCert: questdbConfig.QUESTDB_TLS_VERIFY_SERVER_CERT,
|
||||
...config?.tls
|
||||
},
|
||||
timeouts: {
|
||||
connection: questdbConfig.QUESTDB_CONNECTION_TIMEOUT,
|
||||
request: questdbConfig.QUESTDB_REQUEST_TIMEOUT,
|
||||
...config?.timeouts
|
||||
},
|
||||
retryAttempts: questdbConfig.QUESTDB_RETRY_ATTEMPTS,
|
||||
...config
|
||||
};
|
||||
}
|
||||
|
||||
private buildPgPoolConfig(): any {
|
||||
return {
|
||||
host: this.config.host,
|
||||
port: this.config.pgPort,
|
||||
database: this.config.database,
|
||||
user: this.config.user,
|
||||
password: this.config.password,
|
||||
connectionTimeoutMillis: this.config.timeouts?.connection,
|
||||
query_timeout: this.config.timeouts?.request,
|
||||
ssl: this.config.tls?.enabled ? {
|
||||
rejectUnauthorized: this.config.tls.verifyServerCert
|
||||
} : false,
|
||||
min: 2,
|
||||
max: 10
|
||||
};
|
||||
}
|
||||
|
||||
private mapDataType(typeId: number): string {
|
||||
// Map PostgreSQL type IDs to QuestDB types
|
||||
const typeMap: Record<number, string> = {
|
||||
16: 'BOOLEAN',
|
||||
20: 'LONG',
|
||||
21: 'INT',
|
||||
23: 'INT',
|
||||
25: 'STRING',
|
||||
700: 'FLOAT',
|
||||
701: 'DOUBLE',
|
||||
1043: 'STRING',
|
||||
1082: 'DATE',
|
||||
1114: 'TIMESTAMP',
|
||||
1184: 'TIMESTAMP'
|
||||
};
|
||||
|
||||
return typeMap[typeId] || 'STRING';
|
||||
}
|
||||
|
||||
private delay(ms: number): Promise<void> {
|
||||
return new Promise(resolve => setTimeout(resolve, ms));
|
||||
}
|
||||
}
|
||||
63
libs/questdb-client/src/factory.ts
Normal file
63
libs/questdb-client/src/factory.ts
Normal file
|
|
@ -0,0 +1,63 @@
|
|||
import { QuestDBClient } from './client';
|
||||
import { questdbConfig } from '@stock-bot/config';
|
||||
import type { QuestDBClientConfig, QuestDBConnectionOptions } from './types';
|
||||
|
||||
/**
|
||||
* Factory function to create a QuestDB client instance
|
||||
*/
|
||||
export function createQuestDBClient(
|
||||
config?: Partial<QuestDBClientConfig>,
|
||||
options?: QuestDBConnectionOptions
|
||||
): QuestDBClient {
|
||||
return new QuestDBClient(config, options);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a QuestDB client with default configuration
|
||||
*/
|
||||
export function createDefaultQuestDBClient(): QuestDBClient {
|
||||
const config: Partial<QuestDBClientConfig> = {
|
||||
host: questdbConfig.QUESTDB_HOST,
|
||||
httpPort: questdbConfig.QUESTDB_HTTP_PORT,
|
||||
pgPort: questdbConfig.QUESTDB_PG_PORT,
|
||||
influxPort: questdbConfig.QUESTDB_INFLUX_PORT,
|
||||
user: questdbConfig.QUESTDB_USER,
|
||||
password: questdbConfig.QUESTDB_PASSWORD
|
||||
};
|
||||
|
||||
return new QuestDBClient(config);
|
||||
}
|
||||
|
||||
/**
|
||||
* Singleton QuestDB client instance
|
||||
*/
|
||||
let defaultClient: QuestDBClient | null = null;
|
||||
|
||||
/**
|
||||
* Get or create the default QuestDB client instance
|
||||
*/
|
||||
export function getQuestDBClient(): QuestDBClient {
|
||||
if (!defaultClient) {
|
||||
defaultClient = createDefaultQuestDBClient();
|
||||
}
|
||||
return defaultClient;
|
||||
}
|
||||
|
||||
/**
|
||||
* Connect to QuestDB using the default client
|
||||
*/
|
||||
export async function connectQuestDB(): Promise<QuestDBClient> {
|
||||
const client = getQuestDBClient();
|
||||
await client.connect();
|
||||
return client;
|
||||
}
|
||||
|
||||
/**
|
||||
* Disconnect from QuestDB
|
||||
*/
|
||||
export async function disconnectQuestDB(): Promise<void> {
|
||||
if (defaultClient) {
|
||||
await defaultClient.disconnect();
|
||||
defaultClient = null;
|
||||
}
|
||||
}
|
||||
233
libs/questdb-client/src/health.ts
Normal file
233
libs/questdb-client/src/health.ts
Normal file
|
|
@ -0,0 +1,233 @@
|
|||
import { Logger } from '@stock-bot/logger';
|
||||
import type { HealthStatus, PerformanceMetrics, QueryResult } from './types';
|
||||
|
||||
// Interface to avoid circular dependency
|
||||
interface QuestDBClientInterface {
|
||||
query<T = any>(sql: string, params?: any[]): Promise<QueryResult<T>>;
|
||||
isPgPoolHealthy(): boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* QuestDB Health Monitor
|
||||
*
|
||||
* Monitors connection health, performance metrics, and provides
|
||||
* automatic recovery capabilities for the QuestDB client.
|
||||
*/
|
||||
export class QuestDBHealthMonitor {
|
||||
private readonly logger: Logger;
|
||||
private healthCheckInterval: NodeJS.Timeout | null = null;
|
||||
private lastHealthCheck: Date | null = null;
|
||||
private performanceMetrics: PerformanceMetrics = {
|
||||
totalQueries: 0,
|
||||
successfulQueries: 0,
|
||||
failedQueries: 0,
|
||||
averageResponseTime: 0,
|
||||
lastQueryTime: null,
|
||||
connectionUptime: 0,
|
||||
memoryUsage: 0
|
||||
};
|
||||
constructor(private readonly client: QuestDBClientInterface) {
|
||||
this.logger = new Logger('QuestDBHealthMonitor');
|
||||
}
|
||||
|
||||
/**
|
||||
* Start health monitoring
|
||||
*/
|
||||
public startMonitoring(intervalMs: number = 30000): void {
|
||||
if (this.healthCheckInterval) {
|
||||
this.stopMonitoring();
|
||||
}
|
||||
|
||||
this.logger.info(`Starting health monitoring with ${intervalMs}ms interval`);
|
||||
|
||||
this.healthCheckInterval = setInterval(async () => {
|
||||
try {
|
||||
await this.performHealthCheck();
|
||||
} catch (error) {
|
||||
this.logger.error('Health check failed', { error });
|
||||
}
|
||||
}, intervalMs);
|
||||
|
||||
// Perform initial health check
|
||||
this.performHealthCheck().catch(error => {
|
||||
this.logger.error('Initial health check failed', { error });
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Stop health monitoring
|
||||
*/
|
||||
public stopMonitoring(): void {
|
||||
if (this.healthCheckInterval) {
|
||||
clearInterval(this.healthCheckInterval);
|
||||
this.healthCheckInterval = null;
|
||||
this.logger.info('Health monitoring stopped');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Perform a health check
|
||||
*/
|
||||
public async performHealthCheck(): Promise<HealthStatus> {
|
||||
const startTime = Date.now();
|
||||
|
||||
try {
|
||||
// Test basic connectivity with a simple query
|
||||
await this.client.query('SELECT 1 as health_check');
|
||||
|
||||
const responseTime = Date.now() - startTime;
|
||||
this.lastHealthCheck = new Date();
|
||||
|
||||
const status: HealthStatus = {
|
||||
isHealthy: true,
|
||||
lastCheck: this.lastHealthCheck,
|
||||
responseTime,
|
||||
message: 'Connection healthy',
|
||||
details: {
|
||||
pgPool: this.client.isPgPoolHealthy(),
|
||||
httpEndpoint: true, // Will be implemented when HTTP client is added
|
||||
uptime: this.getUptime()
|
||||
}
|
||||
};
|
||||
|
||||
this.logger.debug('Health check passed', { responseTime });
|
||||
return status;
|
||||
|
||||
} catch (error) {
|
||||
const responseTime = Date.now() - startTime;
|
||||
this.lastHealthCheck = new Date();
|
||||
|
||||
const status: HealthStatus = {
|
||||
isHealthy: false,
|
||||
lastCheck: this.lastHealthCheck,
|
||||
responseTime,
|
||||
message: `Health check failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
|
||||
error: error instanceof Error ? error : new Error('Unknown error'),
|
||||
details: {
|
||||
pgPool: false,
|
||||
httpEndpoint: false,
|
||||
uptime: this.getUptime()
|
||||
}
|
||||
};
|
||||
|
||||
this.logger.error('Health check failed', { error, responseTime });
|
||||
return status;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current health status
|
||||
*/
|
||||
public async getHealthStatus(): Promise<HealthStatus> {
|
||||
if (!this.lastHealthCheck || Date.now() - this.lastHealthCheck.getTime() > 60000) {
|
||||
return await this.performHealthCheck();
|
||||
}
|
||||
|
||||
// Return cached status if recent
|
||||
return {
|
||||
isHealthy: true,
|
||||
lastCheck: this.lastHealthCheck,
|
||||
responseTime: 0,
|
||||
message: 'Using cached health status',
|
||||
details: {
|
||||
pgPool: this.client.isPgPoolHealthy(),
|
||||
httpEndpoint: true,
|
||||
uptime: this.getUptime()
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Record query performance metrics
|
||||
*/
|
||||
public recordQuery(success: boolean, responseTime: number): void {
|
||||
this.performanceMetrics.totalQueries++;
|
||||
this.performanceMetrics.lastQueryTime = new Date();
|
||||
|
||||
if (success) {
|
||||
this.performanceMetrics.successfulQueries++;
|
||||
} else {
|
||||
this.performanceMetrics.failedQueries++;
|
||||
}
|
||||
|
||||
// Update rolling average response time
|
||||
const totalResponseTime = this.performanceMetrics.averageResponseTime *
|
||||
(this.performanceMetrics.totalQueries - 1) + responseTime;
|
||||
this.performanceMetrics.averageResponseTime =
|
||||
totalResponseTime / this.performanceMetrics.totalQueries;
|
||||
|
||||
// Update memory usage
|
||||
this.performanceMetrics.memoryUsage = process.memoryUsage().heapUsed;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get performance metrics
|
||||
*/
|
||||
public getPerformanceMetrics(): PerformanceMetrics {
|
||||
return { ...this.performanceMetrics };
|
||||
}
|
||||
|
||||
/**
|
||||
* Get connection uptime in seconds
|
||||
*/
|
||||
private getUptime(): number {
|
||||
return Math.floor(process.uptime());
|
||||
}
|
||||
|
||||
/**
|
||||
* Reset performance metrics
|
||||
*/
|
||||
public resetMetrics(): void {
|
||||
this.performanceMetrics = {
|
||||
totalQueries: 0,
|
||||
successfulQueries: 0,
|
||||
failedQueries: 0,
|
||||
averageResponseTime: 0,
|
||||
lastQueryTime: null,
|
||||
connectionUptime: this.getUptime(),
|
||||
memoryUsage: process.memoryUsage().heapUsed
|
||||
};
|
||||
|
||||
this.logger.info('Performance metrics reset');
|
||||
}
|
||||
|
||||
/**
|
||||
* Get health summary for monitoring dashboards
|
||||
*/
|
||||
public async getHealthSummary(): Promise<{
|
||||
status: HealthStatus;
|
||||
metrics: PerformanceMetrics;
|
||||
recommendations: string[];
|
||||
}> {
|
||||
const status = await this.getHealthStatus();
|
||||
const metrics = this.getPerformanceMetrics();
|
||||
const recommendations: string[] = [];
|
||||
|
||||
// Generate recommendations based on metrics
|
||||
if (metrics.failedQueries > metrics.successfulQueries * 0.1) {
|
||||
recommendations.push('High error rate detected - check query patterns');
|
||||
}
|
||||
|
||||
if (metrics.averageResponseTime > 1000) {
|
||||
recommendations.push('High response times - consider query optimization');
|
||||
}
|
||||
|
||||
if (metrics.memoryUsage > 100 * 1024 * 1024) { // 100MB
|
||||
recommendations.push('High memory usage - monitor for memory leaks');
|
||||
}
|
||||
|
||||
return {
|
||||
status,
|
||||
metrics,
|
||||
recommendations
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Cleanup resources
|
||||
*/
|
||||
public destroy(): void {
|
||||
this.stopMonitoring();
|
||||
this.logger.info('Health monitor destroyed');
|
||||
}
|
||||
}
|
||||
32
libs/questdb-client/src/index.ts
Normal file
32
libs/questdb-client/src/index.ts
Normal file
|
|
@ -0,0 +1,32 @@
|
|||
/**
|
||||
* QuestDB Client Library for Stock Bot
|
||||
*
|
||||
* Provides high-performance time-series data access with support for
|
||||
* InfluxDB Line Protocol, SQL queries, and PostgreSQL wire protocol.
|
||||
*/
|
||||
|
||||
// Core classes
export { QuestDBClient } from './client';
export { QuestDBHealthMonitor } from './health';
export { QuestDBQueryBuilder } from './query-builder';
export { QuestDBInfluxWriter } from './influx-writer';
export { QuestDBSchemaManager } from './schema';

// Types
// NOTE(review): health.ts imports `HealthStatus` / `PerformanceMetrics`
// from './types', while this barrel re-exports `QuestDBHealthStatus` /
// `QuestDBMetrics` — confirm both naming families exist in types.ts.
export type {
  QuestDBClientConfig,
  QuestDBConnectionOptions,
  QuestDBHealthStatus,
  QuestDBMetrics,
  TableNames,
  OHLCVData,
  TradeData,
  QuoteData,
  IndicatorData,
  PerformanceData,
  RiskMetrics,
  QueryResult,
  InsertResult
} from './types';

// Utils
// NOTE(review): factory.ts also defines `createDefaultQuestDBClient`,
// `connectQuestDB` and `disconnectQuestDB`, which are not re-exported
// here — confirm that is intentional.
export { createQuestDBClient, getQuestDBClient } from './factory';
|
||||
436
libs/questdb-client/src/influx-writer.ts
Normal file
436
libs/questdb-client/src/influx-writer.ts
Normal file
|
|
@ -0,0 +1,436 @@
|
|||
import { Logger } from '@stock-bot/logger';
|
||||
import type {
|
||||
InfluxLineData,
|
||||
InfluxWriteOptions,
|
||||
BaseTimeSeriesData
|
||||
} from './types';
|
||||
|
||||
// Interface to avoid circular dependency
|
||||
interface QuestDBClientInterface {
|
||||
getHttpUrl(): string;
|
||||
}
|
||||
|
||||
/**
|
||||
* QuestDB InfluxDB Line Protocol Writer
|
||||
*
|
||||
* Provides high-performance data ingestion using InfluxDB Line Protocol
|
||||
* which QuestDB supports natively for optimal time-series data insertion.
|
||||
*/
|
||||
export class QuestDBInfluxWriter {
|
||||
private readonly logger: Logger;
|
||||
private writeBuffer: string[] = [];
|
||||
private flushTimer: NodeJS.Timeout | null = null;
|
||||
private readonly defaultOptions: Required<InfluxWriteOptions> = {
|
||||
batchSize: 1000,
|
||||
flushInterval: 5000,
|
||||
autoFlush: true,
|
||||
precision: 'ms',
|
||||
retryAttempts: 3,
|
||||
retryDelay: 1000
|
||||
};
|
||||
constructor(private readonly client: QuestDBClientInterface) {
|
||||
this.logger = new Logger('QuestDBInfluxWriter');
|
||||
}
|
||||
|
||||
/**
|
||||
* Write single data point using InfluxDB Line Protocol
|
||||
*/
|
||||
  /**
   * Write a single data point via InfluxDB Line Protocol.
   *
   * Behavior depends on the buffer state:
   * - autoFlush with an EMPTY buffer: the point is sent immediately;
   * - otherwise it is appended to the buffer; with autoFlush a delayed
   *   flush is scheduled, and a full buffer (>= batchSize) is flushed
   *   right away.
   *
   * @param measurement Line-protocol measurement (table) name.
   * @param tags        Tag key/value pairs (indexed dimensions).
   * @param fields      Field key/value pairs (the actual values).
   * @param timestamp   Optional event time; omitted means server-assigned.
   * @param options     Per-call overrides of the writer defaults.
   */
  public async writePoint(
    measurement: string,
    tags: Record<string, string>,
    fields: Record<string, number | string | boolean>,
    timestamp?: Date,
    options?: Partial<InfluxWriteOptions>
  ): Promise<void> {
    const line = this.buildLineProtocol(measurement, tags, fields, timestamp);
    const opts = { ...this.defaultOptions, ...options };

    if (opts.autoFlush && this.writeBuffer.length === 0) {
      // Single point write - send immediately
      await this.sendLines([line], opts);
    } else {
      // Add to buffer
      this.writeBuffer.push(line);

      if (opts.autoFlush) {
        this.scheduleFlush(opts);
      }

      // Flush if buffer is full
      if (this.writeBuffer.length >= opts.batchSize) {
        await this.flush(opts);
      }
    }
  }
|
||||
|
||||
/**
|
||||
* Write multiple data points
|
||||
*/
|
||||
  /**
   * Write a batch of data points via InfluxDB Line Protocol.
   *
   * With autoFlush the whole batch is sent immediately; otherwise the
   * lines are buffered and drained in batchSize-sized chunks whenever the
   * buffer reaches batchSize.
   *
   * @param data    Points to encode and write.
   * @param options Per-call overrides of the writer defaults.
   */
  public async writePoints(
    data: InfluxLineData[],
    options?: Partial<InfluxWriteOptions>
  ): Promise<void> {
    const opts = { ...this.defaultOptions, ...options };
    const lines = data.map(point =>
      this.buildLineProtocol(point.measurement, point.tags, point.fields, point.timestamp)
    );

    if (opts.autoFlush) {
      // Send immediately for batch writes
      await this.sendLines(lines, opts);
    } else {
      // Add to buffer
      this.writeBuffer.push(...lines);

      // Flush if buffer exceeds batch size
      while (this.writeBuffer.length >= opts.batchSize) {
        const batch = this.writeBuffer.splice(0, opts.batchSize);
        await this.sendLines(batch, opts);
      }
    }
  }
|
||||
|
||||
/**
|
||||
* Write OHLCV data optimized for QuestDB
|
||||
*/
|
||||
public async writeOHLCV(
|
||||
symbol: string,
|
||||
exchange: string,
|
||||
data: {
|
||||
timestamp: Date;
|
||||
open: number;
|
||||
high: number;
|
||||
low: number;
|
||||
close: number;
|
||||
volume: number;
|
||||
}[],
|
||||
options?: Partial<InfluxWriteOptions>
|
||||
): Promise<void> {
|
||||
const influxData: InfluxLineData[] = data.map(candle => ({
|
||||
measurement: 'ohlcv_data',
|
||||
tags: {
|
||||
symbol,
|
||||
exchange,
|
||||
data_source: 'market_feed'
|
||||
},
|
||||
fields: {
|
||||
open: candle.open,
|
||||
high: candle.high,
|
||||
low: candle.low,
|
||||
close: candle.close,
|
||||
volume: candle.volume
|
||||
},
|
||||
timestamp: candle.timestamp
|
||||
}));
|
||||
|
||||
await this.writePoints(influxData, options);
|
||||
}
|
||||
|
||||
/**
|
||||
* Write market analytics data
|
||||
*/
|
||||
public async writeMarketAnalytics(
|
||||
symbol: string,
|
||||
exchange: string,
|
||||
analytics: {
|
||||
timestamp: Date;
|
||||
rsi?: number;
|
||||
macd?: number;
|
||||
signal?: number;
|
||||
histogram?: number;
|
||||
bollinger_upper?: number;
|
||||
bollinger_lower?: number;
|
||||
volume_sma?: number;
|
||||
},
|
||||
options?: Partial<InfluxWriteOptions>
|
||||
): Promise<void> {
|
||||
const fields: Record<string, number> = {};
|
||||
|
||||
// Only include defined values
|
||||
Object.entries(analytics).forEach(([key, value]) => {
|
||||
if (key !== 'timestamp' && value !== undefined && value !== null) {
|
||||
fields[key] = value as number;
|
||||
}
|
||||
});
|
||||
|
||||
if (Object.keys(fields).length === 0) {
|
||||
this.logger.warn('No analytics fields to write', { symbol, timestamp: analytics.timestamp });
|
||||
return;
|
||||
}
|
||||
|
||||
await this.writePoint(
|
||||
'market_analytics',
|
||||
{ symbol, exchange },
|
||||
fields,
|
||||
analytics.timestamp,
|
||||
options
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Write trade execution data
|
||||
*/
|
||||
public async writeTradeExecution(
|
||||
execution: {
|
||||
symbol: string;
|
||||
side: 'buy' | 'sell';
|
||||
quantity: number;
|
||||
price: number;
|
||||
timestamp: Date;
|
||||
executionTime: number;
|
||||
orderId?: string;
|
||||
strategy?: string;
|
||||
},
|
||||
options?: Partial<InfluxWriteOptions>
|
||||
): Promise<void> {
|
||||
const tags: Record<string, string> = {
|
||||
symbol: execution.symbol,
|
||||
side: execution.side
|
||||
};
|
||||
|
||||
if (execution.orderId) {
|
||||
tags.order_id = execution.orderId;
|
||||
}
|
||||
|
||||
if (execution.strategy) {
|
||||
tags.strategy = execution.strategy;
|
||||
}
|
||||
|
||||
await this.writePoint(
|
||||
'trade_executions',
|
||||
tags,
|
||||
{
|
||||
quantity: execution.quantity,
|
||||
price: execution.price,
|
||||
execution_time: execution.executionTime
|
||||
},
|
||||
execution.timestamp,
|
||||
options
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Write performance metrics
|
||||
*/
|
||||
public async writePerformanceMetrics(
|
||||
metrics: {
|
||||
timestamp: Date;
|
||||
operation: string;
|
||||
responseTime: number;
|
||||
success: boolean;
|
||||
errorCode?: string;
|
||||
},
|
||||
options?: Partial<InfluxWriteOptions>
|
||||
): Promise<void> {
|
||||
const tags: Record<string, string> = {
|
||||
operation: metrics.operation,
|
||||
success: metrics.success.toString()
|
||||
};
|
||||
|
||||
if (metrics.errorCode) {
|
||||
tags.error_code = metrics.errorCode;
|
||||
}
|
||||
|
||||
await this.writePoint(
|
||||
'performance_metrics',
|
||||
tags,
|
||||
{ response_time: metrics.responseTime },
|
||||
metrics.timestamp,
|
||||
options
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Manually flush the write buffer
|
||||
*/
|
||||
public async flush(options?: Partial<InfluxWriteOptions>): Promise<void> {
|
||||
if (this.writeBuffer.length === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
const opts = { ...this.defaultOptions, ...options };
|
||||
const lines = this.writeBuffer.splice(0); // Clear buffer
|
||||
|
||||
if (this.flushTimer) {
|
||||
clearTimeout(this.flushTimer);
|
||||
this.flushTimer = null;
|
||||
}
|
||||
|
||||
await this.sendLines(lines, opts);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current buffer size
|
||||
*/
|
||||
public getBufferSize(): number {
|
||||
return this.writeBuffer.length;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear the buffer without writing
|
||||
*/
|
||||
public clearBuffer(): void {
|
||||
this.writeBuffer.length = 0;
|
||||
if (this.flushTimer) {
|
||||
clearTimeout(this.flushTimer);
|
||||
this.flushTimer = null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Build InfluxDB Line Protocol string
|
||||
*/
|
||||
private buildLineProtocol(
|
||||
measurement: string,
|
||||
tags: Record<string, string>,
|
||||
fields: Record<string, number | string | boolean>,
|
||||
timestamp?: Date
|
||||
): string {
|
||||
// Escape special characters in measurement name
|
||||
const escapedMeasurement = measurement.replace(/[, =]/g, '\\$&');
|
||||
|
||||
// Build tags string
|
||||
const tagString = Object.entries(tags)
|
||||
.filter(([_, value]) => value !== undefined && value !== null)
|
||||
.map(([key, value]) => `${this.escapeTagKey(key)}=${this.escapeTagValue(value)}`)
|
||||
.join(',');
|
||||
|
||||
// Build fields string
|
||||
const fieldString = Object.entries(fields)
|
||||
.filter(([_, value]) => value !== undefined && value !== null)
|
||||
.map(([key, value]) => `${this.escapeFieldKey(key)}=${this.formatFieldValue(value)}`)
|
||||
.join(',');
|
||||
|
||||
// Build timestamp
|
||||
const timestampString = timestamp ?
|
||||
Math.floor(timestamp.getTime() * 1000000).toString() : // Convert to nanoseconds
|
||||
'';
|
||||
|
||||
// Combine parts
|
||||
let line = escapedMeasurement;
|
||||
if (tagString) {
|
||||
line += `,${tagString}`;
|
||||
}
|
||||
line += ` ${fieldString}`;
|
||||
if (timestampString) {
|
||||
line += ` ${timestampString}`;
|
||||
}
|
||||
|
||||
return line;
|
||||
}
|
||||
|
||||
/**
|
||||
* Send lines to QuestDB via HTTP endpoint
|
||||
*/
|
||||
private async sendLines(
|
||||
lines: string[],
|
||||
options: Required<InfluxWriteOptions>
|
||||
): Promise<void> {
|
||||
if (lines.length === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
const payload = lines.join('\n');
|
||||
let attempt = 0;
|
||||
|
||||
while (attempt <= options.retryAttempts) {
|
||||
try {
|
||||
// QuestDB InfluxDB Line Protocol endpoint
|
||||
const response = await fetch(`${this.client.getHttpUrl()}/write`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'text/plain',
|
||||
},
|
||||
body: payload
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`HTTP ${response.status}: ${response.statusText}`);
|
||||
}
|
||||
|
||||
this.logger.debug(`Successfully wrote ${lines.length} lines to QuestDB`);
|
||||
return;
|
||||
|
||||
} catch (error) {
|
||||
attempt++;
|
||||
this.logger.error(`Write attempt ${attempt} failed`, {
|
||||
error,
|
||||
linesCount: lines.length,
|
||||
willRetry: attempt <= options.retryAttempts
|
||||
});
|
||||
|
||||
if (attempt <= options.retryAttempts) {
|
||||
await this.sleep(options.retryDelay * attempt); // Exponential backoff
|
||||
} else {
|
||||
throw new Error(`Failed to write to QuestDB after ${options.retryAttempts} attempts: ${error}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Schedule automatic flush
|
||||
*/
|
||||
private scheduleFlush(options: Required<InfluxWriteOptions>): void {
|
||||
if (this.flushTimer || !options.autoFlush) {
|
||||
return;
|
||||
}
|
||||
|
||||
this.flushTimer = setTimeout(async () => {
|
||||
try {
|
||||
await this.flush(options);
|
||||
} catch (error) {
|
||||
this.logger.error('Scheduled flush failed', { error });
|
||||
}
|
||||
}, options.flushInterval);
|
||||
}
|
||||
|
||||
/**
|
||||
* Format field value for InfluxDB Line Protocol
|
||||
*/
|
||||
private formatFieldValue(value: number | string | boolean): string {
|
||||
if (typeof value === 'string') {
|
||||
return `"${value.replace(/"/g, '\\"')}"`;
|
||||
} else if (typeof value === 'boolean') {
|
||||
return value ? 'true' : 'false';
|
||||
} else {
|
||||
return value.toString();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Escape tag key
|
||||
*/
|
||||
private escapeTagKey(key: string): string {
|
||||
return key.replace(/[, =]/g, '\\$&');
|
||||
}
|
||||
|
||||
/**
|
||||
* Escape tag value
|
||||
*/
|
||||
private escapeTagValue(value: string): string {
|
||||
return value.replace(/[, =]/g, '\\$&');
|
||||
}
|
||||
|
||||
/**
|
||||
* Escape field key
|
||||
*/
|
||||
private escapeFieldKey(key: string): string {
|
||||
return key.replace(/[, =]/g, '\\$&');
|
||||
}
|
||||
|
||||
/**
|
||||
* Sleep utility
|
||||
*/
|
||||
private sleep(ms: number): Promise<void> {
|
||||
return new Promise(resolve => setTimeout(resolve, ms));
|
||||
}
|
||||
|
||||
/**
|
||||
* Cleanup resources
|
||||
*/
|
||||
public destroy(): void {
|
||||
this.clearBuffer();
|
||||
this.logger.info('InfluxDB writer destroyed');
|
||||
}
|
||||
}
|
||||
368
libs/questdb-client/src/query-builder.ts
Normal file
368
libs/questdb-client/src/query-builder.ts
Normal file
|
|
@ -0,0 +1,368 @@
|
|||
import { Logger } from '@stock-bot/logger';
|
||||
import type {
|
||||
QueryResult,
|
||||
TimeSeriesQuery,
|
||||
AggregationQuery,
|
||||
TimeRange,
|
||||
TableNames
|
||||
} from './types';
|
||||
|
||||
// Interface to avoid circular dependency
|
||||
interface QuestDBClientInterface {
|
||||
query<T = any>(sql: string, params?: any[]): Promise<QueryResult<T>>;
|
||||
}
|
||||
|
||||
/**
|
||||
* QuestDB Query Builder
|
||||
*
|
||||
* Provides a fluent interface for building optimized time-series queries
|
||||
* with support for QuestDB-specific functions and optimizations.
|
||||
*/
|
||||
export class QuestDBQueryBuilder {
|
||||
private readonly logger: Logger;
|
||||
private query!: {
|
||||
select: string[];
|
||||
from: string;
|
||||
where: string[];
|
||||
groupBy: string[];
|
||||
orderBy: string[];
|
||||
limit?: number;
|
||||
sampleBy?: string;
|
||||
latestBy?: string[];
|
||||
timeRange?: TimeRange;
|
||||
};
|
||||
constructor(private readonly client: QuestDBClientInterface) {
|
||||
this.logger = new Logger('QuestDBQueryBuilder');
|
||||
this.reset();
|
||||
}
|
||||
|
||||
/**
|
||||
* Reset the query builder
|
||||
*/
|
||||
private reset(): QuestDBQueryBuilder {
|
||||
this.query = {
|
||||
select: [],
|
||||
from: '',
|
||||
where: [],
|
||||
groupBy: [],
|
||||
orderBy: [],
|
||||
sampleBy: undefined,
|
||||
latestBy: undefined,
|
||||
timeRange: undefined
|
||||
};
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Start a new query
|
||||
*/
|
||||
public static create(client: QuestDBClientInterface): QuestDBQueryBuilder {
|
||||
return new QuestDBQueryBuilder(client);
|
||||
}
|
||||
|
||||
/**
|
||||
* Select columns
|
||||
*/
|
||||
public select(...columns: string[]): QuestDBQueryBuilder {
|
||||
this.query.select.push(...columns);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Select with aggregation functions
|
||||
*/
|
||||
public selectAgg(aggregations: Record<string, string>): QuestDBQueryBuilder {
|
||||
Object.entries(aggregations).forEach(([alias, expression]) => {
|
||||
this.query.select.push(`${expression} as ${alias}`);
|
||||
});
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* From table
|
||||
*/
|
||||
public from(table: TableNames | string): QuestDBQueryBuilder {
|
||||
this.query.from = table;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Where condition
|
||||
*/
|
||||
public where(condition: string): QuestDBQueryBuilder {
|
||||
this.query.where.push(condition);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Where symbol equals
|
||||
*/
|
||||
public whereSymbol(symbol: string): QuestDBQueryBuilder {
|
||||
this.query.where.push(`symbol = '${symbol}'`);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Where symbols in list
|
||||
*/
|
||||
public whereSymbolIn(symbols: string[]): QuestDBQueryBuilder {
|
||||
const symbolList = symbols.map(s => `'${s}'`).join(', ');
|
||||
this.query.where.push(`symbol IN (${symbolList})`);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Where exchange equals
|
||||
*/
|
||||
public whereExchange(exchange: string): QuestDBQueryBuilder {
|
||||
this.query.where.push(`exchange = '${exchange}'`);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Time range filter
|
||||
*/
|
||||
public whereTimeRange(startTime: Date, endTime: Date): QuestDBQueryBuilder {
|
||||
this.query.timeRange = { startTime, endTime };
|
||||
this.query.where.push(
|
||||
`timestamp >= '${startTime.toISOString()}' AND timestamp <= '${endTime.toISOString()}'`
|
||||
);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Last N hours
|
||||
*/
|
||||
public whereLastHours(hours: number): QuestDBQueryBuilder {
|
||||
this.query.where.push(`timestamp > dateadd('h', -${hours}, now())`);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Last N days
|
||||
*/
|
||||
public whereLastDays(days: number): QuestDBQueryBuilder {
|
||||
this.query.where.push(`timestamp > dateadd('d', -${days}, now())`);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Group by columns
|
||||
*/
|
||||
public groupBy(...columns: string[]): QuestDBQueryBuilder {
|
||||
this.query.groupBy.push(...columns);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Order by column
|
||||
*/
|
||||
public orderBy(column: string, direction: 'ASC' | 'DESC' = 'ASC'): QuestDBQueryBuilder {
|
||||
this.query.orderBy.push(`${column} ${direction}`);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Order by timestamp descending (most recent first)
|
||||
*/
|
||||
public orderByTimeDesc(): QuestDBQueryBuilder {
|
||||
this.query.orderBy.push('timestamp DESC');
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Limit results
|
||||
*/
|
||||
public limit(count: number): QuestDBQueryBuilder {
|
||||
this.query.limit = count;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sample by time interval (QuestDB specific)
|
||||
*/
|
||||
public sampleBy(interval: string): QuestDBQueryBuilder {
|
||||
this.query.sampleBy = interval;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Latest by columns (QuestDB specific)
|
||||
*/
|
||||
public latestBy(...columns: string[]): QuestDBQueryBuilder {
|
||||
this.query.latestBy = columns;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build and execute the query
|
||||
*/
|
||||
public async execute<T = any>(): Promise<QueryResult<T>> {
|
||||
const sql = this.build();
|
||||
this.logger.debug('Executing query', { sql });
|
||||
|
||||
try {
|
||||
const result = await this.client.query<T>(sql);
|
||||
this.reset(); // Reset for next query
|
||||
return result;
|
||||
} catch (error) {
|
||||
this.logger.error('Query execution failed', { sql, error });
|
||||
this.reset(); // Reset even on error
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the SQL query string
|
||||
*/
|
||||
public build(): string {
|
||||
if (!this.query.from) {
|
||||
throw new Error('FROM clause is required');
|
||||
}
|
||||
|
||||
if (this.query.select.length === 0) {
|
||||
this.query.select.push('*');
|
||||
}
|
||||
|
||||
let sql = `SELECT ${this.query.select.join(', ')} FROM ${this.query.from}`;
|
||||
|
||||
// Add WHERE clause
|
||||
if (this.query.where.length > 0) {
|
||||
sql += ` WHERE ${this.query.where.join(' AND ')}`;
|
||||
}
|
||||
|
||||
// Add LATEST BY (QuestDB specific - must come before GROUP BY)
|
||||
if (this.query.latestBy && this.query.latestBy.length > 0) {
|
||||
sql += ` LATEST BY ${this.query.latestBy.join(', ')}`;
|
||||
}
|
||||
|
||||
// Add SAMPLE BY (QuestDB specific)
|
||||
if (this.query.sampleBy) {
|
||||
sql += ` SAMPLE BY ${this.query.sampleBy}`;
|
||||
}
|
||||
|
||||
// Add GROUP BY
|
||||
if (this.query.groupBy.length > 0) {
|
||||
sql += ` GROUP BY ${this.query.groupBy.join(', ')}`;
|
||||
}
|
||||
|
||||
// Add ORDER BY
|
||||
if (this.query.orderBy.length > 0) {
|
||||
sql += ` ORDER BY ${this.query.orderBy.join(', ')}`;
|
||||
}
|
||||
|
||||
// Add LIMIT
|
||||
if (this.query.limit) {
|
||||
sql += ` LIMIT ${this.query.limit}`;
|
||||
}
|
||||
|
||||
return sql;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the built query without executing
|
||||
*/
|
||||
public toSQL(): string {
|
||||
return this.build();
|
||||
}
|
||||
|
||||
// Predefined query methods for common use cases
|
||||
/**
|
||||
* Get latest OHLCV data for symbols
|
||||
*/
|
||||
public static latestOHLCV(
|
||||
client: QuestDBClientInterface,
|
||||
symbols: string[],
|
||||
exchange?: string
|
||||
): QuestDBQueryBuilder {
|
||||
const builder = QuestDBQueryBuilder.create(client)
|
||||
.select('symbol', 'timestamp', 'open', 'high', 'low', 'close', 'volume')
|
||||
.from('ohlcv_data')
|
||||
.whereSymbolIn(symbols)
|
||||
.latestBy('symbol')
|
||||
.orderByTimeDesc();
|
||||
|
||||
if (exchange) {
|
||||
builder.whereExchange(exchange);
|
||||
}
|
||||
|
||||
return builder;
|
||||
}
|
||||
/**
|
||||
* Get OHLCV data with time sampling
|
||||
*/
|
||||
public static ohlcvTimeSeries(
|
||||
client: QuestDBClientInterface,
|
||||
symbol: string,
|
||||
interval: string,
|
||||
hours: number = 24
|
||||
): QuestDBQueryBuilder {
|
||||
return QuestDBQueryBuilder.create(client)
|
||||
.selectAgg({
|
||||
'first_open': 'first(open)',
|
||||
'max_high': 'max(high)',
|
||||
'min_low': 'min(low)',
|
||||
'last_close': 'last(close)',
|
||||
'sum_volume': 'sum(volume)'
|
||||
})
|
||||
.from('ohlcv_data')
|
||||
.whereSymbol(symbol)
|
||||
.whereLastHours(hours)
|
||||
.sampleBy(interval)
|
||||
.orderByTimeDesc();
|
||||
}
|
||||
/**
|
||||
* Get market analytics data
|
||||
*/
|
||||
public static marketAnalytics(
|
||||
client: QuestDBClientInterface,
|
||||
symbols: string[],
|
||||
hours: number = 1
|
||||
): QuestDBQueryBuilder {
|
||||
return QuestDBQueryBuilder.create(client)
|
||||
.select('symbol', 'timestamp', 'rsi', 'macd', 'bollinger_upper', 'bollinger_lower', 'volume_sma')
|
||||
.from('market_analytics')
|
||||
.whereSymbolIn(symbols)
|
||||
.whereLastHours(hours)
|
||||
.orderBy('symbol')
|
||||
.orderByTimeDesc();
|
||||
}
|
||||
/**
|
||||
* Get performance metrics for a time range
|
||||
*/
|
||||
public static performanceMetrics(
|
||||
client: QuestDBClientInterface,
|
||||
startTime: Date,
|
||||
endTime: Date
|
||||
): QuestDBQueryBuilder {
|
||||
return QuestDBQueryBuilder.create(client)
|
||||
.selectAgg({
|
||||
'total_trades': 'count(*)',
|
||||
'avg_response_time': 'avg(response_time)',
|
||||
'max_response_time': 'max(response_time)',
|
||||
'error_rate': 'sum(case when success = false then 1 else 0 end) * 100.0 / count(*)'
|
||||
})
|
||||
.from('performance_metrics')
|
||||
.whereTimeRange(startTime, endTime)
|
||||
.sampleBy('1m');
|
||||
}
|
||||
/**
|
||||
* Get trade execution data
|
||||
*/
|
||||
public static tradeExecutions(
|
||||
client: QuestDBClientInterface,
|
||||
symbol?: string,
|
||||
hours: number = 24
|
||||
): QuestDBQueryBuilder {
|
||||
const builder = QuestDBQueryBuilder.create(client)
|
||||
.select('symbol', 'timestamp', 'side', 'quantity', 'price', 'execution_time')
|
||||
.from('trade_executions')
|
||||
.whereLastHours(hours)
|
||||
.orderByTimeDesc();
|
||||
|
||||
if (symbol) {
|
||||
builder.whereSymbol(symbol);
|
||||
}
|
||||
|
||||
return builder;
|
||||
}
|
||||
}
|
||||
404
libs/questdb-client/src/schema.ts
Normal file
404
libs/questdb-client/src/schema.ts
Normal file
|
|
@ -0,0 +1,404 @@
|
|||
import { Logger } from '@stock-bot/logger';
|
||||
import type { TableSchema, IndexDefinition, TableNames, QueryResult } from './types';
|
||||
|
||||
// Minimal client surface needed by the schema manager; declared locally
// (instead of importing QuestDBClient) to avoid a circular dependency.
interface QuestDBClientInterface {
  // Execute a SQL statement, optionally with bind parameters, returning rows.
  query<T = any>(sql: string, params?: any[]): Promise<QueryResult<T>>;
}
|
||||
|
||||
/**
 * QuestDB Schema Manager
 *
 * Manages database schemas, table creation, and optimization
 * for time-series data storage in QuestDB. Holds a registry of
 * predefined TableSchema definitions keyed by table name and creates,
 * drops and inspects the corresponding tables through the client.
 */
export class QuestDBSchemaManager {
  private readonly logger: Logger;
  // Registry of known table schemas, keyed by table name.
  private readonly schemas: Map<string, TableSchema> = new Map();

  constructor(private readonly client: QuestDBClientInterface) {
    this.logger = new Logger('QuestDBSchemaManager');
    this.initializeSchemas();
  }

  /**
   * Initialize predefined schemas.
   *
   * Every schema marks exactly one column as the designated timestamp
   * (enforced later by validateSchema) and partitions by DAY, except the
   * high-volume performance_metrics table which partitions by HOUR.
   */
  private initializeSchemas(): void {
    // OHLCV Data Table
    this.schemas.set('ohlcv_data', {
      tableName: 'ohlcv_data',
      columns: [
        { name: 'symbol', type: 'SYMBOL', nullable: false },
        { name: 'exchange', type: 'SYMBOL', nullable: false },
        { name: 'timestamp', type: 'TIMESTAMP', nullable: false, designated: true },
        { name: 'open', type: 'DOUBLE', nullable: false },
        { name: 'high', type: 'DOUBLE', nullable: false },
        { name: 'low', type: 'DOUBLE', nullable: false },
        { name: 'close', type: 'DOUBLE', nullable: false },
        { name: 'volume', type: 'LONG', nullable: false },
        { name: 'data_source', type: 'SYMBOL', nullable: true }
      ],
      partitionBy: 'DAY',
      orderBy: ['symbol', 'timestamp'],
      indices: [
        { columns: ['symbol'], type: 'HASH' },
        { columns: ['exchange'], type: 'HASH' }
      ]
    });

    // Market Analytics Table (technical indicators per symbol/timeframe)
    this.schemas.set('market_analytics', {
      tableName: 'market_analytics',
      columns: [
        { name: 'symbol', type: 'SYMBOL', nullable: false },
        { name: 'exchange', type: 'SYMBOL', nullable: false },
        { name: 'timestamp', type: 'TIMESTAMP', nullable: false, designated: true },
        { name: 'rsi', type: 'DOUBLE', nullable: true },
        { name: 'macd', type: 'DOUBLE', nullable: true },
        { name: 'signal', type: 'DOUBLE', nullable: true },
        { name: 'histogram', type: 'DOUBLE', nullable: true },
        { name: 'bollinger_upper', type: 'DOUBLE', nullable: true },
        { name: 'bollinger_lower', type: 'DOUBLE', nullable: true },
        { name: 'volume_sma', type: 'DOUBLE', nullable: true },
        { name: 'timeframe', type: 'SYMBOL', nullable: true }
      ],
      partitionBy: 'DAY',
      orderBy: ['symbol', 'timestamp'],
      indices: [
        { columns: ['symbol'], type: 'HASH' },
        { columns: ['timeframe'], type: 'HASH' }
      ]
    });

    // Trade Executions Table
    this.schemas.set('trade_executions', {
      tableName: 'trade_executions',
      columns: [
        { name: 'symbol', type: 'SYMBOL', nullable: false },
        { name: 'timestamp', type: 'TIMESTAMP', nullable: false, designated: true },
        { name: 'side', type: 'SYMBOL', nullable: false },
        { name: 'quantity', type: 'DOUBLE', nullable: false },
        { name: 'price', type: 'DOUBLE', nullable: false },
        { name: 'execution_time', type: 'LONG', nullable: false },
        { name: 'order_id', type: 'SYMBOL', nullable: true },
        { name: 'strategy', type: 'SYMBOL', nullable: true },
        { name: 'commission', type: 'DOUBLE', nullable: true }
      ],
      partitionBy: 'DAY',
      orderBy: ['symbol', 'timestamp'],
      indices: [
        { columns: ['symbol'], type: 'HASH' },
        { columns: ['order_id'], type: 'HASH' },
        { columns: ['strategy'], type: 'HASH' }
      ]
    });

    // Performance Metrics Table (high write rate -> HOUR partitions)
    this.schemas.set('performance_metrics', {
      tableName: 'performance_metrics',
      columns: [
        { name: 'timestamp', type: 'TIMESTAMP', nullable: false, designated: true },
        { name: 'operation', type: 'SYMBOL', nullable: false },
        { name: 'response_time', type: 'LONG', nullable: false },
        { name: 'success', type: 'BOOLEAN', nullable: false },
        { name: 'error_code', type: 'SYMBOL', nullable: true },
        { name: 'component', type: 'SYMBOL', nullable: true }
      ],
      partitionBy: 'HOUR',
      orderBy: ['operation', 'timestamp'],
      indices: [
        { columns: ['operation'], type: 'HASH' },
        { columns: ['success'], type: 'HASH' }
      ]
    });

    // Portfolio Positions Table
    this.schemas.set('portfolio_positions', {
      tableName: 'portfolio_positions',
      columns: [
        { name: 'portfolio_id', type: 'SYMBOL', nullable: false },
        { name: 'symbol', type: 'SYMBOL', nullable: false },
        { name: 'timestamp', type: 'TIMESTAMP', nullable: false, designated: true },
        { name: 'quantity', type: 'DOUBLE', nullable: false },
        { name: 'avg_cost', type: 'DOUBLE', nullable: false },
        { name: 'market_value', type: 'DOUBLE', nullable: false },
        { name: 'unrealized_pnl', type: 'DOUBLE', nullable: false },
        { name: 'realized_pnl', type: 'DOUBLE', nullable: false }
      ],
      partitionBy: 'DAY',
      orderBy: ['portfolio_id', 'symbol', 'timestamp'],
      indices: [
        { columns: ['portfolio_id'], type: 'HASH' },
        { columns: ['symbol'], type: 'HASH' }
      ]
    });

    // Risk Metrics Table
    this.schemas.set('risk_metrics', {
      tableName: 'risk_metrics',
      columns: [
        { name: 'portfolio_id', type: 'SYMBOL', nullable: false },
        { name: 'timestamp', type: 'TIMESTAMP', nullable: false, designated: true },
        { name: 'var_1d', type: 'DOUBLE', nullable: true },
        { name: 'var_5d', type: 'DOUBLE', nullable: true },
        { name: 'expected_shortfall', type: 'DOUBLE', nullable: true },
        { name: 'beta', type: 'DOUBLE', nullable: true },
        { name: 'sharpe_ratio', type: 'DOUBLE', nullable: true },
        { name: 'max_drawdown', type: 'DOUBLE', nullable: true },
        { name: 'volatility', type: 'DOUBLE', nullable: true }
      ],
      partitionBy: 'DAY',
      orderBy: ['portfolio_id', 'timestamp'],
      indices: [
        { columns: ['portfolio_id'], type: 'HASH' }
      ]
    });
  }

  /**
   * Create all registered tables. Fails fast: the first table that cannot
   * be created aborts the loop and rethrows.
   */
  public async createAllTables(): Promise<void> {
    this.logger.info('Creating all QuestDB tables');

    for (const [tableName, schema] of this.schemas) {
      try {
        await this.createTable(schema);
        this.logger.info(`Table ${tableName} created successfully`);
      } catch (error) {
        this.logger.error(`Failed to create table ${tableName}`, { error });
        throw error;
      }
    }
  }

  /**
   * Create a single table from a schema definition.
   *
   * The generated SQL uses IF NOT EXISTS, so the "already exists" catch
   * below is defensive; it keeps table creation idempotent either way.
   */
  public async createTable(schema: TableSchema): Promise<void> {
    const sql = this.buildCreateTableSQL(schema);

    try {
      await this.client.query(sql);
      this.logger.info(`Table ${schema.tableName} created`, { sql });
    } catch (error) {
      // Check if table already exists
      if (error instanceof Error && error.message.includes('already exists')) {
        this.logger.info(`Table ${schema.tableName} already exists`);
        return;
      }
      throw error;
    }
  }

  /**
   * Drop a table (no error if it does not exist).
   * NOTE(review): tableName is interpolated directly into SQL — callers
   * must pass trusted, internal table names only.
   */
  public async dropTable(tableName: string): Promise<void> {
    const sql = `DROP TABLE IF EXISTS ${tableName}`;

    try {
      await this.client.query(sql);
      this.logger.info(`Table ${tableName} dropped`);
    } catch (error) {
      this.logger.error(`Failed to drop table ${tableName}`, { error });
      throw error;
    }
  }

  /**
   * Check whether a table exists.
   *
   * Returns false (rather than throwing) on query errors.
   * NOTE(review): assumes the deployed QuestDB version exposes
   * information_schema.tables — confirm against the server version.
   */
  public async tableExists(tableName: string): Promise<boolean> {
    try {
      const result = await this.client.query(`
        SELECT COUNT(*) as count
        FROM information_schema.tables
        WHERE table_name = '${tableName}'
      `);

      return result.rows.length > 0 && result.rows[0].count > 0;
    } catch (error) {
      this.logger.error(`Error checking if table exists: ${tableName}`, { error });
      return false;
    }
  }

  /**
   * Get a registered table schema, or undefined when unknown.
   */
  public getSchema(tableName: string): TableSchema | undefined {
    return this.schemas.get(tableName);
  }

  /**
   * Register a custom schema (replaces any existing entry for the name).
   */
  public addSchema(schema: TableSchema): void {
    this.schemas.set(schema.tableName, schema);
    this.logger.info(`Schema added for table: ${schema.tableName}`);
  }

  /**
   * Names of all registered schemas.
   */
  public getSchemaNames(): string[] {
    return Array.from(this.schemas.keys());
  }

  /**
   * Optimize table (rebuild indices, etc.).
   *
   * QuestDB optimizes storage automatically; today this only fetches and
   * logs table statistics, but it validates that the schema is registered.
   */
  public async optimizeTable(tableName: string): Promise<void> {
    const schema = this.schemas.get(tableName);
    if (!schema) {
      throw new Error(`Schema not found for table: ${tableName}`);
    }

    // QuestDB automatically optimizes, but we can analyze table stats
    try {
      const stats = await this.getTableStats(tableName);
      this.logger.info(`Table ${tableName} stats`, stats);
    } catch (error) {
      this.logger.error(`Failed to optimize table ${tableName}`, { error });
      throw error;
    }
  }

  /**
   * Get basic table statistics: row count plus the min/max timestamp.
   * Returns an empty object for an empty result set.
   */
  public async getTableStats(tableName: string): Promise<any> {
    try {
      const result = await this.client.query(`
        SELECT
          COUNT(*) as row_count,
          MIN(timestamp) as min_timestamp,
          MAX(timestamp) as max_timestamp
        FROM ${tableName}
      `);

      return result.rows[0] || {};
    } catch (error) {
      this.logger.error(`Failed to get table stats for ${tableName}`, { error });
      throw error;
    }
  }

  /**
   * Truncate table (remove all data but keep structure).
   */
  public async truncateTable(tableName: string): Promise<void> {
    try {
      await this.client.query(`TRUNCATE TABLE ${tableName}`);
      this.logger.info(`Table ${tableName} truncated`);
    } catch (error) {
      this.logger.error(`Failed to truncate table ${tableName}`, { error });
      throw error;
    }
  }

  /**
   * Create table partitions for future dates.
   *
   * No-op: QuestDB creates partitions automatically based on the
   * PARTITION BY clause; the `days` parameter is currently unused and
   * kept only for future extensibility.
   */
  public async createPartitions(tableName: string, days: number = 30): Promise<void> {
    // QuestDB handles partitioning automatically based on the PARTITION BY clause
    // This method is for future extensibility
    this.logger.info(`Partitioning is automatic for table ${tableName}`);
  }

  /**
   * Build the CREATE TABLE SQL statement for a schema:
   *   CREATE TABLE IF NOT EXISTS name (cols) timestamp(col) PARTITION BY x
   * The designated-timestamp and PARTITION BY suffixes are appended only
   * when present in the schema.
   */
  private buildCreateTableSQL(schema: TableSchema): string {
    const columns = schema.columns.map(col => {
      let columnDef = `${col.name} ${col.type}`;

      if (!col.nullable) {
        columnDef += ' NOT NULL';
      }

      return columnDef;
    }).join(', ');

    let sql = `CREATE TABLE IF NOT EXISTS ${schema.tableName} (${columns})`;

    // Add designated timestamp
    const timestampColumn = schema.columns.find(col => col.designated);
    if (timestampColumn) {
      sql += ` timestamp(${timestampColumn.name})`;
    }

    // Add partition by
    if (schema.partitionBy) {
      sql += ` PARTITION BY ${schema.partitionBy}`;
    }

    return sql;
  }

  /**
   * Build index-creation SQL. Currently unused: QuestDB uses a different
   * index mechanism (SYMBOL index capacity); kept for future compatibility.
   */
  private buildCreateIndexSQL(tableName: string, index: IndexDefinition): string {
    const indexName = `idx_${tableName}_${index.columns.join('_')}`;
    const columns = index.columns.join(', ');

    // QuestDB uses different index syntax, this is for future compatibility
    return `CREATE INDEX ${indexName} ON ${tableName} (${columns})`;
  }

  /**
   * Validate a schema definition: requires a table name, at least one
   * column, and exactly one designated timestamp column.
   * @throws Error describing the first violation found.
   */
  private validateSchema(schema: TableSchema): void {
    if (!schema.tableName) {
      throw new Error('Table name is required');
    }

    if (!schema.columns || schema.columns.length === 0) {
      throw new Error('At least one column is required');
    }

    const timestampColumns = schema.columns.filter(col => col.designated);
    if (timestampColumns.length > 1) {
      throw new Error('Only one designated timestamp column is allowed');
    }

    if (timestampColumns.length === 0) {
      throw new Error('A designated timestamp column is required for time-series tables');
    }
  }

  /**
   * Report, per registered schema, whether its table exists.
   */
  public async getTableCreationStatus(): Promise<Record<string, boolean>> {
    const status: Record<string, boolean> = {};

    for (const tableName of this.schemas.keys()) {
      status[tableName] = await this.tableExists(tableName);
    }

    return status;
  }

  /**
   * Initialize the database: validate all registered schemas, create
   * their tables, then log the resulting per-table existence status.
   */
  public async initializeDatabase(): Promise<void> {
    this.logger.info('Initializing QuestDB schema');

    // Validate all schemas first
    for (const schema of this.schemas.values()) {
      this.validateSchema(schema);
    }

    // Create all tables
    await this.createAllTables();

    // Get creation status
    const status = await this.getTableCreationStatus();
    this.logger.info('Database initialization complete', { tableStatus: status });
  }
}
|
||||
284
libs/questdb-client/src/types.ts
Normal file
284
libs/questdb-client/src/types.ts
Normal file
|
|
@ -0,0 +1,284 @@
|
|||
/**
 * QuestDB Client Configuration and Types
 */

/**
 * QuestDB Client Configuration
 *
 * Connection settings for the three QuestDB wire protocols — HTTP (REST),
 * PostgreSQL wire and InfluxDB Line Protocol — each on its own port.
 */
export interface QuestDBClientConfig {
  host: string;
  httpPort: number;    // REST / ILP-over-HTTP port
  pgPort: number;      // PostgreSQL wire protocol port
  influxPort: number;  // InfluxDB Line Protocol port
  user?: string;
  password?: string;
  database?: string;
  tls?: {
    enabled: boolean;
    verifyServerCert: boolean;  // false permits self-signed certificates
  };
  timeouts?: {
    connection: number;  // connect timeout — presumably ms, TODO confirm
    request: number;     // per-request timeout — presumably ms, TODO confirm
  };
  retryAttempts?: number;
}
|
||||
|
||||
/**
 * QuestDB Connection Options
 *
 * Behavioural options layered on top of the connection config; defaults
 * (protocol 'pg', 3 retries, 1000ms delay, 30000ms health interval) are
 * applied by QuestDBClient's constructor.
 */
export interface QuestDBConnectionOptions {
  protocol?: 'http' | 'pg' | 'influx';  // preferred wire protocol
  retryAttempts?: number;
  retryDelay?: number;                  // base delay between retries, ms
  healthCheckInterval?: number;         // periodic health-check interval, ms
}
|
||||
|
||||
/**
 * Health Status Types
 */
export type QuestDBHealthStatus = 'healthy' | 'degraded' | 'unhealthy';

/** Snapshot of one health probe across the three protocols. */
export interface QuestDBHealthCheck {
  status: QuestDBHealthStatus;
  timestamp: Date;       // when the probe ran
  latency: number;       // probe round-trip time — presumably ms, TODO confirm
  protocols: {
    http: boolean;       // per-protocol reachability flags
    pg: boolean;
    influx: boolean;
  };
  errors?: string[];     // optional error details from failed probes
}

/** Aggregate throughput/latency metrics for the QuestDB connection. */
export interface QuestDBMetrics {
  queriesPerSecond: number;
  insertsPerSecond: number;
  averageQueryTime: number;
  errorRate: number;
  dataIngestionRate: number;
  storageSize: number;
}
|
||||
|
||||
/**
 * Table Names for Time-Series Data
 *
 * NOTE(review): these values do not match the table names actually used
 * elsewhere in this package ('ohlcv_data', 'market_analytics',
 * 'trade_executions', 'performance_metrics', 'portfolio_positions').
 * The mismatch is masked because consuming APIs accept
 * `TableNames | string` — confirm and reconcile the two lists.
 */
export type TableNames =
  | 'ohlcv'
  | 'trades'
  | 'quotes'
  | 'indicators'
  | 'performance'
  | 'risk_metrics'
  | 'market_events'
  | 'strategy_signals'
  | 'portfolio_snapshots';
|
||||
|
||||
/**
 * Time-Series Data Types
 */

/** Common shape of every time-series row: a timestamp plus optional symbol. */
export interface BaseTimeSeriesData {
  timestamp: Date;
  symbol?: string;
}

/** One OHLCV candle for a symbol/timeframe. */
export interface OHLCVData extends BaseTimeSeriesData {
  open: number;
  high: number;
  low: number;
  close: number;
  volume: number;
  timeframe: string; // '1m', '5m', '1h', '1d', etc.
  source: string;    // originating data provider
}

/** A single executed trade tick. */
export interface TradeData extends BaseTimeSeriesData {
  trade_id: string;
  price: number;
  quantity: number;
  side: 'buy' | 'sell';
  exchange: string;
  conditions?: string[]; // presumably exchange condition codes — TODO confirm
}

/** Top-of-book quote snapshot. */
export interface QuoteData extends BaseTimeSeriesData {
  bid_price: number;
  bid_size: number;
  ask_price: number;
  ask_size: number;
  exchange: string;
  spread: number; // presumably ask_price - bid_price — confirm with producer
}

/** A computed technical-indicator value. */
export interface IndicatorData extends BaseTimeSeriesData {
  indicator_name: string;
  value: number;
  parameters?: Record<string, any>; // indicator inputs, e.g. period lengths
  timeframe: string;
}

/** Portfolio valuation snapshot at a point in time. */
export interface PerformanceData extends BaseTimeSeriesData {
  portfolio_id: string;
  total_value: number;
  cash_balance: number;
  unrealized_pnl: number;
  realized_pnl: number;
  daily_return: number;
  cumulative_return: number;
}

/** A named risk metric, optionally scoped to a portfolio or strategy. */
export interface RiskMetrics extends BaseTimeSeriesData {
  portfolio_id?: string;
  strategy_id?: string;
  metric_name: string;
  value: number;
  threshold?: number; // breach threshold, when the metric has one
  status: 'normal' | 'warning' | 'breach';
}
|
||||
|
||||
/**
|
||||
* Query Result Types
|
||||
*/
|
||||
export interface QueryResult<T = any> {
|
||||
rows: T[];
|
||||
rowCount: number;
|
||||
executionTime: number;
|
||||
metadata?: {
|
||||
columns: Array<{
|
||||
name: string;
|
||||
type: string;
|
||||
}>;
|
||||
};
|
||||
}
|
||||
|
||||
export interface InsertResult {
|
||||
rowsInserted: number;
|
||||
executionTime: number;
|
||||
errors?: string[];
|
||||
}
|
||||
|
||||
/**
|
||||
* Schema Definition Types
|
||||
*/
|
||||
export interface ColumnDefinition {
|
||||
name: string;
|
||||
type: 'SYMBOL' | 'STRING' | 'DOUBLE' | 'FLOAT' | 'LONG' | 'INT' | 'BOOLEAN' | 'TIMESTAMP' | 'DATE' | 'BINARY';
|
||||
indexed?: boolean;
|
||||
capacity?: number; // For SYMBOL type
|
||||
}
|
||||
|
||||
export interface TableDefinition {
|
||||
name: string;
|
||||
columns: ColumnDefinition[];
|
||||
partitionBy?: 'NONE' | 'DAY' | 'MONTH' | 'YEAR';
|
||||
timestamp?: string; // Column name to use as designated timestamp
|
||||
dedup?: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Connection Pool Types
|
||||
*/
|
||||
export interface ConnectionPoolConfig {
|
||||
minConnections: number;
|
||||
maxConnections: number;
|
||||
idleTimeout: number;
|
||||
acquireTimeout: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Health Monitoring Types
|
||||
*/
|
||||
export interface HealthStatus {
|
||||
isHealthy: boolean;
|
||||
lastCheck: Date;
|
||||
responseTime: number;
|
||||
message: string;
|
||||
error?: Error;
|
||||
details?: {
|
||||
pgPool: boolean;
|
||||
httpEndpoint: boolean;
|
||||
uptime: number;
|
||||
};
|
||||
}
|
||||
|
||||
export interface PerformanceMetrics {
|
||||
totalQueries: number;
|
||||
successfulQueries: number;
|
||||
failedQueries: number;
|
||||
averageResponseTime: number;
|
||||
lastQueryTime: Date | null;
|
||||
connectionUptime: number;
|
||||
memoryUsage: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Query Builder Types
|
||||
*/
|
||||
export interface TimeSeriesQuery {
|
||||
table: TableNames | string;
|
||||
columns?: string[];
|
||||
timeRange?: TimeRange;
|
||||
groupBy?: string[];
|
||||
aggregations?: Record<string, string>;
|
||||
sampleBy?: string;
|
||||
latestBy?: string[];
|
||||
orderBy?: Array<{ column: string; direction: 'ASC' | 'DESC' }>;
|
||||
limit?: number;
|
||||
}
|
||||
|
||||
export interface AggregationQuery {
|
||||
aggregations: Record<string, string>;
|
||||
groupBy?: string[];
|
||||
having?: string[];
|
||||
}
|
||||
|
||||
export interface TimeRange {
|
||||
startTime: Date;
|
||||
endTime: Date;
|
||||
}
|
||||
|
||||
/**
|
||||
* InfluxDB Line Protocol Types
|
||||
*/
|
||||
export interface InfluxLineData {
|
||||
measurement: string;
|
||||
tags: Record<string, string>;
|
||||
fields: Record<string, number | string | boolean>;
|
||||
timestamp?: Date;
|
||||
}
|
||||
|
||||
export interface InfluxWriteOptions {
|
||||
batchSize?: number;
|
||||
flushInterval?: number;
|
||||
autoFlush?: boolean;
|
||||
precision?: 'ns' | 'us' | 'ms' | 's';
|
||||
retryAttempts?: number;
|
||||
retryDelay?: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Schema Management Types
|
||||
*/
|
||||
export interface TableSchema {
|
||||
tableName: string;
|
||||
columns: ColumnSchema[];
|
||||
partitionBy?: 'NONE' | 'HOUR' | 'DAY' | 'MONTH' | 'YEAR';
|
||||
orderBy?: string[];
|
||||
indices?: IndexDefinition[];
|
||||
dedup?: boolean;
|
||||
}
|
||||
|
||||
export interface ColumnSchema {
|
||||
name: string;
|
||||
type: 'SYMBOL' | 'STRING' | 'DOUBLE' | 'FLOAT' | 'LONG' | 'INT' | 'BOOLEAN' | 'TIMESTAMP' | 'DATE' | 'BINARY';
|
||||
nullable?: boolean;
|
||||
designated?: boolean; // For designated timestamp column
|
||||
capacity?: number; // For SYMBOL type
|
||||
indexed?: boolean;
|
||||
}
|
||||
|
||||
export interface IndexDefinition {
|
||||
columns: string[];
|
||||
type: 'HASH' | 'BTREE';
|
||||
unique?: boolean;
|
||||
}