Add initial Python analytics, Rust core, and TypeScript orchestrator services
This commit is contained in:
parent
680b5fd2ae
commit
c862ed496b
62 changed files with 13459 additions and 0 deletions
79
apps/stock/analytics/src/api/app.py
Normal file
79
apps/stock/analytics/src/api/app.py
Normal file
|
|
@ -0,0 +1,79 @@
|
|||
from fastapi import FastAPI, HTTPException
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
from contextlib import asynccontextmanager
|
||||
import logging
|
||||
from typing import Dict, Any
|
||||
|
||||
from .endpoints import optimization, analytics, models
|
||||
from ..analytics.performance import PerformanceAnalyzer
|
||||
from ..analytics.regime import RegimeDetector
|
||||
from ..optimization.portfolio_optimizer import PortfolioOptimizer
|
||||
|
||||
# Configure logging
logging.basicConfig(level=logging.INFO)
# Module-level logger named after this module, per logging convention.
logger = logging.getLogger(__name__)

# Global instances
# Shared singletons reused across all requests; routes obtain them through
# the get_* dependency providers defined later in this module.
# NOTE(review): module-level construction assumes these constructors do no
# blocking I/O — confirm.
performance_analyzer = PerformanceAnalyzer()
regime_detector = RegimeDetector()
portfolio_optimizer = PortfolioOptimizer()
|
||||
|
||||
@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: code before ``yield`` runs at startup,
    code after it runs at shutdown."""
    # Startup
    logger.info("Starting Trading Analytics Service...")
    # Initialize connections, load models, etc.
    yield
    # Shutdown
    logger.info("Shutting down Trading Analytics Service...")
|
||||
|
||||
# Create FastAPI app
app = FastAPI(
    title="Trading Analytics Service",
    description="Complex analytics, optimization, and ML inference for trading",
    version="0.1.0",
    lifespan=lifespan
)

# Configure CORS
# NOTE(review): wildcard origins combined with allow_credentials=True is
# ineffective — the CORS spec forbids `Access-Control-Allow-Origin: *` on
# credentialed requests, so browsers will reject these responses.  Lock
# origins down before production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Configure appropriately for production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Include routers (one per endpoint module, namespaced by URL prefix)
app.include_router(optimization.router, prefix="/optimize", tags=["optimization"])
app.include_router(analytics.router, prefix="/analytics", tags=["analytics"])
app.include_router(models.router, prefix="/models", tags=["models"])
|
||||
|
||||
@app.get("/")
async def root():
    """Service banner: name, operational status, and version."""
    banner = {
        "service": "Trading Analytics",
        "status": "operational",
        "version": "0.1.0",
    }
    return banner
|
||||
|
||||
@app.get("/health")
async def health_check():
    """Liveness probe: reports a static 'operational' status per component."""
    component_names = (
        "performance_analyzer",
        "regime_detector",
        "portfolio_optimizer",
    )
    components = dict.fromkeys(component_names, "operational")
    return {"status": "healthy", "components": components}
|
||||
|
||||
# Dependency injection
def get_performance_analyzer():
    """FastAPI dependency provider returning the shared PerformanceAnalyzer."""
    return performance_analyzer
|
||||
|
||||
def get_regime_detector():
    """FastAPI dependency provider returning the shared RegimeDetector."""
    return regime_detector
|
||||
|
||||
def get_portfolio_optimizer():
    """FastAPI dependency provider returning the shared PortfolioOptimizer."""
    return portfolio_optimizer
|
||||
163
apps/stock/analytics/src/api/endpoints/analytics.py
Normal file
163
apps/stock/analytics/src/api/endpoints/analytics.py
Normal file
|
|
@ -0,0 +1,163 @@
|
|||
from fastapi import APIRouter, HTTPException, Query, Depends
|
||||
from datetime import datetime, date
|
||||
from typing import List, Optional
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
|
||||
from ...analytics.performance import PerformanceAnalyzer
|
||||
from ...analytics.regime import RegimeDetector
|
||||
from ..app import get_performance_analyzer, get_regime_detector
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
@router.get("/performance/{portfolio_id}")
async def get_performance_metrics(
    portfolio_id: str,
    start_date: datetime = Query(..., description="Start date for analysis"),
    end_date: datetime = Query(..., description="End date for analysis"),
    analyzer: PerformanceAnalyzer = Depends(get_performance_analyzer)
):
    """Calculate comprehensive performance metrics for a portfolio.

    Delegates to the injected analyzer; any failure surfaces as a 500.
    """
    try:
        # In real implementation, would fetch data from database
        # For now, using mock data
        return analyzer.calculate_metrics(
            portfolio_id=portfolio_id,
            start_date=start_date,
            end_date=end_date,
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to calculate performance metrics: {str(e)}")
|
||||
|
||||
@router.get("/risk/{portfolio_id}")
async def get_risk_metrics(
    portfolio_id: str,
    window: int = Query(252, description="Rolling window for risk calculations"),
    analyzer: PerformanceAnalyzer = Depends(get_performance_analyzer)
):
    """Calculate risk metrics (VaR, CVaR, ...) over a rolling window.

    Delegates to the injected analyzer; any failure surfaces as a 500.
    """
    try:
        return analyzer.calculate_risk_metrics(
            portfolio_id=portfolio_id,
            window=window,
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to calculate risk metrics: {str(e)}")
|
||||
|
||||
@router.get("/regime")
async def detect_market_regime(
    lookback_days: int = Query(60, description="Days to look back for regime detection"),
    detector: RegimeDetector = Depends(get_regime_detector)
):
    """
    Detect current market regime using various indicators.

    Returns the detector's regime label, confidence, and indicator values
    plus a timezone-aware UTC timestamp in ISO-8601 format.
    """
    # Local import so this file's top-level imports stay untouched.
    from datetime import timezone

    try:
        regime = detector.detect_current_regime(lookback_days=lookback_days)

        return {
            "regime": regime['regime'],
            "confidence": regime['confidence'],
            "indicators": regime['indicators'],
            # datetime.utcnow() is deprecated (Python 3.12) and returns a
            # naive datetime; emit an aware UTC timestamp instead.
            "timestamp": datetime.now(timezone.utc).isoformat()
        }

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to detect market regime: {str(e)}")
|
||||
|
||||
@router.post("/correlation")
async def calculate_correlation_matrix(
    symbols: List[str],
    start_date: Optional[date] = None,
    end_date: Optional[date] = None,
    method: str = Query("pearson", pattern="^(pearson|spearman|kendall)$")
):
    """
    Calculate correlation matrix for given symbols.

    Currently returns a deterministic mock matrix (seeded RNG); the date
    range and method are echoed back but not yet used.
    """
    try:
        # In real implementation, would fetch price data
        # For now, return mock correlation matrix
        n = len(symbols)

        # Generate realistic correlation matrix: A @ A.T is symmetric
        # positive semi-definite, so scaling by its diagonal yields a
        # valid correlation matrix.
        np.random.seed(42)
        A = np.random.randn(n, n)
        cov = A @ A.T

        # Normalize by the outer product of standard deviations rather
        # than two matrix inversions (cheaper, numerically cleaner), and
        # pin the diagonal to exactly 1.0.
        std = np.sqrt(np.diag(cov))
        correlation_matrix = cov / np.outer(std, std)
        np.fill_diagonal(correlation_matrix, 1.0)

        return {
            "symbols": symbols,
            "matrix": correlation_matrix.tolist(),
            "method": method
        }

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to calculate correlation: {str(e)}")
|
||||
|
||||
@router.get("/backtest/{backtest_id}")
async def analyze_backtest_results(
    backtest_id: str,
    analyzer: PerformanceAnalyzer = Depends(get_performance_analyzer)
):
    """Analyze results from a completed backtest.

    Copies the analyzer's report sections into the response verbatim.
    """
    try:
        analysis = analyzer.analyze_backtest(backtest_id)

        report = {"backtest_id": backtest_id}
        for section in ("metrics", "statistics", "risk_analysis", "trade_analysis"):
            report[section] = analysis[section]
        return report

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to analyze backtest: {str(e)}")
|
||||
|
||||
@router.post("/attribution")
async def performance_attribution(
    portfolio_id: str,
    benchmark: str,
    start_date: date,
    end_date: date,
    method: str = Query("brinson", pattern="^(brinson|factor|risk)$")
):
    """Perform performance attribution analysis.

    Placeholder implementation: echoes the request and returns fixed
    attribution effects until the real analysis is wired in.
    """
    try:
        # Placeholder for attribution analysis
        period = {
            "start": start_date.isoformat(),
            "end": end_date.isoformat(),
        }
        attribution = {
            "allocation_effect": 0.0023,
            "selection_effect": 0.0045,
            "interaction_effect": 0.0001,
            "total_effect": 0.0069,
        }
        return {
            "portfolio_id": portfolio_id,
            "benchmark": benchmark,
            "period": period,
            "method": method,
            "attribution": attribution,
        }

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to perform attribution: {str(e)}")
|
||||
182
apps/stock/analytics/src/api/endpoints/models.py
Normal file
182
apps/stock/analytics/src/api/endpoints/models.py
Normal file
|
|
@ -0,0 +1,182 @@
|
|||
from fastapi import APIRouter, HTTPException, UploadFile, File
|
||||
from pydantic import BaseModel
|
||||
from typing import Dict, Any, List, Optional
|
||||
import numpy as np
|
||||
import onnxruntime as ort
|
||||
import json
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
# In-memory model storage (in production, use proper model registry)
|
||||
loaded_models = {}
|
||||
|
||||
class PredictionRequest(BaseModel):
    # Inference request: which loaded model to run, and a flat
    # feature-name -> value mapping.
    # NOTE(review): field names starting with "model_" collide with
    # pydantic v2's protected "model_" namespace and emit warnings —
    # confirm the pydantic version in use.
    model_id: str
    features: Dict[str, float]
|
||||
|
||||
class PredictionResponse(BaseModel):
    # Single inference result for one model invocation.
    model_id: str
    # Scalar model output (regression value or class score).
    prediction: float
    # Per-class probabilities for classification models; None otherwise.
    probability: Optional[Dict[str, float]] = None
    # Free-form extras (e.g. model version, timestamp).
    metadata: Optional[Dict[str, Any]] = None
|
||||
|
||||
class ModelInfo(BaseModel):
    # Descriptor returned by the model-listing endpoint.
    model_id: str
    name: str
    version: str
    # Model category, e.g. "regression" or "classification".
    type: str
    # Expected input feature names, in input order.
    input_features: List[str]
    # NOTE(review): annotated List[int], but the listing code populates
    # this with one shape list per output — likely should be
    # List[List[int]]; confirm and align.
    output_shape: List[int]
    metadata: Dict[str, Any]
|
||||
|
||||
@router.post("/predict", response_model=PredictionResponse)
async def predict(request: PredictionRequest):
    """
    Run inference on a loaded model.

    Raises 404 when the model is not loaded and 500 on inference errors.
    For multi-class classification outputs, `prediction` is the arg-max
    class index and `probability` carries the per-class values.
    """
    try:
        if request.model_id not in loaded_models:
            raise HTTPException(status_code=404, detail=f"Model {request.model_id} not found")

        model_info = loaded_models[request.model_id]
        session = model_info['session']

        # Prepare input: missing features default to 0.0, ordered by the
        # model's declared feature list.
        input_features = model_info['input_features']
        input_array = np.array([[request.features.get(f, 0.0) for f in input_features]], dtype=np.float32)

        # Run inference
        input_name = session.get_inputs()[0].name
        output = session.run(None, {input_name: input_array})

        # Process output.  (The previous code called float() on the whole
        # output vector before the classification check, which raised for
        # multi-class models before probabilities were ever computed.)
        raw = np.asarray(output[0][0])

        probability = None
        if model_info['type'] == 'classification' and raw.size > 1:
            probability = {
                f"class_{i}": float(p)
                for i, p in enumerate(raw)
            }
            # Scalar summary of a multi-class output: predicted class index.
            prediction = float(np.argmax(raw))
        else:
            # .item() handles both 0-d and size-1 outputs without the
            # NumPy deprecation on float(array); size > 1 still errors.
            prediction = float(raw.item())

        return PredictionResponse(
            model_id=request.model_id,
            prediction=prediction,
            probability=probability,
            metadata={
                "model_version": model_info['version'],
                "timestamp": np.datetime64('now').tolist()
            }
        )

    except HTTPException:
        # Preserve deliberate HTTP errors (the 404 above) instead of
        # re-wrapping them as generic 500s.
        raise
    except Exception as e:
        logger.error(f"Prediction failed: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Prediction failed: {str(e)}")
|
||||
|
||||
@router.post("/load")
async def load_model(
    model_id: str,
    model_file: UploadFile = File(...),
    metadata: Optional[str] = None
):
    """
    Load an ONNX model for inference.

    `metadata` is an optional JSON string with keys such as
    `feature_names`, `model_type`, `version`, and `name`.  Malformed
    metadata is a client error (400) rather than a server error (500).
    """
    try:
        # Read model file
        content = await model_file.read()

        # Parse metadata before building the (potentially expensive) ONNX
        # session so bad metadata fails fast with a 400.
        try:
            model_metadata = json.loads(metadata) if metadata else {}
        except json.JSONDecodeError as e:
            raise HTTPException(status_code=400, detail=f"Invalid metadata JSON: {str(e)}")

        # Create ONNX session from the uploaded bytes
        session = ort.InferenceSession(content)

        # Extract model info
        input_features = [inp.name for inp in session.get_inputs()]
        output_shape = [out.shape for out in session.get_outputs()]

        # Store model (in-memory registry; replaced on duplicate model_id)
        loaded_models[model_id] = {
            'session': session,
            'input_features': model_metadata.get('feature_names', input_features),
            'type': model_metadata.get('model_type', 'regression'),
            'version': model_metadata.get('version', '1.0'),
            'metadata': model_metadata
        }

        return {
            "message": f"Model {model_id} loaded successfully",
            "input_features": input_features,
            "output_shape": output_shape
        }

    except HTTPException:
        # Keep the 400 above from being re-wrapped as a 500.
        raise
    except Exception as e:
        logger.error(f"Failed to load model: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Failed to load model: {str(e)}")
|
||||
|
||||
@router.get("/list", response_model=List[ModelInfo])
async def list_models():
    """List all loaded models as ModelInfo descriptors."""
    return [
        ModelInfo(
            model_id=model_id,
            name=info['metadata'].get('name', model_id),
            version=info['version'],
            type=info['type'],
            input_features=info['input_features'],
            output_shape=[out.shape for out in info['session'].get_outputs()],
            metadata=info['metadata'],
        )
        for model_id, info in loaded_models.items()
    ]
|
||||
|
||||
@router.delete("/{model_id}")
async def unload_model(model_id: str):
    """Unload a model from memory; 404 if it was never loaded."""
    try:
        del loaded_models[model_id]
    except KeyError:
        raise HTTPException(status_code=404, detail=f"Model {model_id} not found")

    return {"message": f"Model {model_id} unloaded successfully"}
|
||||
|
||||
@router.post("/batch_predict")
async def batch_predict(
    model_id: str,
    features: List[Dict[str, float]]
):
    """
    Run batch predictions.

    Delegates to `predict` once per feature set and propagates its HTTP
    errors (e.g. 404 for an unknown model) instead of re-wrapping them
    as 500s.
    """
    try:
        if model_id not in loaded_models:
            raise HTTPException(status_code=404, detail=f"Model {model_id} not found")

        predictions = []

        for feature_set in features:
            request = PredictionRequest(model_id=model_id, features=feature_set)
            result = await predict(request)
            predictions.append(result.dict())

        return {
            "model_id": model_id,
            "predictions": predictions,
            "count": len(predictions)
        }

    except HTTPException:
        # Deliberate HTTP errors (the 404 above, or any raised inside
        # predict()) must not be converted into generic 500s.
        raise
    except Exception as e:
        logger.error(f"Batch prediction failed: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Batch prediction failed: {str(e)}")
|
||||
120
apps/stock/analytics/src/api/endpoints/optimization.py
Normal file
120
apps/stock/analytics/src/api/endpoints/optimization.py
Normal file
|
|
@ -0,0 +1,120 @@
|
|||
from fastapi import APIRouter, HTTPException, Depends
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import List, Optional, Dict
|
||||
import numpy as np
|
||||
|
||||
from ...optimization.portfolio_optimizer import PortfolioOptimizer
|
||||
from ..app import get_portfolio_optimizer
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
class OptimizationConstraints(BaseModel):
    # Per-asset weight bounds; Field validation clamps both to [0, 1].
    min_weight: Optional[float] = Field(0.0, ge=0.0, le=1.0)
    max_weight: Optional[float] = Field(1.0, ge=0.0, le=1.0)
    # Optional targets forwarded to the optimizer; semantics depend on
    # the chosen optimization method.
    target_return: Optional[float] = None
    max_risk: Optional[float] = None
|
||||
|
||||
class PortfolioOptimizationRequest(BaseModel):
    # Asset identifiers, one per return column.
    symbols: List[str]
    # Return observations: rows are periods, columns are assets
    # (column count is validated against len(symbols) by the endpoint).
    returns: List[List[float]]
    constraints: Optional[OptimizationConstraints] = None
    # Optimization algorithm, restricted to the listed methods.
    method: str = Field("mean_variance", pattern="^(mean_variance|min_variance|max_sharpe|risk_parity|black_litterman)$")
|
||||
|
||||
class PortfolioWeights(BaseModel):
    # Optimization result: weights aligned index-for-index with symbols,
    # plus summary statistics from the optimizer.
    symbols: List[str]
    weights: List[float]
    expected_return: float
    expected_risk: float
    sharpe_ratio: float
|
||||
|
||||
@router.post("/portfolio", response_model=PortfolioWeights)
async def optimize_portfolio(
    request: PortfolioOptimizationRequest,
    optimizer: PortfolioOptimizer = Depends(get_portfolio_optimizer)
):
    """
    Optimize portfolio weights using various methods.

    Returns 400 for malformed input (dimension mismatch, optimizer
    ValueError) and 500 for unexpected failures.
    """
    try:
        # Convert returns to numpy array
        returns_array = np.array(request.returns)

        # Validate dimensions: rows are observations, columns are assets.
        # The ndim check also rejects empty/ragged input, which would
        # otherwise raise IndexError on shape[1] and surface as a 500.
        if returns_array.ndim != 2 or len(request.symbols) != returns_array.shape[1]:
            raise HTTPException(
                status_code=400,
                detail="Number of symbols must match number of return columns"
            )

        # Run optimization
        result = optimizer.optimize(
            returns=returns_array,
            method=request.method,
            constraints=request.constraints.dict() if request.constraints else None
        )

        return PortfolioWeights(
            symbols=request.symbols,
            weights=result['weights'].tolist(),
            expected_return=float(result['expected_return']),
            expected_risk=float(result['expected_risk']),
            sharpe_ratio=float(result['sharpe_ratio'])
        )

    except HTTPException:
        # Keep the deliberate 400 above from being re-wrapped as a 500 by
        # the catch-all below.
        raise
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Optimization failed: {str(e)}")
|
||||
|
||||
@router.post("/efficient_frontier")
async def calculate_efficient_frontier(
    request: PortfolioOptimizationRequest,
    num_portfolios: int = 100,
    optimizer: PortfolioOptimizer = Depends(get_portfolio_optimizer)
):
    """Calculate the efficient frontier for a set of assets.

    Delegates to the injected optimizer; any failure surfaces as a 500.
    """
    try:
        frontier = optimizer.calculate_efficient_frontier(
            returns=np.array(request.returns),
            num_portfolios=num_portfolios,
        )
        return {"symbols": request.symbols, "frontier": frontier}

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to calculate efficient frontier: {str(e)}")
|
||||
|
||||
@router.post("/rebalance")
async def suggest_rebalance(
    current_weights: Dict[str, float],
    target_weights: Dict[str, float],
    constraints: Optional[Dict[str, float]] = None
):
    """
    Suggest trades to rebalance portfolio from current to target weights.

    Positions held now but absent from the target are sold down to zero
    (the previous version silently ignored them).  `constraints` is
    accepted but not yet applied.
    """
    try:
        # Consider every symbol on either side, so positions missing from
        # the target generate closing trades.
        symbols = set(current_weights) | set(target_weights)

        trades = {}
        for symbol in symbols:
            diff = target_weights.get(symbol, 0.0) - current_weights.get(symbol, 0.0)

            if abs(diff) > 0.001:  # Ignore tiny differences
                trades[symbol] = diff

        return {
            "trades": trades,
            "total_turnover": sum(abs(t) for t in trades.values())
        }

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Rebalance calculation failed: {str(e)}")
|
||||
Loading…
Add table
Add a link
Reference in a new issue