Compare commits
7 Commits
e6d69ed04d
...
regime-imb
| Author | SHA1 | Date | |
|---|---|---|---|
| 1af0aab5fa | |||
| df37366603 | |||
| 7e4a6874a2 | |||
| c4ecb29d4c | |||
| 0c82c4f366 | |||
| 1e4cb87da3 | |||
| 10bb371054 |
4
.gitignore
vendored
4
.gitignore
vendored
@@ -171,3 +171,7 @@ cython_debug/
 ./logs/
 *.csv
 research/regime_results.html
+data/backtest_runs.db
+.gitignore
+live_trading/regime_model.pkl
+live_trading/positions.json
3
api/__init__.py
Normal file
3
api/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""
|
||||||
|
FastAPI backend for Lowkey Backtest UI.
|
||||||
|
"""
|
||||||
47
api/main.py
Normal file
47
api/main.py
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
"""
|
||||||
|
FastAPI application entry point for Lowkey Backtest UI.
|
||||||
|
|
||||||
|
Run with: uvicorn api.main:app --reload
|
||||||
|
"""
|
||||||
|
from contextlib import asynccontextmanager
|
||||||
|
|
||||||
|
from fastapi import FastAPI
|
||||||
|
from fastapi.middleware.cors import CORSMiddleware
|
||||||
|
|
||||||
|
from api.models.database import init_db
|
||||||
|
from api.routers import backtest, data, strategies
|
||||||
|
|
||||||
|
|
||||||
|
@asynccontextmanager
|
||||||
|
async def lifespan(app: FastAPI):
|
||||||
|
"""Initialize database on startup."""
|
||||||
|
init_db()
|
||||||
|
yield
|
||||||
|
|
||||||
|
|
||||||
|
app = FastAPI(
|
||||||
|
title="Lowkey Backtest API",
|
||||||
|
description="API for running and analyzing trading strategy backtests",
|
||||||
|
version="0.1.0",
|
||||||
|
lifespan=lifespan,
|
||||||
|
)
|
||||||
|
|
||||||
|
# CORS configuration for local development
|
||||||
|
app.add_middleware(
|
||||||
|
CORSMiddleware,
|
||||||
|
allow_origins=["http://localhost:5173", "http://127.0.0.1:5173"],
|
||||||
|
allow_credentials=True,
|
||||||
|
allow_methods=["*"],
|
||||||
|
allow_headers=["*"],
|
||||||
|
)
|
||||||
|
|
||||||
|
# Register routers
|
||||||
|
app.include_router(strategies.router, prefix="/api", tags=["strategies"])
|
||||||
|
app.include_router(data.router, prefix="/api", tags=["data"])
|
||||||
|
app.include_router(backtest.router, prefix="/api", tags=["backtest"])
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/api/health")
|
||||||
|
async def health_check():
|
||||||
|
"""Health check endpoint."""
|
||||||
|
return {"status": "ok", "service": "lowkey-backtest-api"}
|
||||||
3
api/models/__init__.py
Normal file
3
api/models/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""
|
||||||
|
Pydantic schemas and database models.
|
||||||
|
"""
|
||||||
99
api/models/database.py
Normal file
99
api/models/database.py
Normal file
@@ -0,0 +1,99 @@
|
|||||||
|
"""
|
||||||
|
SQLAlchemy database models and session management for backtest run persistence.
|
||||||
|
"""
|
||||||
|
import json
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
from sqlalchemy import JSON, Column, DateTime, Float, Integer, String, Text, create_engine
|
||||||
|
from sqlalchemy.orm import DeclarativeBase, Session, sessionmaker
|
||||||
|
|
||||||
|
# Database file location
|
||||||
|
DB_PATH = Path(__file__).parent.parent.parent / "data" / "backtest_runs.db"
|
||||||
|
DATABASE_URL = f"sqlite:///{DB_PATH}"
|
||||||
|
|
||||||
|
engine = create_engine(DATABASE_URL, connect_args={"check_same_thread": False})
|
||||||
|
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
|
||||||
|
|
||||||
|
|
||||||
|
class Base(DeclarativeBase):
    """Base class for SQLAlchemy models."""
    # NOTE: the docstring alone is a valid class body; the original's extra
    # `pass` was redundant and has been dropped.
|
||||||
|
|
||||||
|
|
||||||
|
class BacktestRun(Base):
    """
    Persisted backtest run record.

    Stores all information needed to display and compare runs.
    """
    __tablename__ = "backtest_runs"

    id = Column(Integer, primary_key=True, autoincrement=True)
    # External identifier handed to API clients (UUID string).
    run_id = Column(String(36), unique=True, nullable=False, index=True)

    # Configuration
    strategy = Column(String(50), nullable=False, index=True)
    symbol = Column(String(20), nullable=False, index=True)
    exchange = Column(String(20), nullable=False, default="okx")
    market_type = Column(String(20), nullable=False)
    timeframe = Column(String(10), nullable=False)
    leverage = Column(Integer, nullable=False, default=1)
    params = Column(JSON, nullable=False, default=dict)

    # Date range (stored as strings; presumably "YYYY-MM-DD ..." — set by runner)
    start_date = Column(String(20), nullable=True)
    end_date = Column(String(20), nullable=True)

    # Metrics (denormalized for quick listing without deserializing JSON blobs)
    total_return = Column(Float, nullable=False)
    benchmark_return = Column(Float, nullable=False, default=0.0)
    alpha = Column(Float, nullable=False, default=0.0)
    sharpe_ratio = Column(Float, nullable=False)
    max_drawdown = Column(Float, nullable=False)
    win_rate = Column(Float, nullable=False)
    total_trades = Column(Integer, nullable=False)
    profit_factor = Column(Float, nullable=True)
    total_fees = Column(Float, nullable=False, default=0.0)
    total_funding = Column(Float, nullable=False, default=0.0)
    liquidation_count = Column(Integer, nullable=False, default=0)
    liquidation_loss = Column(Float, nullable=False, default=0.0)
    adjusted_return = Column(Float, nullable=True)

    # Full data (JSON serialized via the helper methods below)
    equity_curve = Column(Text, nullable=False)  # JSON array
    trades = Column(Text, nullable=False)  # JSON array

    # Metadata — lambda default so "now" is evaluated per insert, not at import.
    created_at = Column(DateTime, nullable=False, default=lambda: datetime.now(timezone.utc))

    def set_equity_curve(self, data: list[dict]) -> None:
        """Serialize equity curve to JSON string."""
        self.equity_curve = json.dumps(data)

    def get_equity_curve(self) -> list[dict]:
        """Deserialize equity curve from JSON string (empty list if unset)."""
        return json.loads(self.equity_curve) if self.equity_curve else []

    def set_trades(self, data: list[dict]) -> None:
        """Serialize trades to JSON string."""
        self.trades = json.dumps(data)

    def get_trades(self) -> list[dict]:
        """Deserialize trades from JSON string (empty list if unset)."""
        return json.loads(self.trades) if self.trades else []
|
||||||
|
|
||||||
|
|
||||||
|
def init_db() -> None:
    """Create the data directory and database tables if they don't exist."""
    # Ensure the data/ directory exists before SQLite tries to open the file.
    DB_PATH.parent.mkdir(parents=True, exist_ok=True)
    Base.metadata.create_all(bind=engine)
|
||||||
|
|
||||||
|
|
||||||
|
def get_db() -> Session:
    """Yield a database session (FastAPI dependency injection).

    NOTE(review): this is a generator function, so the precise return type is
    Iterator[Session]; FastAPI's ``Depends`` accepts either form. The session
    is always closed after the request, even on error.
    """
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
|
||||||
162
api/models/schemas.py
Normal file
162
api/models/schemas.py
Normal file
@@ -0,0 +1,162 @@
|
|||||||
|
"""
|
||||||
|
Pydantic schemas for API request/response models.
|
||||||
|
"""
|
||||||
|
from datetime import datetime
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
from pydantic import BaseModel, Field
|
||||||
|
|
||||||
|
|
||||||
|
# --- Strategy Schemas ---
|
||||||
|
|
||||||
|
class StrategyParam(BaseModel):
|
||||||
|
"""Single strategy parameter definition."""
|
||||||
|
name: str
|
||||||
|
value: Any
|
||||||
|
param_type: str = Field(description="Type: int, float, bool, list")
|
||||||
|
min_value: float | None = None
|
||||||
|
max_value: float | None = None
|
||||||
|
description: str | None = None
|
||||||
|
|
||||||
|
|
||||||
|
class StrategyInfo(BaseModel):
|
||||||
|
"""Strategy information with parameters."""
|
||||||
|
name: str
|
||||||
|
display_name: str
|
||||||
|
market_type: str
|
||||||
|
default_leverage: int
|
||||||
|
default_params: dict[str, Any]
|
||||||
|
grid_params: dict[str, Any]
|
||||||
|
|
||||||
|
|
||||||
|
class StrategiesResponse(BaseModel):
|
||||||
|
"""Response for GET /api/strategies."""
|
||||||
|
strategies: list[StrategyInfo]
|
||||||
|
|
||||||
|
|
||||||
|
# --- Symbol/Data Schemas ---
|
||||||
|
|
||||||
|
class SymbolInfo(BaseModel):
|
||||||
|
"""Available symbol information."""
|
||||||
|
symbol: str
|
||||||
|
exchange: str
|
||||||
|
market_type: str
|
||||||
|
timeframes: list[str]
|
||||||
|
start_date: str | None = None
|
||||||
|
end_date: str | None = None
|
||||||
|
row_count: int = 0
|
||||||
|
|
||||||
|
|
||||||
|
class DataStatusResponse(BaseModel):
|
||||||
|
"""Response for GET /api/data/status."""
|
||||||
|
symbols: list[SymbolInfo]
|
||||||
|
|
||||||
|
|
||||||
|
# --- Backtest Schemas ---
|
||||||
|
|
||||||
|
class BacktestRequest(BaseModel):
|
||||||
|
"""Request body for POST /api/backtest."""
|
||||||
|
strategy: str
|
||||||
|
symbol: str
|
||||||
|
exchange: str = "okx"
|
||||||
|
timeframe: str = "1h"
|
||||||
|
market_type: str = "perpetual"
|
||||||
|
start_date: str | None = None
|
||||||
|
end_date: str | None = None
|
||||||
|
init_cash: float = 10000.0
|
||||||
|
leverage: int | None = None
|
||||||
|
fees: float | None = None
|
||||||
|
slippage: float = 0.001
|
||||||
|
sl_stop: float | None = None
|
||||||
|
tp_stop: float | None = None
|
||||||
|
sl_trail: bool = False
|
||||||
|
params: dict[str, Any] = Field(default_factory=dict)
|
||||||
|
|
||||||
|
|
||||||
|
class TradeRecord(BaseModel):
|
||||||
|
"""Single trade record."""
|
||||||
|
entry_time: str
|
||||||
|
exit_time: str | None = None
|
||||||
|
entry_price: float
|
||||||
|
exit_price: float | None = None
|
||||||
|
size: float
|
||||||
|
direction: str
|
||||||
|
pnl: float | None = None
|
||||||
|
return_pct: float | None = None
|
||||||
|
status: str = "closed"
|
||||||
|
|
||||||
|
|
||||||
|
class EquityPoint(BaseModel):
|
||||||
|
"""Single point on equity curve."""
|
||||||
|
timestamp: str
|
||||||
|
value: float
|
||||||
|
drawdown: float = 0.0
|
||||||
|
|
||||||
|
|
||||||
|
class BacktestMetrics(BaseModel):
|
||||||
|
"""Backtest performance metrics."""
|
||||||
|
total_return: float
|
||||||
|
benchmark_return: float = 0.0
|
||||||
|
alpha: float = 0.0
|
||||||
|
sharpe_ratio: float
|
||||||
|
max_drawdown: float
|
||||||
|
win_rate: float
|
||||||
|
total_trades: int
|
||||||
|
profit_factor: float | None = None
|
||||||
|
avg_trade_return: float | None = None
|
||||||
|
total_fees: float = 0.0
|
||||||
|
total_funding: float = 0.0
|
||||||
|
liquidation_count: int = 0
|
||||||
|
liquidation_loss: float = 0.0
|
||||||
|
adjusted_return: float | None = None
|
||||||
|
|
||||||
|
|
||||||
|
class BacktestResult(BaseModel):
|
||||||
|
"""Complete backtest result."""
|
||||||
|
run_id: str
|
||||||
|
strategy: str
|
||||||
|
symbol: str
|
||||||
|
market_type: str
|
||||||
|
timeframe: str
|
||||||
|
start_date: str
|
||||||
|
end_date: str
|
||||||
|
leverage: int
|
||||||
|
params: dict[str, Any]
|
||||||
|
metrics: BacktestMetrics
|
||||||
|
equity_curve: list[EquityPoint]
|
||||||
|
trades: list[TradeRecord]
|
||||||
|
created_at: str
|
||||||
|
|
||||||
|
|
||||||
|
class BacktestSummary(BaseModel):
|
||||||
|
"""Summary for backtest list view."""
|
||||||
|
run_id: str
|
||||||
|
strategy: str
|
||||||
|
symbol: str
|
||||||
|
market_type: str
|
||||||
|
timeframe: str
|
||||||
|
total_return: float
|
||||||
|
sharpe_ratio: float
|
||||||
|
max_drawdown: float
|
||||||
|
total_trades: int
|
||||||
|
created_at: str
|
||||||
|
params: dict[str, Any]
|
||||||
|
|
||||||
|
|
||||||
|
class BacktestListResponse(BaseModel):
|
||||||
|
"""Response for GET /api/backtests."""
|
||||||
|
runs: list[BacktestSummary]
|
||||||
|
total: int
|
||||||
|
|
||||||
|
|
||||||
|
# --- Comparison Schemas ---
|
||||||
|
|
||||||
|
class CompareRequest(BaseModel):
|
||||||
|
"""Request body for POST /api/compare."""
|
||||||
|
run_ids: list[str] = Field(min_length=2, max_length=5)
|
||||||
|
|
||||||
|
|
||||||
|
class CompareResult(BaseModel):
|
||||||
|
"""Comparison of multiple backtest runs."""
|
||||||
|
runs: list[BacktestResult]
|
||||||
|
param_diff: dict[str, list[Any]]
|
||||||
3
api/routers/__init__.py
Normal file
3
api/routers/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""
|
||||||
|
API routers for backtest, strategies, and data endpoints.
|
||||||
|
"""
|
||||||
193
api/routers/backtest.py
Normal file
193
api/routers/backtest.py
Normal file
@@ -0,0 +1,193 @@
|
|||||||
|
"""
|
||||||
|
Backtest execution and history endpoints.
|
||||||
|
"""
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
from fastapi import APIRouter, Depends, HTTPException, Query
|
||||||
|
from sqlalchemy.orm import Session
|
||||||
|
|
||||||
|
from api.models.database import get_db
|
||||||
|
from api.models.schemas import (
|
||||||
|
BacktestListResponse,
|
||||||
|
BacktestRequest,
|
||||||
|
BacktestResult,
|
||||||
|
CompareRequest,
|
||||||
|
CompareResult,
|
||||||
|
)
|
||||||
|
from api.services.runner import get_runner
|
||||||
|
from api.services.storage import get_storage
|
||||||
|
from engine.logging_config import get_logger
|
||||||
|
|
||||||
|
router = APIRouter()
|
||||||
|
logger = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/backtest", response_model=BacktestResult)
async def run_backtest(
    request: BacktestRequest,
    db: Session = Depends(get_db),
):
    """
    Execute a backtest with the specified configuration.

    Runs the strategy on historical data and returns metrics,
    equity curve, and trade records. Results are automatically saved.
    """
    runner = get_runner()
    storage = get_storage()

    try:
        # Execute backtest
        result = runner.run(request)

        # Save to database
        storage.save_run(db, result)

        logger.info(
            "Backtest completed and saved: %s (return=%.2f%%, sharpe=%.2f)",
            result.run_id,
            result.metrics.total_return,
            result.metrics.sharpe_ratio,
        )

        return result

    # `from e` chains the original cause so it survives in logs/tracebacks.
    except KeyError as e:
        # Unknown strategy name -> client error.
        raise HTTPException(status_code=400, detail=f"Invalid strategy: {e}") from e
    except FileNotFoundError as e:
        # Missing OHLCV data for the requested symbol/timeframe.
        raise HTTPException(status_code=404, detail=f"Data not found: {e}") from e
    except Exception as e:
        logger.error("Backtest failed: %s", e, exc_info=True)
        raise HTTPException(status_code=500, detail=str(e)) from e
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/backtests", response_model=BacktestListResponse)
async def list_backtests(
    limit: int = Query(50, ge=1, le=200),
    offset: int = Query(0, ge=0),
    strategy: str | None = None,
    symbol: str | None = None,
    db: Session = Depends(get_db),
):
    """
    List saved backtest runs with optional filtering.

    Returns summaries for quick display in the history sidebar.
    """
    storage = get_storage()

    runs, total = storage.list_runs(
        db,
        limit=limit,
        offset=offset,
        strategy=strategy,
        symbol=symbol,
    )

    return BacktestListResponse(runs=runs, total=total)
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/backtest/{run_id}", response_model=BacktestResult)
async def get_backtest(
    run_id: str,
    db: Session = Depends(get_db),
):
    """
    Retrieve a specific backtest run by ID.

    Returns full result including equity curve and trades.
    """
    storage = get_storage()

    result = storage.get_run(db, run_id)

    if not result:
        raise HTTPException(status_code=404, detail=f"Run not found: {run_id}")

    return result
|
||||||
|
|
||||||
|
|
||||||
|
@router.delete("/backtest/{run_id}")
async def delete_backtest(
    run_id: str,
    db: Session = Depends(get_db),
):
    """
    Delete a backtest run.
    """
    storage = get_storage()

    deleted = storage.delete_run(db, run_id)

    if not deleted:
        raise HTTPException(status_code=404, detail=f"Run not found: {run_id}")

    return {"status": "deleted", "run_id": run_id}
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/compare", response_model=CompareResult)
async def compare_runs(
    request: CompareRequest,
    db: Session = Depends(get_db),
):
    """
    Compare multiple backtest runs (2-5 runs).

    Returns full results for each run plus parameter differences.
    """
    storage = get_storage()

    runs = storage.get_runs_by_ids(db, request.run_ids)

    # Report exactly which requested IDs were missing.
    if len(runs) != len(request.run_ids):
        found_ids = {r.run_id for r in runs}
        missing = [rid for rid in request.run_ids if rid not in found_ids]
        raise HTTPException(
            status_code=404,
            detail=f"Runs not found: {missing}"
        )

    # Calculate parameter differences
    param_diff = _calculate_param_diff(runs)

    return CompareResult(runs=runs, param_diff=param_diff)
|
||||||
|
|
||||||
|
|
||||||
|
def _calculate_param_diff(runs: list[BacktestResult]) -> dict[str, list[Any]]:
|
||||||
|
"""
|
||||||
|
Find parameters that differ between runs.
|
||||||
|
|
||||||
|
Returns dict mapping param name to list of values (one per run).
|
||||||
|
"""
|
||||||
|
if not runs:
|
||||||
|
return {}
|
||||||
|
|
||||||
|
# Collect all param keys
|
||||||
|
all_keys: set[str] = set()
|
||||||
|
for run in runs:
|
||||||
|
all_keys.update(run.params.keys())
|
||||||
|
|
||||||
|
# Also include strategy and key config
|
||||||
|
all_keys.update(['strategy', 'symbol', 'leverage', 'timeframe'])
|
||||||
|
|
||||||
|
diff: dict[str, list[Any]] = {}
|
||||||
|
|
||||||
|
for key in sorted(all_keys):
|
||||||
|
values = []
|
||||||
|
for run in runs:
|
||||||
|
if key == 'strategy':
|
||||||
|
values.append(run.strategy)
|
||||||
|
elif key == 'symbol':
|
||||||
|
values.append(run.symbol)
|
||||||
|
elif key == 'leverage':
|
||||||
|
values.append(run.leverage)
|
||||||
|
elif key == 'timeframe':
|
||||||
|
values.append(run.timeframe)
|
||||||
|
else:
|
||||||
|
values.append(run.params.get(key))
|
||||||
|
|
||||||
|
# Only include if values differ
|
||||||
|
if len(set(str(v) for v in values)) > 1:
|
||||||
|
diff[key] = values
|
||||||
|
|
||||||
|
return diff
|
||||||
97
api/routers/data.py
Normal file
97
api/routers/data.py
Normal file
@@ -0,0 +1,97 @@
|
|||||||
|
"""
|
||||||
|
Data status and symbol information endpoints.
|
||||||
|
"""
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
import pandas as pd
|
||||||
|
from fastapi import APIRouter
|
||||||
|
|
||||||
|
from api.models.schemas import DataStatusResponse, SymbolInfo
|
||||||
|
|
||||||
|
router = APIRouter()
|
||||||
|
|
||||||
|
# Base path for CCXT data
|
||||||
|
DATA_BASE = Path(__file__).parent.parent.parent / "data" / "ccxt"
|
||||||
|
|
||||||
|
|
||||||
|
def _scan_available_data() -> list[SymbolInfo]:
|
||||||
|
"""
|
||||||
|
Scan the data directory for available symbols and timeframes.
|
||||||
|
|
||||||
|
Returns list of SymbolInfo with date ranges and row counts.
|
||||||
|
"""
|
||||||
|
symbols = []
|
||||||
|
|
||||||
|
if not DATA_BASE.exists():
|
||||||
|
return symbols
|
||||||
|
|
||||||
|
# Structure: data/ccxt/{exchange}/{market_type}/{symbol}/{timeframe}.csv
|
||||||
|
for exchange_dir in DATA_BASE.iterdir():
|
||||||
|
if not exchange_dir.is_dir():
|
||||||
|
continue
|
||||||
|
exchange = exchange_dir.name
|
||||||
|
|
||||||
|
for market_dir in exchange_dir.iterdir():
|
||||||
|
if not market_dir.is_dir():
|
||||||
|
continue
|
||||||
|
market_type = market_dir.name
|
||||||
|
|
||||||
|
for symbol_dir in market_dir.iterdir():
|
||||||
|
if not symbol_dir.is_dir():
|
||||||
|
continue
|
||||||
|
symbol = symbol_dir.name
|
||||||
|
|
||||||
|
# Find all timeframes
|
||||||
|
timeframes = []
|
||||||
|
start_date = None
|
||||||
|
end_date = None
|
||||||
|
row_count = 0
|
||||||
|
|
||||||
|
for csv_file in symbol_dir.glob("*.csv"):
|
||||||
|
tf = csv_file.stem
|
||||||
|
timeframes.append(tf)
|
||||||
|
|
||||||
|
# Read first and last rows for date range
|
||||||
|
try:
|
||||||
|
df = pd.read_csv(csv_file, parse_dates=['timestamp'])
|
||||||
|
if not df.empty:
|
||||||
|
row_count = len(df)
|
||||||
|
start_date = df['timestamp'].min().strftime("%Y-%m-%d")
|
||||||
|
end_date = df['timestamp'].max().strftime("%Y-%m-%d")
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
if timeframes:
|
||||||
|
symbols.append(SymbolInfo(
|
||||||
|
symbol=symbol,
|
||||||
|
exchange=exchange,
|
||||||
|
market_type=market_type,
|
||||||
|
timeframes=sorted(timeframes),
|
||||||
|
start_date=start_date,
|
||||||
|
end_date=end_date,
|
||||||
|
row_count=row_count,
|
||||||
|
))
|
||||||
|
|
||||||
|
return symbols
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/symbols", response_model=DataStatusResponse)
async def get_symbols():
    """
    Get list of available symbols with their data ranges.

    Scans the local data directory for downloaded OHLCV data.
    """
    symbols = _scan_available_data()
    return DataStatusResponse(symbols=symbols)


@router.get("/data/status", response_model=DataStatusResponse)
async def get_data_status():
    """
    Get detailed data inventory status.

    Alias for /symbols; both perform a fresh directory scan per request.
    """
    symbols = _scan_available_data()
    return DataStatusResponse(symbols=symbols)
|
||||||
67
api/routers/strategies.py
Normal file
67
api/routers/strategies.py
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
"""
|
||||||
|
Strategy information endpoints.
|
||||||
|
"""
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from fastapi import APIRouter
|
||||||
|
|
||||||
|
from api.models.schemas import StrategiesResponse, StrategyInfo
|
||||||
|
from strategies.factory import get_registry
|
||||||
|
|
||||||
|
router = APIRouter()
|
||||||
|
|
||||||
|
|
||||||
|
def _serialize_param_value(value: Any) -> Any:
|
||||||
|
"""Convert numpy arrays and other types to JSON-serializable format."""
|
||||||
|
if isinstance(value, np.ndarray):
|
||||||
|
return value.tolist()
|
||||||
|
if isinstance(value, (np.integer, np.floating)):
|
||||||
|
return value.item()
|
||||||
|
return value
|
||||||
|
|
||||||
|
|
||||||
|
def _get_display_name(name: str) -> str:
|
||||||
|
"""Convert strategy key to display name."""
|
||||||
|
display_names = {
|
||||||
|
"rsi": "RSI Strategy",
|
||||||
|
"macross": "MA Crossover",
|
||||||
|
"meta_st": "Meta Supertrend",
|
||||||
|
"regime": "Regime Reversion (ML)",
|
||||||
|
}
|
||||||
|
return display_names.get(name, name.replace("_", " ").title())
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/strategies", response_model=StrategiesResponse)
async def get_strategies():
    """
    Get list of available strategies with their parameters.

    Returns strategy names, default parameters, and grid search ranges.
    """
    registry = get_registry()
    strategies = []

    for name, config in registry.items():
        # Instantiate to read per-strategy defaults (market type, leverage).
        strategy_instance = config.strategy_class()

        # Serialize parameters (convert numpy arrays/scalars to builtins
        # so the response is JSON-encodable).
        default_params = {
            k: _serialize_param_value(v)
            for k, v in config.default_params.items()
        }
        grid_params = {
            k: _serialize_param_value(v)
            for k, v in config.grid_params.items()
        }

        strategies.append(StrategyInfo(
            name=name,
            display_name=_get_display_name(name),
            market_type=strategy_instance.default_market_type.value,
            default_leverage=strategy_instance.default_leverage,
            default_params=default_params,
            grid_params=grid_params,
        ))

    return StrategiesResponse(strategies=strategies)
|
||||||
3
api/services/__init__.py
Normal file
3
api/services/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""
|
||||||
|
Business logic services for backtest execution and storage.
|
||||||
|
"""
|
||||||
300
api/services/runner.py
Normal file
300
api/services/runner.py
Normal file
@@ -0,0 +1,300 @@
|
|||||||
|
"""
|
||||||
|
Backtest runner service that wraps the existing Backtester engine.
|
||||||
|
"""
|
||||||
|
import uuid
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
import pandas as pd
|
||||||
|
|
||||||
|
from api.models.schemas import (
|
||||||
|
BacktestMetrics,
|
||||||
|
BacktestRequest,
|
||||||
|
BacktestResult,
|
||||||
|
EquityPoint,
|
||||||
|
TradeRecord,
|
||||||
|
)
|
||||||
|
from engine.backtester import Backtester
|
||||||
|
from engine.data_manager import DataManager
|
||||||
|
from engine.logging_config import get_logger
|
||||||
|
from engine.market import MarketType
|
||||||
|
from strategies.factory import get_strategy
|
||||||
|
|
||||||
|
logger = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class BacktestRunner:
|
||||||
|
"""
|
||||||
|
Service for executing backtests via the API.
|
||||||
|
|
||||||
|
Wraps the existing Backtester engine and converts results
|
||||||
|
to API response format.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.dm = DataManager()
|
||||||
|
self.bt = Backtester(self.dm)
|
||||||
|
|
||||||
|
def run(self, request: BacktestRequest) -> BacktestResult:
|
||||||
|
"""
|
||||||
|
Execute a backtest and return structured results.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
request: BacktestRequest with strategy, symbol, and parameters
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
BacktestResult with metrics, equity curve, and trades
|
||||||
|
"""
|
||||||
|
# Get strategy instance
|
||||||
|
strategy, default_params = get_strategy(request.strategy, is_grid=False)
|
||||||
|
|
||||||
|
# Merge default params with request params
|
||||||
|
params = {**default_params, **request.params}
|
||||||
|
|
||||||
|
# Convert market type string to enum
|
||||||
|
market_type = MarketType(request.market_type)
|
||||||
|
|
||||||
|
# Override strategy market type if specified
|
||||||
|
strategy.default_market_type = market_type
|
||||||
|
|
||||||
|
logger.info(
|
||||||
|
"Running backtest: %s on %s (%s), params=%s",
|
||||||
|
request.strategy, request.symbol, request.timeframe, params
|
||||||
|
)
|
||||||
|
|
||||||
|
# Execute backtest
|
||||||
|
result = self.bt.run_strategy(
|
||||||
|
strategy=strategy,
|
||||||
|
exchange_id=request.exchange,
|
||||||
|
symbol=request.symbol,
|
||||||
|
timeframe=request.timeframe,
|
||||||
|
start_date=request.start_date,
|
||||||
|
end_date=request.end_date,
|
||||||
|
init_cash=request.init_cash,
|
||||||
|
fees=request.fees,
|
||||||
|
slippage=request.slippage,
|
||||||
|
sl_stop=request.sl_stop,
|
||||||
|
tp_stop=request.tp_stop,
|
||||||
|
sl_trail=request.sl_trail,
|
||||||
|
leverage=request.leverage,
|
||||||
|
**params
|
||||||
|
)
|
||||||
|
|
||||||
|
# Extract data from portfolio
|
||||||
|
portfolio = result.portfolio
|
||||||
|
|
||||||
|
# Build trade records
|
||||||
|
trades = self._build_trade_records(portfolio)
|
||||||
|
|
||||||
|
# Build equity curve (trimmed to trading period)
|
||||||
|
equity_curve = self._build_equity_curve(portfolio)
|
||||||
|
|
||||||
|
# Build metrics
|
||||||
|
metrics = self._build_metrics(result, portfolio)
|
||||||
|
|
||||||
|
# Get date range from actual trading period (first trade to end)
|
||||||
|
idx = portfolio.wrapper.index
|
||||||
|
end_date = idx[-1].strftime("%Y-%m-%d %H:%M")
|
||||||
|
|
||||||
|
# Use first trade time as start if trades exist
|
||||||
|
trades_df = portfolio.trades.records_readable
|
||||||
|
if not trades_df.empty:
|
||||||
|
first_entry_col = 'Entry Timestamp' if 'Entry Timestamp' in trades_df.columns else 'Entry Time'
|
||||||
|
if first_entry_col in trades_df.columns:
|
||||||
|
first_trade_time = pd.to_datetime(trades_df[first_entry_col].iloc[0])
|
||||||
|
start_date = first_trade_time.strftime("%Y-%m-%d %H:%M")
|
||||||
|
else:
|
||||||
|
start_date = idx[0].strftime("%Y-%m-%d %H:%M")
|
||||||
|
else:
|
||||||
|
start_date = idx[0].strftime("%Y-%m-%d %H:%M")
|
||||||
|
|
||||||
|
return BacktestResult(
|
||||||
|
run_id=str(uuid.uuid4()),
|
||||||
|
strategy=request.strategy,
|
||||||
|
symbol=request.symbol,
|
||||||
|
market_type=result.market_type.value,
|
||||||
|
timeframe=request.timeframe,
|
||||||
|
start_date=start_date,
|
||||||
|
end_date=end_date,
|
||||||
|
leverage=result.leverage,
|
||||||
|
params=params,
|
||||||
|
metrics=metrics,
|
||||||
|
equity_curve=equity_curve,
|
||||||
|
trades=trades,
|
||||||
|
created_at=datetime.now(timezone.utc).isoformat(),
|
||||||
|
)
|
||||||
|
|
||||||
|
def _build_equity_curve(self, portfolio) -> list[EquityPoint]:
|
||||||
|
"""Extract equity curve with drawdown from portfolio, starting from first trade."""
|
||||||
|
value_series = portfolio.value()
|
||||||
|
drawdown_series = portfolio.drawdown()
|
||||||
|
|
||||||
|
# Handle multi-column case (from grid search)
|
||||||
|
if hasattr(value_series, 'columns') and len(value_series.columns) > 1:
|
||||||
|
value_series = value_series.iloc[:, 0]
|
||||||
|
drawdown_series = drawdown_series.iloc[:, 0]
|
||||||
|
elif hasattr(value_series, 'columns'):
|
||||||
|
value_series = value_series.iloc[:, 0]
|
||||||
|
drawdown_series = drawdown_series.iloc[:, 0]
|
||||||
|
|
||||||
|
# Find first trade time to trim equity curve
|
||||||
|
first_trade_idx = 0
|
||||||
|
trades_df = portfolio.trades.records_readable
|
||||||
|
if not trades_df.empty:
|
||||||
|
first_entry_col = 'Entry Timestamp' if 'Entry Timestamp' in trades_df.columns else 'Entry Time'
|
||||||
|
if first_entry_col in trades_df.columns:
|
||||||
|
first_trade_time = pd.to_datetime(trades_df[first_entry_col].iloc[0])
|
||||||
|
# Find index in value_series closest to first trade
|
||||||
|
if hasattr(value_series.index, 'get_indexer'):
|
||||||
|
first_trade_idx = value_series.index.get_indexer([first_trade_time], method='nearest')[0]
|
||||||
|
# Start a few bars before first trade for context
|
||||||
|
first_trade_idx = max(0, first_trade_idx - 5)
|
||||||
|
|
||||||
|
# Slice from first trade onwards
|
||||||
|
value_series = value_series.iloc[first_trade_idx:]
|
||||||
|
drawdown_series = drawdown_series.iloc[first_trade_idx:]
|
||||||
|
|
||||||
|
points = []
|
||||||
|
for i, (ts, val) in enumerate(value_series.items()):
|
||||||
|
dd = drawdown_series.iloc[i] if i < len(drawdown_series) else 0.0
|
||||||
|
points.append(EquityPoint(
|
||||||
|
timestamp=ts.isoformat(),
|
||||||
|
value=float(val),
|
||||||
|
drawdown=float(dd) * 100, # Convert to percentage
|
||||||
|
))
|
||||||
|
|
||||||
|
return points
|
||||||
|
|
||||||
|
def _build_trade_records(self, portfolio) -> list[TradeRecord]:
    """Extract trade records from portfolio."""
    trades_df = portfolio.trades.records_readable

    if trades_df.empty:
        return []

    records: list[TradeRecord] = []
    for _, row in trades_df.iterrows():
        # Column names differ between vectorbt versions, so every lookup
        # falls back to the alternative spelling.
        entry_time = row.get('Entry Timestamp', row.get('Entry Time', ''))
        exit_time = row.get('Exit Timestamp', row.get('Exit Time', ''))
        raw_exit_price = row.get('Avg Exit Price', row.get('Exit Price'))
        raw_pnl = row.get('PnL')
        raw_return = row.get('Return')

        has_exit = pd.notna(exit_time)

        records.append(TradeRecord(
            entry_time=str(entry_time) if pd.notna(entry_time) else "",
            exit_time=str(exit_time) if has_exit else None,
            entry_price=float(row.get('Avg Entry Price', row.get('Entry Price', 0))),
            exit_price=float(row.get('Avg Exit Price', row.get('Exit Price', 0)))
            if pd.notna(raw_exit_price) else None,
            size=float(row.get('Size', 0)),
            direction=str(row.get('Direction', 'Long')),
            pnl=float(row.get('PnL', 0)) if pd.notna(raw_pnl) else None,
            return_pct=float(row.get('Return', 0)) * 100
            if pd.notna(raw_return) else None,
            status="closed" if has_exit else "open",
        ))

    return records
|
def _build_metrics(self, result, portfolio) -> BacktestMetrics:
    """Build metrics from backtest result.

    Combines vectorbt portfolio statistics with the liquidation/funding
    figures carried on ``result``. Multi-column portfolios (grid search)
    are reduced by taking the mean of each stat.
    """
    stats = portfolio.stats()

    # Extract values, handling potential multi-column results
    def get_stat(key: str, default: float = 0.0) -> float:
        val = stats.get(key, default)
        if hasattr(val, 'mean'):
            # Multi-column (Series) result: average across columns.
            return float(val.mean())
        return float(val) if pd.notna(val) else default

    total_return = portfolio.total_return()
    if hasattr(total_return, 'mean'):
        total_return = total_return.mean()

    # Calculate benchmark return from first trade to end (not full period)
    # This gives accurate comparison when strategy has training period
    close = portfolio.close
    benchmark_return = 0.0

    if hasattr(close, 'iloc'):
        # Find first trade entry time
        trades_df = portfolio.trades.records_readable
        if not trades_df.empty:
            # Get the first trade entry timestamp
            # (column name varies between vectorbt versions)
            first_entry_col = 'Entry Timestamp' if 'Entry Timestamp' in trades_df.columns else 'Entry Time'
            if first_entry_col in trades_df.columns:
                first_trade_time = pd.to_datetime(trades_df[first_entry_col].iloc[0])

                # Find the price at first trade
                if hasattr(close.index, 'get_indexer'):
                    # Find closest index to first trade time
                    idx = close.index.get_indexer([first_trade_time], method='nearest')[0]
                    start_price = close.iloc[idx]
                else:
                    start_price = close.iloc[0]

                end_price = close.iloc[-1]

                # Multi-column close (grid search): average across columns.
                if hasattr(start_price, 'mean'):
                    start_price = start_price.mean()
                if hasattr(end_price, 'mean'):
                    end_price = end_price.mean()

                benchmark_return = ((end_price - start_price) / start_price)
            # NOTE(review): if neither entry column exists, benchmark_return
            # silently stays 0.0 — confirm that is the intended fallback.
        else:
            # No trades - use full period
            start_price = close.iloc[0]
            end_price = close.iloc[-1]
            if hasattr(start_price, 'mean'):
                start_price = start_price.mean()
            if hasattr(end_price, 'mean'):
                end_price = end_price.mean()
            benchmark_return = ((end_price - start_price) / start_price)

    # Alpha = strategy return - benchmark return
    alpha = float(total_return) - float(benchmark_return)

    sharpe = portfolio.sharpe_ratio()
    if hasattr(sharpe, 'mean'):
        sharpe = sharpe.mean()

    max_dd = portfolio.max_drawdown()
    if hasattr(max_dd, 'mean'):
        max_dd = max_dd.mean()

    win_rate = portfolio.trades.win_rate()
    if hasattr(win_rate, 'mean'):
        win_rate = win_rate.mean()

    trade_count = portfolio.trades.count()
    if hasattr(trade_count, 'mean'):
        trade_count = int(trade_count.mean())
    else:
        trade_count = int(trade_count)

    # Ratios below are converted from fractions to percentages for the API.
    return BacktestMetrics(
        total_return=float(total_return) * 100,
        benchmark_return=float(benchmark_return) * 100,
        alpha=float(alpha) * 100,
        sharpe_ratio=float(sharpe) if pd.notna(sharpe) else 0.0,
        max_drawdown=float(max_dd) * 100,
        win_rate=float(win_rate) * 100 if pd.notna(win_rate) else 0.0,
        total_trades=trade_count,
        profit_factor=get_stat('Profit Factor'),
        # NOTE(review): this feeds 'Avg Winning Trade [%]' into
        # avg_trade_return — losing trades are excluded. Confirm intended.
        avg_trade_return=get_stat('Avg Winning Trade [%]'),
        total_fees=get_stat('Total Fees Paid'),
        total_funding=result.total_funding_paid,
        liquidation_count=result.liquidation_count,
        liquidation_loss=result.total_liquidation_loss,
        adjusted_return=result.adjusted_return,
    )
|
# Singleton instance
_runner: BacktestRunner | None = None


def get_runner() -> BacktestRunner:
    """Return the process-wide BacktestRunner, creating it on first use."""
    global _runner
    if _runner is not None:
        return _runner
    _runner = BacktestRunner()
    return _runner
||||||
225
api/services/storage.py
Normal file
225
api/services/storage.py
Normal file
@@ -0,0 +1,225 @@
|
|||||||
|
"""
|
||||||
|
Storage service for persisting and retrieving backtest runs.
|
||||||
|
"""
|
||||||
|
from sqlalchemy.orm import Session
|
||||||
|
|
||||||
|
from api.models.database import BacktestRun
|
||||||
|
from api.models.schemas import (
|
||||||
|
BacktestResult,
|
||||||
|
BacktestSummary,
|
||||||
|
EquityPoint,
|
||||||
|
BacktestMetrics,
|
||||||
|
TradeRecord,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class StorageService:
    """
    Service for saving and loading backtest runs from SQLite.

    Stateless: every method takes the SQLAlchemy ``Session`` explicitly,
    so a single instance can be shared across requests.
    """

    def save_run(self, db: Session, result: BacktestResult) -> BacktestRun:
        """
        Persist a backtest result to the database.

        Args:
            db: Database session
            result: BacktestResult to save

        Returns:
            Created BacktestRun record

        Note:
            run_id is presumably unique in the table, so saving the same
            result twice would raise an integrity error — TODO confirm.
        """
        # Flatten the nested metrics onto scalar columns for querying.
        run = BacktestRun(
            run_id=result.run_id,
            strategy=result.strategy,
            symbol=result.symbol,
            market_type=result.market_type,
            timeframe=result.timeframe,
            leverage=result.leverage,
            params=result.params,
            start_date=result.start_date,
            end_date=result.end_date,
            total_return=result.metrics.total_return,
            benchmark_return=result.metrics.benchmark_return,
            alpha=result.metrics.alpha,
            sharpe_ratio=result.metrics.sharpe_ratio,
            max_drawdown=result.metrics.max_drawdown,
            win_rate=result.metrics.win_rate,
            total_trades=result.metrics.total_trades,
            profit_factor=result.metrics.profit_factor,
            total_fees=result.metrics.total_fees,
            total_funding=result.metrics.total_funding,
            liquidation_count=result.metrics.liquidation_count,
            liquidation_loss=result.metrics.liquidation_loss,
            adjusted_return=result.metrics.adjusted_return,
        )

        # Serialize complex data (model_dump => Pydantic v2 schemas).
        run.set_equity_curve([p.model_dump() for p in result.equity_curve])
        run.set_trades([t.model_dump() for t in result.trades])

        db.add(run)
        db.commit()
        # Refresh to populate DB-generated fields (e.g. created_at).
        db.refresh(run)

        return run

    def get_run(self, db: Session, run_id: str) -> BacktestResult | None:
        """
        Retrieve a backtest run by ID.

        Args:
            db: Database session
            run_id: UUID of the run

        Returns:
            BacktestResult or None if not found
        """
        run = db.query(BacktestRun).filter(BacktestRun.run_id == run_id).first()

        if not run:
            return None

        return self._to_result(run)

    def list_runs(
        self,
        db: Session,
        limit: int = 50,
        offset: int = 0,
        strategy: str | None = None,
        symbol: str | None = None,
    ) -> tuple[list[BacktestSummary], int]:
        """
        List backtest runs with optional filtering.

        Args:
            db: Database session
            limit: Maximum number of runs to return
            offset: Offset for pagination
            strategy: Filter by strategy name
            symbol: Filter by symbol

        Returns:
            Tuple of (list of summaries, total count)
        """
        query = db.query(BacktestRun)

        if strategy:
            query = query.filter(BacktestRun.strategy == strategy)
        if symbol:
            query = query.filter(BacktestRun.symbol == symbol)

        # Total counts all matches, before pagination is applied.
        total = query.count()

        # Newest first.
        runs = query.order_by(BacktestRun.created_at.desc()).offset(offset).limit(limit).all()

        summaries = [self._to_summary(run) for run in runs]

        return summaries, total

    def get_runs_by_ids(self, db: Session, run_ids: list[str]) -> list[BacktestResult]:
        """
        Retrieve multiple runs by their IDs.

        Args:
            db: Database session
            run_ids: List of run UUIDs

        Returns:
            List of BacktestResults (preserves order; missing IDs are
            silently skipped)
        """
        runs = db.query(BacktestRun).filter(BacktestRun.run_id.in_(run_ids)).all()

        # Create lookup and preserve order
        run_map = {run.run_id: run for run in runs}
        results = []

        for run_id in run_ids:
            if run_id in run_map:
                results.append(self._to_result(run_map[run_id]))

        return results

    def delete_run(self, db: Session, run_id: str) -> bool:
        """
        Delete a backtest run.

        Args:
            db: Database session
            run_id: UUID of the run to delete

        Returns:
            True if deleted, False if not found
        """
        run = db.query(BacktestRun).filter(BacktestRun.run_id == run_id).first()

        if not run:
            return False

        db.delete(run)
        db.commit()
        return True

    def _to_result(self, run: BacktestRun) -> BacktestResult:
        """Convert database record to BacktestResult schema."""
        equity_data = run.get_equity_curve()
        trades_data = run.get_trades()

        return BacktestResult(
            run_id=run.run_id,
            strategy=run.strategy,
            symbol=run.symbol,
            market_type=run.market_type,
            timeframe=run.timeframe,
            # Older rows may predate these columns, hence the fallbacks.
            start_date=run.start_date or "",
            end_date=run.end_date or "",
            leverage=run.leverage,
            params=run.params or {},
            metrics=BacktestMetrics(
                total_return=run.total_return,
                benchmark_return=run.benchmark_return or 0.0,
                alpha=run.alpha or 0.0,
                sharpe_ratio=run.sharpe_ratio,
                max_drawdown=run.max_drawdown,
                win_rate=run.win_rate,
                total_trades=run.total_trades,
                profit_factor=run.profit_factor,
                total_fees=run.total_fees,
                total_funding=run.total_funding,
                liquidation_count=run.liquidation_count,
                liquidation_loss=run.liquidation_loss,
                adjusted_return=run.adjusted_return,
            ),
            equity_curve=[EquityPoint(**p) for p in equity_data],
            trades=[TradeRecord(**t) for t in trades_data],
            created_at=run.created_at.isoformat() if run.created_at else "",
        )

    def _to_summary(self, run: BacktestRun) -> BacktestSummary:
        """Convert database record to BacktestSummary schema."""
        return BacktestSummary(
            run_id=run.run_id,
            strategy=run.strategy,
            symbol=run.symbol,
            market_type=run.market_type,
            timeframe=run.timeframe,
            total_return=run.total_return,
            sharpe_ratio=run.sharpe_ratio,
            max_drawdown=run.max_drawdown,
            total_trades=run.total_trades,
            created_at=run.created_at.isoformat() if run.created_at else "",
            params=run.params or {},
        )
|
||||||
|
|
||||||
|
# Singleton instance
_storage: StorageService | None = None


def get_storage() -> StorageService:
    """Return the process-wide StorageService, creating it on first use."""
    global _storage
    if _storage is not None:
        return _storage
    _storage = StorageService()
    return _storage
||||||
98
check_demo_account.py
Normal file
98
check_demo_account.py
Normal file
@@ -0,0 +1,98 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Check OKX demo account positions and recent orders.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
uv run python check_demo_account.py
|
||||||
|
"""
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
|
||||||
|
sys.path.insert(0, str(Path(__file__).parent))
|
||||||
|
|
||||||
|
from live_trading.config import OKXConfig
|
||||||
|
import ccxt
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Check demo account status.

    Diagnostic script: connects to OKX via ccxt and prints balance, open
    positions, and recent AVAX order/trade history to stdout. Read-only —
    no orders are placed.
    """
    # Credentials and demo-mode flag come from the project config object.
    config = OKXConfig()

    print(f"\n{'='*60}")
    print(f" OKX Demo Account Check")
    print(f"{'='*60}")
    print(f" Demo Mode: {config.demo_mode}")
    # Only the key prefix is shown, to avoid leaking credentials in logs.
    print(f" API Key: {config.api_key[:8]}..." if config.api_key else " API Key: NOT SET")
    print(f"{'='*60}\n")

    # 'sandbox' routes requests to OKX's demo environment when demo_mode
    # is set; defaultType 'swap' targets perpetual futures.
    exchange = ccxt.okx({
        'apiKey': config.api_key,
        'secret': config.secret,
        'password': config.password,
        'sandbox': config.demo_mode,
        'options': {'defaultType': 'swap'},
        'enableRateLimit': True,
    })

    # Check balance
    print("--- BALANCE ---")
    balance = exchange.fetch_balance()
    usdt = balance.get('USDT', {})
    print(f"USDT Total: {usdt.get('total', 0):.2f}")
    print(f"USDT Free: {usdt.get('free', 0):.2f}")
    print(f"USDT Used: {usdt.get('used', 0):.2f}")

    # Check all balances
    # (fetch_balance mixes per-currency dicts with metadata keys, hence
    # the isinstance guard)
    print("\n--- ALL NON-ZERO BALANCES ---")
    for currency, data in balance.items():
        if isinstance(data, dict) and data.get('total', 0) > 0:
            print(f"{currency}: total={data.get('total', 0):.6f}, free={data.get('free', 0):.6f}")

    # Check open positions
    print("\n--- OPEN POSITIONS ---")
    positions = exchange.fetch_positions()
    # 'contracts' may be None/0 for flat symbols; keep only live positions.
    open_positions = [p for p in positions if abs(float(p.get('contracts', 0))) > 0]

    if open_positions:
        for pos in open_positions:
            print(f" {pos['symbol']}: {pos['side']} {pos['contracts']} contracts @ {pos.get('entryPrice', 'N/A')}")
            print(f" Unrealized PnL: {pos.get('unrealizedPnl', 'N/A')}")
    else:
        print(" No open positions")

    # Check recent orders (last 50)
    # Best-effort: some exchanges/accounts reject these endpoints, so
    # failures are reported rather than raised.
    print("\n--- RECENT ORDERS (last 24h) ---")
    try:
        # Fetch closed orders for AVAX
        orders = exchange.fetch_orders('AVAX/USDT:USDT', limit=20)
        if orders:
            for order in orders[-10:]:  # Last 10
                # Exchange timestamps are epoch milliseconds.
                ts = datetime.fromtimestamp(order['timestamp']/1000, tz=timezone.utc)
                print(f" [{ts.strftime('%H:%M:%S')}] {order['side'].upper()} {order['amount']} AVAX @ {order.get('average', order.get('price', 'market'))}")
                print(f" Status: {order['status']}, Filled: {order.get('filled', 0)}, ID: {order['id']}")
        else:
            print(" No recent AVAX orders")
    except Exception as e:
        print(f" Could not fetch orders: {e}")

    # Check order history more broadly
    print("\n--- ORDER HISTORY (AVAX) ---")
    try:
        # Try fetching my trades
        trades = exchange.fetch_my_trades('AVAX/USDT:USDT', limit=10)
        if trades:
            for trade in trades[-5:]:
                ts = datetime.fromtimestamp(trade['timestamp']/1000, tz=timezone.utc)
                print(f" [{ts.strftime('%Y-%m-%d %H:%M:%S')}] {trade['side'].upper()} {trade['amount']} @ {trade['price']}")
                print(f" Fee: {trade.get('fee', {}).get('cost', 'N/A')} {trade.get('fee', {}).get('currency', '')}")
        else:
            print(" No recent AVAX trades")
    except Exception as e:
        print(f" Could not fetch trades: {e}")

    print(f"\n{'='*60}\n")
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
28
check_symbols.py
Normal file
28
check_symbols.py
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
import ccxt
|
||||||
|
import sys
|
||||||
|
|
||||||
|
def main():
    """List OKX's USDT-quoted linear ETH perpetual swap symbols."""
    try:
        exchange = ccxt.okx()
        print("Loading markets...")
        markets = exchange.load_markets()

        # Keep only symbols that mention ETH and USDT and are linear swaps.
        eth_perps = []
        for symbol, market in markets.items():
            mentions_eth_usdt = 'ETH' in symbol and 'USDT' in symbol
            if mentions_eth_usdt and market.get('swap') and market.get('linear'):
                eth_perps.append(symbol)

        print(f"\nFound {len(eth_perps)} ETH Linear Perps:")
        for symbol in eth_perps:
            market = markets[symbol]
            print(f" CCXT Symbol: {symbol}")
            print(f" Exchange ID: {market['id']}")
            print(f" Type: {market['type']}")
            print("-" * 30)

    except Exception as e:
        # Diagnostic script: report and exit instead of raising.
        print(f"Error: {e}")
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
BIN
data/multi_pair_model.pkl
Normal file
BIN
data/multi_pair_model.pkl
Normal file
Binary file not shown.
@@ -132,9 +132,22 @@ class Backtester:
|
|||||||
**strategy_params
|
**strategy_params
|
||||||
)
|
)
|
||||||
|
|
||||||
# Normalize signals to 4-tuple format
|
# Normalize signals to 5-tuple format
|
||||||
signals = self._normalize_signals(signals, close_price, market_config)
|
signals = self._normalize_signals(signals, close_price, market_config)
|
||||||
long_entries, long_exits, short_entries, short_exits = signals
|
long_entries, long_exits, short_entries, short_exits, size = signals
|
||||||
|
|
||||||
|
# Default size if None
|
||||||
|
if size is None:
|
||||||
|
size = 1.0
|
||||||
|
|
||||||
|
# Convert leverage multiplier to raw value (USD) for vbt
|
||||||
|
# This works around "SizeType.Percent reversal" error
|
||||||
|
# Effectively "Fixed Fractional" sizing based on Initial Capital
|
||||||
|
# (Does not compound, but safe for backtesting)
|
||||||
|
if isinstance(size, pd.Series):
|
||||||
|
size = size * init_cash
|
||||||
|
else:
|
||||||
|
size = size * init_cash
|
||||||
|
|
||||||
# Process liquidations - inject forced exits at liquidation points
|
# Process liquidations - inject forced exits at liquidation points
|
||||||
liquidation_events: list[LiquidationEvent] = []
|
liquidation_events: list[LiquidationEvent] = []
|
||||||
@@ -164,7 +177,8 @@ class Backtester:
|
|||||||
long_entries, long_exits,
|
long_entries, long_exits,
|
||||||
short_entries, short_exits,
|
short_entries, short_exits,
|
||||||
init_cash, effective_fees, slippage, timeframe,
|
init_cash, effective_fees, slippage, timeframe,
|
||||||
sl_stop, tp_stop, sl_trail, effective_leverage
|
sl_stop, tp_stop, sl_trail, effective_leverage,
|
||||||
|
size=size
|
||||||
)
|
)
|
||||||
|
|
||||||
# Calculate adjusted returns accounting for liquidation losses
|
# Calculate adjusted returns accounting for liquidation losses
|
||||||
@@ -242,18 +256,28 @@ class Backtester:
|
|||||||
market_config
|
market_config
|
||||||
) -> tuple:
|
) -> tuple:
|
||||||
"""
|
"""
|
||||||
Normalize strategy signals to 4-tuple format.
|
Normalize strategy signals to 5-tuple format.
|
||||||
|
|
||||||
Handles backward compatibility with 2-tuple (long-only) returns.
|
Returns:
|
||||||
|
(long_entries, long_exits, short_entries, short_exits, size)
|
||||||
"""
|
"""
|
||||||
|
# Default size is None (will be treated as 1.0 or default later)
|
||||||
|
size = None
|
||||||
|
|
||||||
if len(signals) == 2:
|
if len(signals) == 2:
|
||||||
long_entries, long_exits = signals
|
long_entries, long_exits = signals
|
||||||
short_entries = BaseStrategy.create_empty_signals(long_entries)
|
short_entries = BaseStrategy.create_empty_signals(long_entries)
|
||||||
short_exits = BaseStrategy.create_empty_signals(long_entries)
|
short_exits = BaseStrategy.create_empty_signals(long_entries)
|
||||||
return long_entries, long_exits, short_entries, short_exits
|
return long_entries, long_exits, short_entries, short_exits, size
|
||||||
|
|
||||||
if len(signals) == 4:
|
if len(signals) == 4:
|
||||||
long_entries, long_exits, short_entries, short_exits = signals
|
long_entries, long_exits, short_entries, short_exits = signals
|
||||||
|
elif len(signals) == 5:
|
||||||
|
long_entries, long_exits, short_entries, short_exits, size = signals
|
||||||
|
else:
|
||||||
|
raise ValueError(
|
||||||
|
f"Strategy must return 2, 4, or 5 signal arrays, got {len(signals)}"
|
||||||
|
)
|
||||||
|
|
||||||
# Warn and clear short signals on spot markets
|
# Warn and clear short signals on spot markets
|
||||||
if not market_config.supports_short:
|
if not market_config.supports_short:
|
||||||
@@ -270,11 +294,7 @@ class Backtester:
|
|||||||
short_entries = BaseStrategy.create_empty_signals(long_entries)
|
short_entries = BaseStrategy.create_empty_signals(long_entries)
|
||||||
short_exits = BaseStrategy.create_empty_signals(long_entries)
|
short_exits = BaseStrategy.create_empty_signals(long_entries)
|
||||||
|
|
||||||
return long_entries, long_exits, short_entries, short_exits
|
return long_entries, long_exits, short_entries, short_exits, size
|
||||||
|
|
||||||
raise ValueError(
|
|
||||||
f"Strategy must return 2 or 4 signal arrays, got {len(signals)}"
|
|
||||||
)
|
|
||||||
|
|
||||||
def _run_portfolio(
|
def _run_portfolio(
|
||||||
self,
|
self,
|
||||||
@@ -289,7 +309,8 @@ class Backtester:
|
|||||||
sl_stop: float | None,
|
sl_stop: float | None,
|
||||||
tp_stop: float | None,
|
tp_stop: float | None,
|
||||||
sl_trail: bool,
|
sl_trail: bool,
|
||||||
leverage: int
|
leverage: int,
|
||||||
|
size: pd.Series | float = 1.0
|
||||||
) -> vbt.Portfolio:
|
) -> vbt.Portfolio:
|
||||||
"""Select and run appropriate portfolio simulation."""
|
"""Select and run appropriate portfolio simulation."""
|
||||||
has_shorts = (
|
has_shorts = (
|
||||||
@@ -304,14 +325,18 @@ class Backtester:
|
|||||||
long_entries, long_exits,
|
long_entries, long_exits,
|
||||||
short_entries, short_exits,
|
short_entries, short_exits,
|
||||||
init_cash, fees, slippage, freq,
|
init_cash, fees, slippage, freq,
|
||||||
sl_stop, tp_stop, sl_trail, leverage
|
sl_stop, tp_stop, sl_trail, leverage,
|
||||||
|
size=size
|
||||||
)
|
)
|
||||||
|
|
||||||
return run_long_only_portfolio(
|
return run_long_only_portfolio(
|
||||||
close,
|
close,
|
||||||
long_entries, long_exits,
|
long_entries, long_exits,
|
||||||
init_cash, fees, slippage, freq,
|
init_cash, fees, slippage, freq,
|
||||||
sl_stop, tp_stop, sl_trail, leverage
|
sl_stop, tp_stop, sl_trail, leverage,
|
||||||
|
# Long-only doesn't support variable size in current implementation
|
||||||
|
# without modification, but we can add it if needed.
|
||||||
|
# For now, only regime strategy uses it, which is Long/Short.
|
||||||
)
|
)
|
||||||
|
|
||||||
def run_wfa(
|
def run_wfa(
|
||||||
|
|||||||
@@ -133,12 +133,57 @@ class CryptoQuantClient:
|
|||||||
|
|
||||||
return combined_df
|
return combined_df
|
||||||
|
|
||||||
|
def fetch_history_chunked(
    self,
    symbols: list[str],
    metrics: dict,
    start_date: str,
    end_date: str,
    chunk_months: int = 3
) -> pd.DataFrame:
    """
    Fetch historical data in chunks to avoid API limits.

    Args:
        symbols: Asset identifiers passed through to fetch_multi_metrics.
        metrics: Mapping of metric name -> API endpoint path.
        start_date: Inclusive start, formatted YYYYMMDD.
        end_date: End bound, formatted YYYYMMDD.
        chunk_months: Approximate chunk length; each chunk spans
            chunk_months * 30 days, not calendar months.

    Returns:
        Concatenated DataFrame of all chunks, de-duplicated on the index
        and sorted; empty DataFrame if nothing was fetched.
    """
    start_dt = datetime.strptime(start_date, "%Y%m%d")
    end_dt = datetime.strptime(end_date, "%Y%m%d")

    all_data = []

    current = start_dt
    while current < end_dt:
        # Clamp the final chunk to the requested end date.
        next_chunk = current + timedelta(days=chunk_months * 30)
        if next_chunk > end_dt:
            next_chunk = end_dt

        s_str = current.strftime("%Y%m%d")
        e_str = next_chunk.strftime("%Y%m%d")

        logger.info(f"Processing chunk: {s_str} to {e_str}")
        chunk_df = self.fetch_multi_metrics(symbols, metrics, s_str, e_str)

        if not chunk_df.empty:
            all_data.append(chunk_df)

        # +1 day so consecutive chunks do not re-request the boundary date
        # (the dedup below handles any residual overlap).
        current = next_chunk + timedelta(days=1)
        time.sleep(1)  # Be nice to API

    if not all_data:
        return pd.DataFrame()

    # Combine all chunks
    full_df = pd.concat(all_data)
    # Remove duplicates if any overlap
    full_df = full_df[~full_df.index.duplicated(keep='first')]
    full_df.sort_index(inplace=True)

    return full_df
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
cq = CryptoQuantClient()
|
cq = CryptoQuantClient()
|
||||||
|
|
||||||
# 3 Months Data (Oct 1 2025 - Dec 31 2025)
|
# 12 Months Data (Jan 1 2025 - Jan 14 2026)
|
||||||
start = "20251001"
|
start = "20250101"
|
||||||
end = "20251231"
|
end = "20260114"
|
||||||
|
|
||||||
metrics = {
|
metrics = {
|
||||||
"reserves": "exchange-flows/exchange-reserve",
|
"reserves": "exchange-flows/exchange-reserve",
|
||||||
@@ -147,7 +192,7 @@ if __name__ == "__main__":
|
|||||||
}
|
}
|
||||||
|
|
||||||
print(f"Fetching training data from {start} to {end}...")
|
print(f"Fetching training data from {start} to {end}...")
|
||||||
df = cq.fetch_multi_metrics(["btc", "eth"], metrics, start, end)
|
df = cq.fetch_history_chunked(["btc", "eth"], metrics, start, end)
|
||||||
|
|
||||||
output_file = "data/cq_training_data.csv"
|
output_file = "data/cq_training_data.csv"
|
||||||
os.makedirs("data", exist_ok=True)
|
os.makedirs("data", exist_ok=True)
|
||||||
|
|||||||
@@ -94,8 +94,20 @@ def get_ccxt_symbol(symbol: str, market_type: MarketType) -> str:
|
|||||||
"""
|
"""
|
||||||
if market_type == MarketType.PERPETUAL:
|
if market_type == MarketType.PERPETUAL:
|
||||||
# OKX perpetual format: BTC/USDT:USDT
|
# OKX perpetual format: BTC/USDT:USDT
|
||||||
quote = symbol.split('/')[1] if '/' in symbol else 'USDT'
|
if '/' in symbol:
|
||||||
|
base, quote = symbol.split('/')
|
||||||
return f"{symbol}:{quote}"
|
return f"{symbol}:{quote}"
|
||||||
|
elif '-' in symbol:
|
||||||
|
base, quote = symbol.split('-')
|
||||||
|
return f"{base}/{quote}:{quote}"
|
||||||
|
else:
|
||||||
|
# Assume base is symbol, quote is USDT default
|
||||||
|
return f"{symbol}/USDT:USDT"
|
||||||
|
|
||||||
|
# For spot, normalize dash to slash for CCXT
|
||||||
|
if '-' in symbol:
|
||||||
|
return symbol.replace('-', '/')
|
||||||
|
|
||||||
return symbol
|
return symbol
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -74,75 +74,35 @@ def run_long_short_portfolio(
|
|||||||
sl_stop: float | None,
|
sl_stop: float | None,
|
||||||
tp_stop: float | None,
|
tp_stop: float | None,
|
||||||
sl_trail: bool,
|
sl_trail: bool,
|
||||||
leverage: int
|
leverage: int,
|
||||||
|
size: pd.Series | float = 1.0,
|
||||||
|
size_type: str = 'value' # Changed to 'value' to support reversals/sizing
|
||||||
) -> vbt.Portfolio:
|
) -> vbt.Portfolio:
|
||||||
"""
|
"""
|
||||||
Run a portfolio supporting both long and short positions.
|
Run a portfolio supporting both long and short positions.
|
||||||
|
|
||||||
Runs two separate portfolios (long and short) and combines results.
|
Uses VectorBT's native support for short_entries/short_exits
|
||||||
Each gets half the capital.
|
to simulate a single unified portfolio.
|
||||||
|
|
||||||
Args:
|
|
||||||
close: Close price series
|
|
||||||
long_entries: Long entry signals
|
|
||||||
long_exits: Long exit signals
|
|
||||||
short_entries: Short entry signals
|
|
||||||
short_exits: Short exit signals
|
|
||||||
init_cash: Initial capital
|
|
||||||
fees: Transaction fee percentage
|
|
||||||
slippage: Slippage percentage
|
|
||||||
freq: Data frequency string
|
|
||||||
sl_stop: Stop loss percentage
|
|
||||||
tp_stop: Take profit percentage
|
|
||||||
sl_trail: Enable trailing stop loss
|
|
||||||
leverage: Leverage multiplier
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
VectorBT Portfolio object (long portfolio, short stats logged)
|
|
||||||
"""
|
"""
|
||||||
effective_cash = init_cash * leverage
|
effective_cash = init_cash * leverage
|
||||||
half_cash = effective_cash / 2
|
|
||||||
|
|
||||||
# Run long-only portfolio
|
# If size is passed as value (USD), we don't scale it by leverage here
|
||||||
long_pf = vbt.Portfolio.from_signals(
|
# The backtester has already scaled it by init_cash.
|
||||||
|
# If using 'value', vbt treats it as "Amount of CASH to use for the trade"
|
||||||
|
|
||||||
|
return vbt.Portfolio.from_signals(
|
||||||
close=close,
|
close=close,
|
||||||
entries=long_entries,
|
entries=long_entries,
|
||||||
exits=long_exits,
|
exits=long_exits,
|
||||||
direction='longonly',
|
short_entries=short_entries,
|
||||||
init_cash=half_cash,
|
short_exits=short_exits,
|
||||||
|
init_cash=effective_cash,
|
||||||
fees=fees,
|
fees=fees,
|
||||||
slippage=slippage,
|
slippage=slippage,
|
||||||
freq=freq,
|
freq=freq,
|
||||||
sl_stop=sl_stop,
|
sl_stop=sl_stop,
|
||||||
tp_stop=tp_stop,
|
tp_stop=tp_stop,
|
||||||
sl_trail=sl_trail,
|
sl_trail=sl_trail,
|
||||||
size=1.0,
|
size=size,
|
||||||
size_type='percent',
|
size_type=size_type,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Run short-only portfolio
|
|
||||||
short_pf = vbt.Portfolio.from_signals(
|
|
||||||
close=close,
|
|
||||||
entries=short_entries,
|
|
||||||
exits=short_exits,
|
|
||||||
direction='shortonly',
|
|
||||||
init_cash=half_cash,
|
|
||||||
fees=fees,
|
|
||||||
slippage=slippage,
|
|
||||||
freq=freq,
|
|
||||||
sl_stop=sl_stop,
|
|
||||||
tp_stop=tp_stop,
|
|
||||||
sl_trail=sl_trail,
|
|
||||||
size=1.0,
|
|
||||||
size_type='percent',
|
|
||||||
)
|
|
||||||
|
|
||||||
# Log both portfolio stats
|
|
||||||
# TODO: Implement proper portfolio combination
|
|
||||||
logger.info(
|
|
||||||
"Long portfolio: %.2f%% return, Short portfolio: %.2f%% return",
|
|
||||||
long_pf.total_return().mean() * 100,
|
|
||||||
short_pf.total_return().mean() * 100
|
|
||||||
)
|
|
||||||
|
|
||||||
return long_pf
|
|
||||||
|
|||||||
24
frontend/.gitignore
vendored
Normal file
24
frontend/.gitignore
vendored
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
# Logs
|
||||||
|
logs
|
||||||
|
*.log
|
||||||
|
npm-debug.log*
|
||||||
|
yarn-debug.log*
|
||||||
|
yarn-error.log*
|
||||||
|
pnpm-debug.log*
|
||||||
|
lerna-debug.log*
|
||||||
|
|
||||||
|
node_modules
|
||||||
|
dist
|
||||||
|
dist-ssr
|
||||||
|
*.local
|
||||||
|
|
||||||
|
# Editor directories and files
|
||||||
|
.vscode/*
|
||||||
|
!.vscode/extensions.json
|
||||||
|
.idea
|
||||||
|
.DS_Store
|
||||||
|
*.suo
|
||||||
|
*.ntvs*
|
||||||
|
*.njsproj
|
||||||
|
*.sln
|
||||||
|
*.sw?
|
||||||
3
frontend/.vscode/extensions.json
vendored
Normal file
3
frontend/.vscode/extensions.json
vendored
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
{
|
||||||
|
"recommendations": ["Vue.volar"]
|
||||||
|
}
|
||||||
5
frontend/README.md
Normal file
5
frontend/README.md
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
# Vue 3 + TypeScript + Vite
|
||||||
|
|
||||||
|
This template should help get you started developing with Vue 3 and TypeScript in Vite. The template uses Vue 3 `<script setup>` SFCs, check out the [script setup docs](https://v3.vuejs.org/api/sfc-script-setup.html#sfc-script-setup) to learn more.
|
||||||
|
|
||||||
|
Learn more about the recommended Project Setup and IDE Support in the [Vue Docs TypeScript Guide](https://vuejs.org/guide/typescript/overview.html#project-setup).
|
||||||
24
frontend/index.html
Normal file
24
frontend/index.html
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
<!doctype html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<meta charset="UTF-8" />
|
||||||
|
<link rel="icon" type="image/svg+xml" href="/vite.svg" />
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||||
|
<title>Lowkey Backtest</title>
|
||||||
|
<link rel="preconnect" href="https://fonts.googleapis.com">
|
||||||
|
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
|
||||||
|
<link href="https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@400;500;600;700&family=Inter:wght@400;500;600;700&display=swap" rel="stylesheet">
|
||||||
|
<style>
|
||||||
|
body {
|
||||||
|
font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif;
|
||||||
|
}
|
||||||
|
code, pre, .font-mono, input, select {
|
||||||
|
font-family: 'JetBrains Mono', 'Fira Code', monospace;
|
||||||
|
}
|
||||||
|
</style>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<div id="app"></div>
|
||||||
|
<script type="module" src="/src/main.ts"></script>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
2427
frontend/package-lock.json
generated
Normal file
2427
frontend/package-lock.json
generated
Normal file
File diff suppressed because it is too large
Load Diff
27
frontend/package.json
Normal file
27
frontend/package.json
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
{
|
||||||
|
"name": "frontend",
|
||||||
|
"private": true,
|
||||||
|
"version": "0.0.0",
|
||||||
|
"type": "module",
|
||||||
|
"scripts": {
|
||||||
|
"dev": "vite",
|
||||||
|
"build": "vue-tsc -b && vite build",
|
||||||
|
"preview": "vite preview"
|
||||||
|
},
|
||||||
|
"dependencies": {
|
||||||
|
"axios": "^1.13.2",
|
||||||
|
"plotly.js-dist-min": "^3.3.1",
|
||||||
|
"vue": "^3.5.24",
|
||||||
|
"vue-router": "^4.6.4"
|
||||||
|
},
|
||||||
|
"devDependencies": {
|
||||||
|
"@tailwindcss/vite": "^4.1.18",
|
||||||
|
"@types/node": "^24.10.1",
|
||||||
|
"@vitejs/plugin-vue": "^6.0.1",
|
||||||
|
"@vue/tsconfig": "^0.8.1",
|
||||||
|
"tailwindcss": "^4.1.18",
|
||||||
|
"typescript": "~5.9.3",
|
||||||
|
"vite": "^7.2.4",
|
||||||
|
"vue-tsc": "^3.1.4"
|
||||||
|
}
|
||||||
|
}
|
||||||
1
frontend/public/vite.svg
Normal file
1
frontend/public/vite.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="iconify iconify--logos" width="31.88" height="32" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 257"><defs><linearGradient id="IconifyId1813088fe1fbc01fb466" x1="-.828%" x2="57.636%" y1="7.652%" y2="78.411%"><stop offset="0%" stop-color="#41D1FF"></stop><stop offset="100%" stop-color="#BD34FE"></stop></linearGradient><linearGradient id="IconifyId1813088fe1fbc01fb467" x1="43.376%" x2="50.316%" y1="2.242%" y2="89.03%"><stop offset="0%" stop-color="#FFEA83"></stop><stop offset="8.333%" stop-color="#FFDD35"></stop><stop offset="100%" stop-color="#FFA800"></stop></linearGradient></defs><path fill="url(#IconifyId1813088fe1fbc01fb466)" d="M255.153 37.938L134.897 252.976c-2.483 4.44-8.862 4.466-11.382.048L.875 37.958c-2.746-4.814 1.371-10.646 6.827-9.67l120.385 21.517a6.537 6.537 0 0 0 2.322-.004l117.867-21.483c5.438-.991 9.574 4.796 6.877 9.62Z"></path><path fill="url(#IconifyId1813088fe1fbc01fb467)" d="M185.432.063L96.44 17.501a3.268 3.268 0 0 0-2.634 3.014l-5.474 92.456a3.268 3.268 0 0 0 3.997 3.378l24.777-5.718c2.318-.535 4.413 1.507 3.936 3.838l-7.361 36.047c-.495 2.426 1.782 4.5 4.151 3.78l15.304-4.649c2.372-.72 4.652 1.36 4.15 3.788l-11.698 56.621c-.732 3.542 3.979 5.473 5.943 2.437l1.313-2.028l72.516-144.72c1.215-2.423-.88-5.186-3.54-4.672l-25.505 4.922c-2.396.462-4.435-1.77-3.759-4.114l16.646-57.705c.677-2.35-1.37-4.583-3.769-4.113Z"></path></svg>
|
||||||
|
After Width: | Height: | Size: 1.5 KiB |
72
frontend/src/App.vue
Normal file
72
frontend/src/App.vue
Normal file
@@ -0,0 +1,72 @@
|
|||||||
|
<script setup lang="ts">
|
||||||
|
import { ref } from 'vue'
|
||||||
|
import { RouterLink, RouterView } from 'vue-router'
|
||||||
|
import RunHistory from '@/components/RunHistory.vue'
|
||||||
|
|
||||||
|
const historyOpen = ref(true)
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<template>
|
||||||
|
<div class="flex h-screen overflow-hidden">
|
||||||
|
<!-- Sidebar Navigation -->
|
||||||
|
<aside class="w-16 bg-bg-secondary border-r border-border flex flex-col items-center py-4 gap-4">
|
||||||
|
<!-- Logo -->
|
||||||
|
<div class="w-10 h-10 rounded-lg bg-accent-blue flex items-center justify-center text-black font-bold text-lg">
|
||||||
|
LB
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Nav Links -->
|
||||||
|
<nav class="flex flex-col gap-2 mt-4">
|
||||||
|
<RouterLink
|
||||||
|
to="/"
|
||||||
|
class="w-10 h-10 rounded-lg flex items-center justify-center hover:bg-bg-hover transition-colors"
|
||||||
|
:class="{ 'bg-bg-tertiary': $route.path === '/' }"
|
||||||
|
title="Dashboard"
|
||||||
|
>
|
||||||
|
<svg class="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
|
||||||
|
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M9 19v-6a2 2 0 00-2-2H5a2 2 0 00-2 2v6a2 2 0 002 2h2a2 2 0 002-2zm0 0V9a2 2 0 012-2h2a2 2 0 012 2v10m-6 0a2 2 0 002 2h2a2 2 0 002-2m0 0V5a2 2 0 012-2h2a2 2 0 012 2v14a2 2 0 01-2 2h-2a2 2 0 01-2-2z" />
|
||||||
|
</svg>
|
||||||
|
</RouterLink>
|
||||||
|
|
||||||
|
<RouterLink
|
||||||
|
to="/compare"
|
||||||
|
class="w-10 h-10 rounded-lg flex items-center justify-center hover:bg-bg-hover transition-colors"
|
||||||
|
:class="{ 'bg-bg-tertiary': $route.path === '/compare' }"
|
||||||
|
title="Compare Runs"
|
||||||
|
>
|
||||||
|
<svg class="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
|
||||||
|
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M9 17V7m0 10a2 2 0 01-2 2H5a2 2 0 01-2-2V7a2 2 0 012-2h2a2 2 0 012 2m0 10a2 2 0 002 2h2a2 2 0 002-2M9 7a2 2 0 012-2h2a2 2 0 012 2m0 10V7m0 10a2 2 0 002 2h2a2 2 0 002-2V7a2 2 0 00-2-2h-2a2 2 0 00-2 2" />
|
||||||
|
</svg>
|
||||||
|
</RouterLink>
|
||||||
|
</nav>
|
||||||
|
|
||||||
|
<!-- Spacer -->
|
||||||
|
<div class="flex-1"></div>
|
||||||
|
|
||||||
|
<!-- Toggle History -->
|
||||||
|
<button
|
||||||
|
@click="historyOpen = !historyOpen"
|
||||||
|
class="w-10 h-10 rounded-lg flex items-center justify-center hover:bg-bg-hover transition-colors"
|
||||||
|
:class="{ 'bg-bg-tertiary': historyOpen }"
|
||||||
|
title="Toggle Run History"
|
||||||
|
>
|
||||||
|
<svg class="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
|
||||||
|
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M12 8v4l3 3m6-3a9 9 0 11-18 0 9 9 0 0118 0z" />
|
||||||
|
</svg>
|
||||||
|
</button>
|
||||||
|
</aside>
|
||||||
|
|
||||||
|
<!-- Main Content -->
|
||||||
|
<main class="flex-1 overflow-auto">
|
||||||
|
<RouterView />
|
||||||
|
</main>
|
||||||
|
|
||||||
|
<!-- Run History Sidebar -->
|
||||||
|
<aside
|
||||||
|
v-if="historyOpen"
|
||||||
|
class="w-72 bg-bg-secondary border-l border-border overflow-hidden flex flex-col"
|
||||||
|
>
|
||||||
|
<RunHistory />
|
||||||
|
</aside>
|
||||||
|
</div>
|
||||||
|
</template>
|
||||||
81
frontend/src/api/client.ts
Normal file
81
frontend/src/api/client.ts
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
/**
|
||||||
|
* API client for Lowkey Backtest backend.
|
||||||
|
*/
|
||||||
|
import axios from 'axios'
|
||||||
|
import type {
|
||||||
|
StrategiesResponse,
|
||||||
|
DataStatusResponse,
|
||||||
|
BacktestRequest,
|
||||||
|
BacktestResult,
|
||||||
|
BacktestListResponse,
|
||||||
|
CompareResult,
|
||||||
|
} from './types'
|
||||||
|
|
||||||
|
const api = axios.create({
|
||||||
|
baseURL: '/api',
|
||||||
|
headers: {
|
||||||
|
'Content-Type': 'application/json',
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get list of available strategies with parameters.
|
||||||
|
*/
|
||||||
|
export async function getStrategies(): Promise<StrategiesResponse> {
|
||||||
|
const response = await api.get<StrategiesResponse>('/strategies')
|
||||||
|
return response.data
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get list of available symbols with data status.
|
||||||
|
*/
|
||||||
|
export async function getSymbols(): Promise<DataStatusResponse> {
|
||||||
|
const response = await api.get<DataStatusResponse>('/symbols')
|
||||||
|
return response.data
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Run a backtest with the given configuration.
|
||||||
|
*/
|
||||||
|
export async function runBacktest(request: BacktestRequest): Promise<BacktestResult> {
|
||||||
|
const response = await api.post<BacktestResult>('/backtest', request)
|
||||||
|
return response.data
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get list of saved backtest runs.
|
||||||
|
*/
|
||||||
|
export async function getBacktests(params?: {
|
||||||
|
limit?: number
|
||||||
|
offset?: number
|
||||||
|
strategy?: string
|
||||||
|
symbol?: string
|
||||||
|
}): Promise<BacktestListResponse> {
|
||||||
|
const response = await api.get<BacktestListResponse>('/backtests', { params })
|
||||||
|
return response.data
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get a specific backtest run by ID.
|
||||||
|
*/
|
||||||
|
export async function getBacktest(runId: string): Promise<BacktestResult> {
|
||||||
|
const response = await api.get<BacktestResult>(`/backtest/${runId}`)
|
||||||
|
return response.data
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Delete a backtest run.
|
||||||
|
*/
|
||||||
|
export async function deleteBacktest(runId: string): Promise<void> {
|
||||||
|
await api.delete(`/backtest/${runId}`)
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Compare multiple backtest runs.
|
||||||
|
*/
|
||||||
|
export async function compareRuns(runIds: string[]): Promise<CompareResult> {
|
||||||
|
const response = await api.post<CompareResult>('/compare', { run_ids: runIds })
|
||||||
|
return response.data
|
||||||
|
}
|
||||||
|
|
||||||
|
export default api
|
||||||
131
frontend/src/api/types.ts
Normal file
131
frontend/src/api/types.ts
Normal file
@@ -0,0 +1,131 @@
|
|||||||
|
/**
|
||||||
|
* TypeScript types matching the FastAPI Pydantic schemas.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Strategy types
|
||||||
|
export interface StrategyInfo {
|
||||||
|
name: string
|
||||||
|
display_name: string
|
||||||
|
market_type: string
|
||||||
|
default_leverage: number
|
||||||
|
default_params: Record<string, unknown>
|
||||||
|
grid_params: Record<string, unknown>
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface StrategiesResponse {
|
||||||
|
strategies: StrategyInfo[]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Symbol/Data types
|
||||||
|
export interface SymbolInfo {
|
||||||
|
symbol: string
|
||||||
|
exchange: string
|
||||||
|
market_type: string
|
||||||
|
timeframes: string[]
|
||||||
|
start_date: string | null
|
||||||
|
end_date: string | null
|
||||||
|
row_count: number
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface DataStatusResponse {
|
||||||
|
symbols: SymbolInfo[]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Backtest types
|
||||||
|
export interface BacktestRequest {
|
||||||
|
strategy: string
|
||||||
|
symbol: string
|
||||||
|
exchange?: string
|
||||||
|
timeframe?: string
|
||||||
|
market_type?: string
|
||||||
|
start_date?: string | null
|
||||||
|
end_date?: string | null
|
||||||
|
init_cash?: number
|
||||||
|
leverage?: number | null
|
||||||
|
fees?: number | null
|
||||||
|
slippage?: number
|
||||||
|
sl_stop?: number | null
|
||||||
|
tp_stop?: number | null
|
||||||
|
sl_trail?: boolean
|
||||||
|
params?: Record<string, unknown>
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface TradeRecord {
|
||||||
|
entry_time: string
|
||||||
|
exit_time: string | null
|
||||||
|
entry_price: number
|
||||||
|
exit_price: number | null
|
||||||
|
size: number
|
||||||
|
direction: string
|
||||||
|
pnl: number | null
|
||||||
|
return_pct: number | null
|
||||||
|
status: string
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface EquityPoint {
|
||||||
|
timestamp: string
|
||||||
|
value: number
|
||||||
|
drawdown: number
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface BacktestMetrics {
|
||||||
|
total_return: number
|
||||||
|
benchmark_return: number
|
||||||
|
alpha: number
|
||||||
|
sharpe_ratio: number
|
||||||
|
max_drawdown: number
|
||||||
|
win_rate: number
|
||||||
|
total_trades: number
|
||||||
|
profit_factor: number | null
|
||||||
|
avg_trade_return: number | null
|
||||||
|
total_fees: number
|
||||||
|
total_funding: number
|
||||||
|
liquidation_count: number
|
||||||
|
liquidation_loss: number
|
||||||
|
adjusted_return: number | null
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface BacktestResult {
|
||||||
|
run_id: string
|
||||||
|
strategy: string
|
||||||
|
symbol: string
|
||||||
|
market_type: string
|
||||||
|
timeframe: string
|
||||||
|
start_date: string
|
||||||
|
end_date: string
|
||||||
|
leverage: number
|
||||||
|
params: Record<string, unknown>
|
||||||
|
metrics: BacktestMetrics
|
||||||
|
equity_curve: EquityPoint[]
|
||||||
|
trades: TradeRecord[]
|
||||||
|
created_at: string
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface BacktestSummary {
|
||||||
|
run_id: string
|
||||||
|
strategy: string
|
||||||
|
symbol: string
|
||||||
|
market_type: string
|
||||||
|
timeframe: string
|
||||||
|
total_return: number
|
||||||
|
sharpe_ratio: number
|
||||||
|
max_drawdown: number
|
||||||
|
total_trades: number
|
||||||
|
created_at: string
|
||||||
|
params: Record<string, unknown>
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface BacktestListResponse {
|
||||||
|
runs: BacktestSummary[]
|
||||||
|
total: number
|
||||||
|
}
|
||||||
|
|
||||||
|
// Comparison types
|
||||||
|
export interface CompareRequest {
|
||||||
|
run_ids: string[]
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface CompareResult {
|
||||||
|
runs: BacktestResult[]
|
||||||
|
param_diff: Record<string, unknown[]>
|
||||||
|
}
|
||||||
1
frontend/src/assets/vue.svg
Normal file
1
frontend/src/assets/vue.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="iconify iconify--logos" width="37.07" height="36" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 198"><path fill="#41B883" d="M204.8 0H256L128 220.8L0 0h97.92L128 51.2L157.44 0h47.36Z"></path><path fill="#41B883" d="m0 0l128 220.8L256 0h-51.2L128 132.48L50.56 0H0Z"></path><path fill="#35495E" d="M50.56 0L128 133.12L204.8 0h-47.36L128 51.2L97.92 0H50.56Z"></path></svg>
|
||||||
|
After Width: | Height: | Size: 496 B |
186
frontend/src/components/BacktestConfig.vue
Normal file
186
frontend/src/components/BacktestConfig.vue
Normal file
@@ -0,0 +1,186 @@
|
|||||||
|
<script setup lang="ts">
|
||||||
|
import { ref, computed, watch, onMounted } from 'vue'
|
||||||
|
import { useBacktest } from '@/composables/useBacktest'
|
||||||
|
import type { BacktestRequest } from '@/api/types'
|
||||||
|
|
||||||
|
const { strategies, symbols, loading, init, executeBacktest } = useBacktest()
|
||||||
|
|
||||||
|
// Form state
|
||||||
|
const selectedStrategy = ref('')
|
||||||
|
const selectedSymbol = ref('')
|
||||||
|
const selectedMarket = ref('perpetual')
|
||||||
|
const timeframe = ref('1h')
|
||||||
|
const initCash = ref(10000)
|
||||||
|
const leverage = ref<number | null>(null)
|
||||||
|
const slStop = ref<number | null>(null)
|
||||||
|
const tpStop = ref<number | null>(null)
|
||||||
|
const params = ref<Record<string, number | boolean>>({})
|
||||||
|
|
||||||
|
// Initialize
|
||||||
|
onMounted(async () => {
|
||||||
|
await init()
|
||||||
|
if (strategies.value.length > 0 && strategies.value[0]) {
|
||||||
|
selectedStrategy.value = strategies.value[0].name
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// Get current strategy config
|
||||||
|
const currentStrategy = computed(() =>
|
||||||
|
strategies.value.find(s => s.name === selectedStrategy.value)
|
||||||
|
)
|
||||||
|
|
||||||
|
// Filter symbols by market type
|
||||||
|
const filteredSymbols = computed(() =>
|
||||||
|
symbols.value.filter(s => s.market_type === selectedMarket.value)
|
||||||
|
)
|
||||||
|
|
||||||
|
// Update params when strategy changes
|
||||||
|
watch(selectedStrategy, (name) => {
|
||||||
|
const strategy = strategies.value.find(s => s.name === name)
|
||||||
|
if (strategy) {
|
||||||
|
params.value = { ...strategy.default_params } as Record<string, number | boolean>
|
||||||
|
selectedMarket.value = strategy.market_type
|
||||||
|
leverage.value = strategy.default_leverage > 1 ? strategy.default_leverage : null
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// Update symbol when market changes
|
||||||
|
watch([filteredSymbols, selectedMarket], () => {
|
||||||
|
const firstSymbol = filteredSymbols.value[0]
|
||||||
|
if (filteredSymbols.value.length > 0 && firstSymbol && !filteredSymbols.value.find(s => s.symbol === selectedSymbol.value)) {
|
||||||
|
selectedSymbol.value = firstSymbol.symbol
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
async function handleSubmit() {
|
||||||
|
if (!selectedStrategy.value || !selectedSymbol.value) return
|
||||||
|
|
||||||
|
const request: BacktestRequest = {
|
||||||
|
strategy: selectedStrategy.value,
|
||||||
|
symbol: selectedSymbol.value,
|
||||||
|
market_type: selectedMarket.value,
|
||||||
|
timeframe: timeframe.value,
|
||||||
|
init_cash: initCash.value,
|
||||||
|
leverage: leverage.value,
|
||||||
|
sl_stop: slStop.value,
|
||||||
|
tp_stop: tpStop.value,
|
||||||
|
params: params.value,
|
||||||
|
}
|
||||||
|
|
||||||
|
await executeBacktest(request)
|
||||||
|
}
|
||||||
|
|
||||||
|
function getParamType(value: unknown): 'number' | 'boolean' | 'unknown' {
|
||||||
|
if (typeof value === 'boolean') return 'boolean'
|
||||||
|
if (typeof value === 'number') return 'number'
|
||||||
|
return 'unknown'
|
||||||
|
}
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<template>
|
||||||
|
<div class="card">
|
||||||
|
<h2 class="text-lg font-semibold mb-4">Backtest Configuration</h2>
|
||||||
|
|
||||||
|
<form @submit.prevent="handleSubmit" class="space-y-4">
|
||||||
|
<!-- Strategy -->
|
||||||
|
<div>
|
||||||
|
<label class="block text-xs text-text-secondary uppercase mb-1">Strategy</label>
|
||||||
|
<select v-model="selectedStrategy" class="w-full">
|
||||||
|
<option v-for="s in strategies" :key="s.name" :value="s.name">
|
||||||
|
{{ s.display_name }}
|
||||||
|
</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Market Type & Symbol -->
|
||||||
|
<div class="grid grid-cols-2 gap-3">
|
||||||
|
<div>
|
||||||
|
<label class="block text-xs text-text-secondary uppercase mb-1">Market</label>
|
||||||
|
<select v-model="selectedMarket" class="w-full">
|
||||||
|
<option value="spot">Spot</option>
|
||||||
|
<option value="perpetual">Perpetual</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
<label class="block text-xs text-text-secondary uppercase mb-1">Symbol</label>
|
||||||
|
<select v-model="selectedSymbol" class="w-full">
|
||||||
|
<option v-for="s in filteredSymbols" :key="s.symbol" :value="s.symbol">
|
||||||
|
{{ s.symbol }}
|
||||||
|
</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Timeframe & Cash -->
|
||||||
|
<div class="grid grid-cols-2 gap-3">
|
||||||
|
<div>
|
||||||
|
<label class="block text-xs text-text-secondary uppercase mb-1">Timeframe</label>
|
||||||
|
<select v-model="timeframe" class="w-full">
|
||||||
|
<option value="1h">1 Hour</option>
|
||||||
|
<option value="4h">4 Hours</option>
|
||||||
|
<option value="1d">1 Day</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
<label class="block text-xs text-text-secondary uppercase mb-1">Initial Cash</label>
|
||||||
|
<input type="number" v-model.number="initCash" class="w-full" min="100" step="100" />
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Leverage (perpetual only) -->
|
||||||
|
<div v-if="selectedMarket === 'perpetual'" class="grid grid-cols-3 gap-3">
|
||||||
|
<div>
|
||||||
|
<label class="block text-xs text-text-secondary uppercase mb-1">Leverage</label>
|
||||||
|
<input type="number" v-model.number="leverage" class="w-full" min="1" max="100" placeholder="1" />
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
<label class="block text-xs text-text-secondary uppercase mb-1">Stop Loss %</label>
|
||||||
|
<input type="number" v-model.number="slStop" class="w-full" min="0" max="100" step="0.1" placeholder="None" />
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
<label class="block text-xs text-text-secondary uppercase mb-1">Take Profit %</label>
|
||||||
|
<input type="number" v-model.number="tpStop" class="w-full" min="0" max="100" step="0.1" placeholder="None" />
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Strategy Parameters -->
|
||||||
|
<div v-if="currentStrategy && Object.keys(params).length > 0">
|
||||||
|
<h3 class="text-sm font-medium text-text-secondary mb-2">Strategy Parameters</h3>
|
||||||
|
<div class="grid grid-cols-2 gap-3">
|
||||||
|
<div v-for="(value, key) in params" :key="key">
|
||||||
|
<label class="block text-xs text-text-secondary uppercase mb-1">
|
||||||
|
{{ String(key).replace(/_/g, ' ') }}
|
||||||
|
</label>
|
||||||
|
<template v-if="getParamType(value) === 'boolean'">
|
||||||
|
<input
|
||||||
|
type="checkbox"
|
||||||
|
:checked="Boolean(value)"
|
||||||
|
@change="params[key] = ($event.target as HTMLInputElement).checked"
|
||||||
|
class="w-5 h-5"
|
||||||
|
/>
|
||||||
|
</template>
|
||||||
|
<template v-else>
|
||||||
|
<input
|
||||||
|
type="number"
|
||||||
|
:value="value"
|
||||||
|
@input="params[key] = parseFloat(($event.target as HTMLInputElement).value)"
|
||||||
|
class="w-full"
|
||||||
|
step="any"
|
||||||
|
/>
|
||||||
|
</template>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Submit -->
|
||||||
|
<button
|
||||||
|
type="submit"
|
||||||
|
class="btn btn-primary w-full"
|
||||||
|
:disabled="loading || !selectedStrategy || !selectedSymbol"
|
||||||
|
>
|
||||||
|
<span v-if="loading" class="spinner"></span>
|
||||||
|
<span v-else>Run Backtest</span>
|
||||||
|
</button>
|
||||||
|
</form>
|
||||||
|
</div>
|
||||||
|
</template>
|
||||||
88
frontend/src/components/EquityCurve.vue
Normal file
88
frontend/src/components/EquityCurve.vue
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
<script setup lang="ts">
|
||||||
|
import { ref, watch, onMounted, onUnmounted } from 'vue'
|
||||||
|
import Plotly from 'plotly.js-dist-min'
|
||||||
|
import type { EquityPoint } from '@/api/types'
|
||||||
|
|
||||||
|
const props = defineProps<{
|
||||||
|
data: EquityPoint[]
|
||||||
|
title?: string
|
||||||
|
}>()
|
||||||
|
|
||||||
|
const chartRef = ref<HTMLDivElement | null>(null)
|
||||||
|
|
||||||
|
const CHART_COLORS = {
|
||||||
|
equity: '#58a6ff',
|
||||||
|
grid: '#30363d',
|
||||||
|
text: '#8b949e',
|
||||||
|
}
|
||||||
|
|
||||||
|
function renderChart() {
|
||||||
|
if (!chartRef.value || props.data.length === 0) return
|
||||||
|
|
||||||
|
const timestamps = props.data.map(p => p.timestamp)
|
||||||
|
const values = props.data.map(p => p.value)
|
||||||
|
|
||||||
|
const traces: Plotly.Data[] = [
|
||||||
|
{
|
||||||
|
x: timestamps,
|
||||||
|
y: values,
|
||||||
|
type: 'scatter',
|
||||||
|
mode: 'lines',
|
||||||
|
name: 'Portfolio Value',
|
||||||
|
line: { color: CHART_COLORS.equity, width: 2 },
|
||||||
|
hovertemplate: '%{x}<br>Value: $%{y:,.2f}<extra></extra>',
|
||||||
|
},
|
||||||
|
]
|
||||||
|
|
||||||
|
const layout: Partial<Plotly.Layout> = {
|
||||||
|
title: props.title ? {
|
||||||
|
text: props.title,
|
||||||
|
font: { color: CHART_COLORS.text, size: 14 },
|
||||||
|
} : undefined,
|
||||||
|
paper_bgcolor: 'transparent',
|
||||||
|
plot_bgcolor: 'transparent',
|
||||||
|
margin: { l: 60, r: 20, t: props.title ? 40 : 20, b: 40 },
|
||||||
|
xaxis: {
|
||||||
|
showgrid: true,
|
||||||
|
gridcolor: CHART_COLORS.grid,
|
||||||
|
tickfont: { color: CHART_COLORS.text, size: 10 },
|
||||||
|
linecolor: CHART_COLORS.grid,
|
||||||
|
},
|
||||||
|
yaxis: {
|
||||||
|
showgrid: true,
|
||||||
|
gridcolor: CHART_COLORS.grid,
|
||||||
|
tickfont: { color: CHART_COLORS.text, size: 10 },
|
||||||
|
linecolor: CHART_COLORS.grid,
|
||||||
|
tickprefix: '$',
|
||||||
|
hoverformat: ',.2f',
|
||||||
|
},
|
||||||
|
showlegend: false,
|
||||||
|
hovermode: 'x unified',
|
||||||
|
}
|
||||||
|
|
||||||
|
const config: Partial<Plotly.Config> = {
|
||||||
|
responsive: true,
|
||||||
|
displayModeBar: false,
|
||||||
|
}
|
||||||
|
|
||||||
|
Plotly.react(chartRef.value, traces, layout, config)
|
||||||
|
}
|
||||||
|
|
||||||
|
watch(() => props.data, renderChart, { deep: true })
|
||||||
|
|
||||||
|
onMounted(() => {
|
||||||
|
renderChart()
|
||||||
|
window.addEventListener('resize', renderChart)
|
||||||
|
})
|
||||||
|
|
||||||
|
onUnmounted(() => {
|
||||||
|
window.removeEventListener('resize', renderChart)
|
||||||
|
if (chartRef.value) {
|
||||||
|
Plotly.purge(chartRef.value)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<template>
|
||||||
|
<div ref="chartRef" class="w-full h-full min-h-[300px]"></div>
|
||||||
|
</template>
|
||||||
41
frontend/src/components/HelloWorld.vue
Normal file
41
frontend/src/components/HelloWorld.vue
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
<script setup lang="ts">
|
||||||
|
import { ref } from 'vue'
|
||||||
|
|
||||||
|
defineProps<{ msg: string }>()
|
||||||
|
|
||||||
|
const count = ref(0)
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<template>
|
||||||
|
<h1>{{ msg }}</h1>
|
||||||
|
|
||||||
|
<div class="card">
|
||||||
|
<button type="button" @click="count++">count is {{ count }}</button>
|
||||||
|
<p>
|
||||||
|
Edit
|
||||||
|
<code>components/HelloWorld.vue</code> to test HMR
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<p>
|
||||||
|
Check out
|
||||||
|
<a href="https://vuejs.org/guide/quick-start.html#local" target="_blank"
|
||||||
|
>create-vue</a
|
||||||
|
>, the official Vue + Vite starter
|
||||||
|
</p>
|
||||||
|
<p>
|
||||||
|
Learn more about IDE Support for Vue in the
|
||||||
|
<a
|
||||||
|
href="https://vuejs.org/guide/scaling-up/tooling.html#ide-support"
|
||||||
|
target="_blank"
|
||||||
|
>Vue Docs Scaling up Guide</a
|
||||||
|
>.
|
||||||
|
</p>
|
||||||
|
<p class="read-the-docs">Click on the Vite and Vue logos to learn more</p>
|
||||||
|
</template>
|
||||||
|
|
||||||
|
<style scoped>
|
||||||
|
.read-the-docs {
|
||||||
|
color: #888;
|
||||||
|
}
|
||||||
|
</style>
|
||||||
144
frontend/src/components/MetricsPanel.vue
Normal file
144
frontend/src/components/MetricsPanel.vue
Normal file
@@ -0,0 +1,144 @@
|
|||||||
|
<script setup lang="ts">
|
||||||
|
import type { BacktestMetrics } from '@/api/types'
|
||||||
|
|
||||||
|
const props = defineProps<{
|
||||||
|
metrics: BacktestMetrics
|
||||||
|
leverage?: number
|
||||||
|
marketType?: string
|
||||||
|
}>()
|
||||||
|
|
||||||
|
function formatPercent(val: number): string {
|
||||||
|
return (val >= 0 ? '+' : '') + val.toFixed(2) + '%'
|
||||||
|
}
|
||||||
|
|
||||||
|
function formatNumber(val: number | null | undefined, decimals = 2): string {
|
||||||
|
if (val === null || val === undefined) return '-'
|
||||||
|
return val.toFixed(decimals)
|
||||||
|
}
|
||||||
|
|
||||||
|
function formatCurrency(val: number): string {
|
||||||
|
return '$' + val.toLocaleString('en-US', { minimumFractionDigits: 2, maximumFractionDigits: 2 })
|
||||||
|
}
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<template>
|
||||||
|
<div class="grid grid-cols-2 md:grid-cols-4 gap-4">
|
||||||
|
<!-- Total Return -->
|
||||||
|
<div class="card">
|
||||||
|
<div class="metric-label">Strategy Return</div>
|
||||||
|
<div
|
||||||
|
class="metric-value"
|
||||||
|
:class="metrics.total_return >= 0 ? 'profit' : 'loss'"
|
||||||
|
>
|
||||||
|
{{ formatPercent(metrics.total_return) }}
|
||||||
|
</div>
|
||||||
|
<div v-if="metrics.adjusted_return !== null && metrics.adjusted_return !== metrics.total_return" class="text-xs text-text-muted mt-1">
|
||||||
|
Adj: {{ formatPercent(metrics.adjusted_return) }}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Benchmark Return -->
|
||||||
|
<div class="card">
|
||||||
|
<div class="metric-label">Benchmark (B&H)</div>
|
||||||
|
<div
|
||||||
|
class="metric-value"
|
||||||
|
:class="metrics.benchmark_return >= 0 ? 'profit' : 'loss'"
|
||||||
|
>
|
||||||
|
{{ formatPercent(metrics.benchmark_return) }}
|
||||||
|
</div>
|
||||||
|
<div class="text-xs text-text-muted mt-1">
|
||||||
|
Market change
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Alpha -->
|
||||||
|
<div class="card">
|
||||||
|
<div class="metric-label">Alpha</div>
|
||||||
|
<div
|
||||||
|
class="metric-value"
|
||||||
|
:class="metrics.alpha >= 0 ? 'profit' : 'loss'"
|
||||||
|
>
|
||||||
|
{{ formatPercent(metrics.alpha) }}
|
||||||
|
</div>
|
||||||
|
<div class="text-xs text-text-muted mt-1">
|
||||||
|
vs Buy & Hold
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Sharpe Ratio -->
|
||||||
|
<div class="card">
|
||||||
|
<div class="metric-label">Sharpe Ratio</div>
|
||||||
|
<div
|
||||||
|
class="metric-value"
|
||||||
|
:class="metrics.sharpe_ratio >= 1 ? 'profit' : metrics.sharpe_ratio < 0 ? 'loss' : ''"
|
||||||
|
>
|
||||||
|
{{ formatNumber(metrics.sharpe_ratio) }}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Max Drawdown -->
|
||||||
|
<div class="card">
|
||||||
|
<div class="metric-label">Max Drawdown</div>
|
||||||
|
<div class="metric-value loss">
|
||||||
|
{{ formatPercent(metrics.max_drawdown) }}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Win Rate -->
|
||||||
|
<div class="card">
|
||||||
|
<div class="metric-label">Win Rate</div>
|
||||||
|
<div
|
||||||
|
class="metric-value"
|
||||||
|
:class="metrics.win_rate >= 50 ? 'profit' : 'loss'"
|
||||||
|
>
|
||||||
|
{{ formatNumber(metrics.win_rate, 1) }}%
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Total Trades -->
|
||||||
|
<div class="card">
|
||||||
|
<div class="metric-label">Total Trades</div>
|
||||||
|
<div class="metric-value">
|
||||||
|
{{ metrics.total_trades }}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Profit Factor -->
|
||||||
|
<div class="card">
|
||||||
|
<div class="metric-label">Profit Factor</div>
|
||||||
|
<div
|
||||||
|
class="metric-value"
|
||||||
|
:class="(metrics.profit_factor || 0) >= 1 ? 'profit' : 'loss'"
|
||||||
|
>
|
||||||
|
{{ formatNumber(metrics.profit_factor) }}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Total Fees -->
|
||||||
|
<div class="card">
|
||||||
|
<div class="metric-label">Total Fees</div>
|
||||||
|
<div class="metric-value text-warning">
|
||||||
|
{{ formatCurrency(metrics.total_fees) }}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Funding (perpetual only) -->
|
||||||
|
<div v-if="marketType === 'perpetual'" class="card">
|
||||||
|
<div class="metric-label">Funding Paid</div>
|
||||||
|
<div class="metric-value text-warning">
|
||||||
|
{{ formatCurrency(metrics.total_funding) }}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Liquidations (if any) -->
|
||||||
|
<div v-if="metrics.liquidation_count > 0" class="card">
|
||||||
|
<div class="metric-label">Liquidations</div>
|
||||||
|
<div class="metric-value loss">
|
||||||
|
{{ metrics.liquidation_count }}
|
||||||
|
</div>
|
||||||
|
<div class="text-xs text-text-muted mt-1">
|
||||||
|
Lost: {{ formatCurrency(metrics.liquidation_loss) }}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</template>
|
||||||
141
frontend/src/components/RunHistory.vue
Normal file
141
frontend/src/components/RunHistory.vue
Normal file
@@ -0,0 +1,141 @@
|
|||||||
|
<script setup lang="ts">
|
||||||
|
import { onMounted } from 'vue'
|
||||||
|
import { useBacktest } from '@/composables/useBacktest'
|
||||||
|
import { useRouter } from 'vue-router'
|
||||||
|
|
||||||
|
const router = useRouter()
|
||||||
|
const {
|
||||||
|
runs,
|
||||||
|
currentResult,
|
||||||
|
selectedRuns,
|
||||||
|
refreshRuns,
|
||||||
|
loadRun,
|
||||||
|
removeRun,
|
||||||
|
toggleRunSelection
|
||||||
|
} = useBacktest()
|
||||||
|
|
||||||
|
onMounted(() => {
|
||||||
|
refreshRuns()
|
||||||
|
})
|
||||||
|
|
||||||
|
function formatDate(iso: string): string {
|
||||||
|
const d = new Date(iso)
|
||||||
|
return d.toLocaleDateString('en-US', {
|
||||||
|
month: 'short',
|
||||||
|
day: 'numeric',
|
||||||
|
hour: '2-digit',
|
||||||
|
minute: '2-digit'
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
function formatReturn(val: number): string {
|
||||||
|
return (val >= 0 ? '+' : '') + val.toFixed(2) + '%'
|
||||||
|
}
|
||||||
|
|
||||||
|
async function handleClick(runId: string) {
|
||||||
|
await loadRun(runId)
|
||||||
|
router.push('/')
|
||||||
|
}
|
||||||
|
|
||||||
|
function handleCheckbox(e: Event, runId: string) {
|
||||||
|
e.stopPropagation()
|
||||||
|
toggleRunSelection(runId)
|
||||||
|
}
|
||||||
|
|
||||||
|
function handleDelete(e: Event, runId: string) {
|
||||||
|
e.stopPropagation()
|
||||||
|
if (confirm('Delete this run?')) {
|
||||||
|
removeRun(runId)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<template>
|
||||||
|
<div class="flex flex-col h-full">
|
||||||
|
<!-- Header -->
|
||||||
|
<div class="p-4 border-b border-border">
|
||||||
|
<h2 class="text-sm font-semibold text-text-secondary uppercase tracking-wide">
|
||||||
|
Run History
|
||||||
|
</h2>
|
||||||
|
<p class="text-xs text-text-muted mt-1">
|
||||||
|
{{ runs.length }} runs | {{ selectedRuns.length }} selected
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Run List -->
|
||||||
|
<div class="flex-1 overflow-y-auto">
|
||||||
|
<div
|
||||||
|
v-for="run in runs"
|
||||||
|
:key="run.run_id"
|
||||||
|
@click="handleClick(run.run_id)"
|
||||||
|
class="p-3 border-b border-border-muted cursor-pointer hover:bg-bg-hover transition-colors"
|
||||||
|
:class="{ 'bg-bg-tertiary': currentResult?.run_id === run.run_id }"
|
||||||
|
>
|
||||||
|
<div class="flex items-start gap-2">
|
||||||
|
<!-- Checkbox for comparison -->
|
||||||
|
<input
|
||||||
|
type="checkbox"
|
||||||
|
:checked="selectedRuns.includes(run.run_id)"
|
||||||
|
@click="handleCheckbox($event, run.run_id)"
|
||||||
|
class="mt-1 w-4 h-4 rounded border-border bg-bg-tertiary"
|
||||||
|
/>
|
||||||
|
|
||||||
|
<div class="flex-1 min-w-0">
|
||||||
|
<!-- Strategy & Symbol -->
|
||||||
|
<div class="flex items-center gap-2">
|
||||||
|
<span class="font-medium text-sm truncate">{{ run.strategy }}</span>
|
||||||
|
<span class="text-xs px-1.5 py-0.5 rounded bg-bg-tertiary text-text-secondary">
|
||||||
|
{{ run.symbol }}
|
||||||
|
</span>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Metrics -->
|
||||||
|
<div class="flex items-center gap-3 mt-1">
|
||||||
|
<span
|
||||||
|
class="text-sm font-mono"
|
||||||
|
:class="run.total_return >= 0 ? 'profit' : 'loss'"
|
||||||
|
>
|
||||||
|
{{ formatReturn(run.total_return) }}
|
||||||
|
</span>
|
||||||
|
<span class="text-xs text-text-muted">
|
||||||
|
SR {{ run.sharpe_ratio.toFixed(2) }}
|
||||||
|
</span>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Date -->
|
||||||
|
<div class="text-xs text-text-muted mt-1">
|
||||||
|
{{ formatDate(run.created_at) }}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Delete button -->
|
||||||
|
<button
|
||||||
|
@click="handleDelete($event, run.run_id)"
|
||||||
|
class="p-1 rounded hover:bg-loss/20 text-text-muted hover:text-loss transition-colors"
|
||||||
|
title="Delete run"
|
||||||
|
>
|
||||||
|
<svg class="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
|
||||||
|
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M19 7l-.867 12.142A2 2 0 0116.138 21H7.862a2 2 0 01-1.995-1.858L5 7m5 4v6m4-6v6m1-10V4a1 1 0 00-1-1h-4a1 1 0 00-1 1v3M4 7h16" />
|
||||||
|
</svg>
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Empty state -->
|
||||||
|
<div v-if="runs.length === 0" class="p-8 text-center text-text-muted">
|
||||||
|
<p>No runs yet.</p>
|
||||||
|
<p class="text-xs mt-1">Run a backtest to see results here.</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Compare Button -->
|
||||||
|
<div v-if="selectedRuns.length >= 2" class="p-4 border-t border-border">
|
||||||
|
<router-link
|
||||||
|
to="/compare"
|
||||||
|
class="btn btn-primary w-full"
|
||||||
|
>
|
||||||
|
Compare {{ selectedRuns.length }} Runs
|
||||||
|
</router-link>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</template>
|
||||||
157
frontend/src/components/TradeLog.vue
Normal file
157
frontend/src/components/TradeLog.vue
Normal file
@@ -0,0 +1,157 @@
|
|||||||
|
<script setup lang="ts">
|
||||||
|
import { ref, computed } from 'vue'
|
||||||
|
import type { TradeRecord } from '@/api/types'
|
||||||
|
|
||||||
|
const props = defineProps<{
|
||||||
|
trades: TradeRecord[]
|
||||||
|
}>()
|
||||||
|
|
||||||
|
type SortKey = 'entry_time' | 'pnl' | 'return_pct' | 'size'
|
||||||
|
const sortKey = ref<SortKey>('entry_time')
|
||||||
|
const sortDesc = ref(true)
|
||||||
|
|
||||||
|
const sortedTrades = computed(() => {
|
||||||
|
return [...props.trades].sort((a, b) => {
|
||||||
|
let aVal: number | string = 0
|
||||||
|
let bVal: number | string = 0
|
||||||
|
|
||||||
|
switch (sortKey.value) {
|
||||||
|
case 'entry_time':
|
||||||
|
aVal = a.entry_time
|
||||||
|
bVal = b.entry_time
|
||||||
|
break
|
||||||
|
case 'pnl':
|
||||||
|
aVal = a.pnl ?? 0
|
||||||
|
bVal = b.pnl ?? 0
|
||||||
|
break
|
||||||
|
case 'return_pct':
|
||||||
|
aVal = a.return_pct ?? 0
|
||||||
|
bVal = b.return_pct ?? 0
|
||||||
|
break
|
||||||
|
case 'size':
|
||||||
|
aVal = a.size
|
||||||
|
bVal = b.size
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if (aVal < bVal) return sortDesc.value ? 1 : -1
|
||||||
|
if (aVal > bVal) return sortDesc.value ? -1 : 1
|
||||||
|
return 0
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
function toggleSort(key: SortKey) {
|
||||||
|
if (sortKey.value === key) {
|
||||||
|
sortDesc.value = !sortDesc.value
|
||||||
|
} else {
|
||||||
|
sortKey.value = key
|
||||||
|
sortDesc.value = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function formatDate(iso: string): string {
|
||||||
|
if (!iso) return '-'
|
||||||
|
const d = new Date(iso)
|
||||||
|
return d.toLocaleDateString('en-US', {
|
||||||
|
month: 'short',
|
||||||
|
day: 'numeric',
|
||||||
|
hour: '2-digit',
|
||||||
|
minute: '2-digit',
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
function formatPrice(val: number | null): string {
|
||||||
|
if (val === null) return '-'
|
||||||
|
return val.toLocaleString('en-US', { minimumFractionDigits: 2, maximumFractionDigits: 2 })
|
||||||
|
}
|
||||||
|
|
||||||
|
function formatPnL(val: number | null): string {
|
||||||
|
if (val === null) return '-'
|
||||||
|
const sign = val >= 0 ? '+' : ''
|
||||||
|
return sign + val.toLocaleString('en-US', { minimumFractionDigits: 2, maximumFractionDigits: 2 })
|
||||||
|
}
|
||||||
|
|
||||||
|
function formatReturn(val: number | null): string {
|
||||||
|
if (val === null) return '-'
|
||||||
|
return (val >= 0 ? '+' : '') + val.toFixed(2) + '%'
|
||||||
|
}
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<template>
|
||||||
|
<div class="card overflow-hidden">
|
||||||
|
<div class="flex items-center justify-between mb-4">
|
||||||
|
<h3 class="text-sm font-semibold text-text-secondary uppercase tracking-wide">
|
||||||
|
Trade Log
|
||||||
|
</h3>
|
||||||
|
<span class="text-xs text-text-muted">{{ trades.length }} trades</span>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="overflow-x-auto max-h-[400px] overflow-y-auto">
|
||||||
|
<table class="min-w-full">
|
||||||
|
<thead class="sticky top-0 bg-bg-card">
|
||||||
|
<tr>
|
||||||
|
<th
|
||||||
|
@click="toggleSort('entry_time')"
|
||||||
|
class="cursor-pointer hover:text-text-primary"
|
||||||
|
>
|
||||||
|
Entry Time
|
||||||
|
<span v-if="sortKey === 'entry_time'">{{ sortDesc ? ' v' : ' ^' }}</span>
|
||||||
|
</th>
|
||||||
|
<th>Exit Time</th>
|
||||||
|
<th>Direction</th>
|
||||||
|
<th>Entry</th>
|
||||||
|
<th>Exit</th>
|
||||||
|
<th
|
||||||
|
@click="toggleSort('size')"
|
||||||
|
class="cursor-pointer hover:text-text-primary"
|
||||||
|
>
|
||||||
|
Size
|
||||||
|
<span v-if="sortKey === 'size'">{{ sortDesc ? ' v' : ' ^' }}</span>
|
||||||
|
</th>
|
||||||
|
<th
|
||||||
|
@click="toggleSort('pnl')"
|
||||||
|
class="cursor-pointer hover:text-text-primary"
|
||||||
|
>
|
||||||
|
PnL
|
||||||
|
<span v-if="sortKey === 'pnl'">{{ sortDesc ? ' v' : ' ^' }}</span>
|
||||||
|
</th>
|
||||||
|
<th
|
||||||
|
@click="toggleSort('return_pct')"
|
||||||
|
class="cursor-pointer hover:text-text-primary"
|
||||||
|
>
|
||||||
|
Return
|
||||||
|
<span v-if="sortKey === 'return_pct'">{{ sortDesc ? ' v' : ' ^' }}</span>
|
||||||
|
</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody>
|
||||||
|
<tr v-for="(trade, idx) in sortedTrades" :key="idx">
|
||||||
|
<td class="text-text-secondary">{{ formatDate(trade.entry_time) }}</td>
|
||||||
|
<td class="text-text-secondary">{{ formatDate(trade.exit_time || '') }}</td>
|
||||||
|
<td>
|
||||||
|
<span
|
||||||
|
class="px-2 py-0.5 rounded text-xs font-medium"
|
||||||
|
:class="trade.direction === 'Long' ? 'bg-profit/20 text-profit' : 'bg-loss/20 text-loss'"
|
||||||
|
>
|
||||||
|
{{ trade.direction }}
|
||||||
|
</span>
|
||||||
|
</td>
|
||||||
|
<td>${{ formatPrice(trade.entry_price) }}</td>
|
||||||
|
<td>${{ formatPrice(trade.exit_price) }}</td>
|
||||||
|
<td>{{ trade.size.toFixed(4) }}</td>
|
||||||
|
<td :class="(trade.pnl ?? 0) >= 0 ? 'profit' : 'loss'">
|
||||||
|
${{ formatPnL(trade.pnl) }}
|
||||||
|
</td>
|
||||||
|
<td :class="(trade.return_pct ?? 0) >= 0 ? 'profit' : 'loss'">
|
||||||
|
{{ formatReturn(trade.return_pct) }}
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
|
||||||
|
<div v-if="trades.length === 0" class="p-8 text-center text-text-muted">
|
||||||
|
No trades executed.
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</template>
|
||||||
150
frontend/src/composables/useBacktest.ts
Normal file
150
frontend/src/composables/useBacktest.ts
Normal file
@@ -0,0 +1,150 @@
|
|||||||
|
/**
|
||||||
|
* Composable for managing backtest state across components.
|
||||||
|
*/
|
||||||
|
import { ref, computed } from 'vue'
|
||||||
|
import type { BacktestResult, BacktestSummary, StrategyInfo, SymbolInfo } from '@/api/types'
|
||||||
|
import { getStrategies, getSymbols, getBacktests, getBacktest, runBacktest, deleteBacktest } from '@/api/client'
|
||||||
|
import type { BacktestRequest } from '@/api/types'
|
||||||
|
|
||||||
|
// Shared state
|
||||||
|
const strategies = ref<StrategyInfo[]>([])
|
||||||
|
const symbols = ref<SymbolInfo[]>([])
|
||||||
|
const runs = ref<BacktestSummary[]>([])
|
||||||
|
const currentResult = ref<BacktestResult | null>(null)
|
||||||
|
const selectedRuns = ref<string[]>([])
|
||||||
|
const loading = ref(false)
|
||||||
|
const error = ref<string | null>(null)
|
||||||
|
|
||||||
|
// Computed
|
||||||
|
const symbolsByMarket = computed(() => {
|
||||||
|
const grouped: Record<string, SymbolInfo[]> = {}
|
||||||
|
for (const s of symbols.value) {
|
||||||
|
const key = `${s.market_type}`
|
||||||
|
if (!grouped[key]) grouped[key] = []
|
||||||
|
grouped[key].push(s)
|
||||||
|
}
|
||||||
|
return grouped
|
||||||
|
})
|
||||||
|
|
||||||
|
export function useBacktest() {
|
||||||
|
/**
|
||||||
|
* Load strategies and symbols on app init.
|
||||||
|
*/
|
||||||
|
async function init() {
|
||||||
|
try {
|
||||||
|
const [stratRes, symRes] = await Promise.all([
|
||||||
|
getStrategies(),
|
||||||
|
getSymbols(),
|
||||||
|
])
|
||||||
|
strategies.value = stratRes.strategies
|
||||||
|
symbols.value = symRes.symbols
|
||||||
|
} catch (e) {
|
||||||
|
error.value = `Failed to load initial data: ${e}`
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Refresh the run history list.
|
||||||
|
*/
|
||||||
|
async function refreshRuns() {
|
||||||
|
try {
|
||||||
|
const res = await getBacktests({ limit: 100 })
|
||||||
|
runs.value = res.runs
|
||||||
|
} catch (e) {
|
||||||
|
error.value = `Failed to load runs: ${e}`
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Execute a new backtest.
|
||||||
|
*/
|
||||||
|
async function executeBacktest(request: BacktestRequest) {
|
||||||
|
loading.value = true
|
||||||
|
error.value = null
|
||||||
|
try {
|
||||||
|
const result = await runBacktest(request)
|
||||||
|
currentResult.value = result
|
||||||
|
await refreshRuns()
|
||||||
|
return result
|
||||||
|
} catch (e: unknown) {
|
||||||
|
const msg = e instanceof Error ? e.message : String(e)
|
||||||
|
error.value = `Backtest failed: ${msg}`
|
||||||
|
throw e
|
||||||
|
} finally {
|
||||||
|
loading.value = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Load a specific run by ID.
|
||||||
|
*/
|
||||||
|
async function loadRun(runId: string) {
|
||||||
|
loading.value = true
|
||||||
|
error.value = null
|
||||||
|
try {
|
||||||
|
const result = await getBacktest(runId)
|
||||||
|
currentResult.value = result
|
||||||
|
return result
|
||||||
|
} catch (e) {
|
||||||
|
error.value = `Failed to load run: ${e}`
|
||||||
|
throw e
|
||||||
|
} finally {
|
||||||
|
loading.value = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Delete a run.
|
||||||
|
*/
|
||||||
|
async function removeRun(runId: string) {
|
||||||
|
try {
|
||||||
|
await deleteBacktest(runId)
|
||||||
|
await refreshRuns()
|
||||||
|
if (currentResult.value?.run_id === runId) {
|
||||||
|
currentResult.value = null
|
||||||
|
}
|
||||||
|
selectedRuns.value = selectedRuns.value.filter(id => id !== runId)
|
||||||
|
} catch (e) {
|
||||||
|
error.value = `Failed to delete run: ${e}`
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Toggle run selection for comparison.
|
||||||
|
*/
|
||||||
|
function toggleRunSelection(runId: string) {
|
||||||
|
const idx = selectedRuns.value.indexOf(runId)
|
||||||
|
if (idx >= 0) {
|
||||||
|
selectedRuns.value.splice(idx, 1)
|
||||||
|
} else if (selectedRuns.value.length < 5) {
|
||||||
|
selectedRuns.value.push(runId)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Clear all selections.
|
||||||
|
*/
|
||||||
|
function clearSelections() {
|
||||||
|
selectedRuns.value = []
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
// State
|
||||||
|
strategies,
|
||||||
|
symbols,
|
||||||
|
symbolsByMarket,
|
||||||
|
runs,
|
||||||
|
currentResult,
|
||||||
|
selectedRuns,
|
||||||
|
loading,
|
||||||
|
error,
|
||||||
|
// Actions
|
||||||
|
init,
|
||||||
|
refreshRuns,
|
||||||
|
executeBacktest,
|
||||||
|
loadRun,
|
||||||
|
removeRun,
|
||||||
|
toggleRunSelection,
|
||||||
|
clearSelections,
|
||||||
|
}
|
||||||
|
}
|
||||||
8
frontend/src/main.ts
Normal file
8
frontend/src/main.ts
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
import { createApp } from 'vue'
|
||||||
|
import App from './App.vue'
|
||||||
|
import router from './router'
|
||||||
|
import './style.css'
|
||||||
|
|
||||||
|
const app = createApp(App)
|
||||||
|
app.use(router)
|
||||||
|
app.mount('#app')
|
||||||
21
frontend/src/router/index.ts
Normal file
21
frontend/src/router/index.ts
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
import { createRouter, createWebHistory } from 'vue-router'
|
||||||
|
import DashboardView from '@/views/DashboardView.vue'
|
||||||
|
import CompareView from '@/views/CompareView.vue'
|
||||||
|
|
||||||
|
const router = createRouter({
|
||||||
|
history: createWebHistory(import.meta.env.BASE_URL),
|
||||||
|
routes: [
|
||||||
|
{
|
||||||
|
path: '/',
|
||||||
|
name: 'dashboard',
|
||||||
|
component: DashboardView,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
path: '/compare',
|
||||||
|
name: 'compare',
|
||||||
|
component: CompareView,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
})
|
||||||
|
|
||||||
|
export default router
|
||||||
198
frontend/src/style.css
Normal file
198
frontend/src/style.css
Normal file
@@ -0,0 +1,198 @@
|
|||||||
|
@import "tailwindcss";
|
||||||
|
|
||||||
|
/* QuantConnect-inspired dark theme */
|
||||||
|
@theme {
|
||||||
|
/* Background colors */
|
||||||
|
--color-bg-primary: #0d1117;
|
||||||
|
--color-bg-secondary: #161b22;
|
||||||
|
--color-bg-tertiary: #21262d;
|
||||||
|
--color-bg-card: #1c2128;
|
||||||
|
--color-bg-hover: #30363d;
|
||||||
|
|
||||||
|
/* Text colors */
|
||||||
|
--color-text-primary: #e6edf3;
|
||||||
|
--color-text-secondary: #8b949e;
|
||||||
|
--color-text-muted: #6e7681;
|
||||||
|
|
||||||
|
/* Accent colors */
|
||||||
|
--color-accent-blue: #58a6ff;
|
||||||
|
--color-accent-purple: #a371f7;
|
||||||
|
--color-accent-cyan: #39d4e8;
|
||||||
|
|
||||||
|
/* Status colors */
|
||||||
|
--color-profit: #3fb950;
|
||||||
|
--color-loss: #f85149;
|
||||||
|
--color-warning: #d29922;
|
||||||
|
|
||||||
|
/* Border colors */
|
||||||
|
--color-border: #30363d;
|
||||||
|
--color-border-muted: #21262d;
|
||||||
|
|
||||||
|
/* Chart colors for comparison */
|
||||||
|
--color-chart-1: #58a6ff;
|
||||||
|
--color-chart-2: #a371f7;
|
||||||
|
--color-chart-3: #39d4e8;
|
||||||
|
--color-chart-4: #f0883e;
|
||||||
|
--color-chart-5: #db61a2;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Base styles */
|
||||||
|
body {
|
||||||
|
background-color: var(--color-bg-primary);
|
||||||
|
color: var(--color-text-primary);
|
||||||
|
font-family: 'JetBrains Mono', 'Fira Code', 'SF Mono', Consolas, monospace;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Scrollbar styling */
|
||||||
|
::-webkit-scrollbar {
|
||||||
|
width: 8px;
|
||||||
|
height: 8px;
|
||||||
|
}
|
||||||
|
|
||||||
|
::-webkit-scrollbar-track {
|
||||||
|
background: var(--color-bg-secondary);
|
||||||
|
}
|
||||||
|
|
||||||
|
::-webkit-scrollbar-thumb {
|
||||||
|
background: var(--color-bg-hover);
|
||||||
|
border-radius: 4px;
|
||||||
|
}
|
||||||
|
|
||||||
|
::-webkit-scrollbar-thumb:hover {
|
||||||
|
background: var(--color-border);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Input styling */
|
||||||
|
input[type="number"],
|
||||||
|
input[type="text"],
|
||||||
|
select {
|
||||||
|
background-color: var(--color-bg-tertiary);
|
||||||
|
border: 1px solid var(--color-border);
|
||||||
|
color: var(--color-text-primary);
|
||||||
|
border-radius: 6px;
|
||||||
|
padding: 0.5rem 0.75rem;
|
||||||
|
font-size: 0.875rem;
|
||||||
|
transition: border-color 0.2s, box-shadow 0.2s;
|
||||||
|
}
|
||||||
|
|
||||||
|
input[type="number"]:focus,
|
||||||
|
input[type="text"]:focus,
|
||||||
|
select:focus {
|
||||||
|
outline: none;
|
||||||
|
border-color: var(--color-accent-blue);
|
||||||
|
box-shadow: 0 0 0 3px rgba(88, 166, 255, 0.2);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Button base */
|
||||||
|
.btn {
|
||||||
|
display: inline-flex;
|
||||||
|
align-items: center;
|
||||||
|
justify-content: center;
|
||||||
|
gap: 0.5rem;
|
||||||
|
padding: 0.5rem 1rem;
|
||||||
|
font-size: 0.875rem;
|
||||||
|
font-weight: 500;
|
||||||
|
border-radius: 6px;
|
||||||
|
transition: all 0.2s;
|
||||||
|
cursor: pointer;
|
||||||
|
border: 1px solid transparent;
|
||||||
|
}
|
||||||
|
|
||||||
|
.btn-primary {
|
||||||
|
background-color: var(--color-accent-blue);
|
||||||
|
color: #000;
|
||||||
|
}
|
||||||
|
|
||||||
|
.btn-primary:hover {
|
||||||
|
background-color: #79b8ff;
|
||||||
|
}
|
||||||
|
|
||||||
|
.btn-primary:disabled {
|
||||||
|
opacity: 0.5;
|
||||||
|
cursor: not-allowed;
|
||||||
|
}
|
||||||
|
|
||||||
|
.btn-secondary {
|
||||||
|
background-color: var(--color-bg-tertiary);
|
||||||
|
border-color: var(--color-border);
|
||||||
|
color: var(--color-text-primary);
|
||||||
|
}
|
||||||
|
|
||||||
|
.btn-secondary:hover {
|
||||||
|
background-color: var(--color-bg-hover);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Card styling */
|
||||||
|
.card {
|
||||||
|
background-color: var(--color-bg-card);
|
||||||
|
border: 1px solid var(--color-border-muted);
|
||||||
|
border-radius: 8px;
|
||||||
|
padding: 1rem;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Profit/Loss coloring */
|
||||||
|
.profit {
|
||||||
|
color: var(--color-profit);
|
||||||
|
}
|
||||||
|
|
||||||
|
.loss {
|
||||||
|
color: var(--color-loss);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Metric value styling */
|
||||||
|
.metric-value {
|
||||||
|
font-size: 1.5rem;
|
||||||
|
font-weight: 600;
|
||||||
|
font-variant-numeric: tabular-nums;
|
||||||
|
}
|
||||||
|
|
||||||
|
.metric-label {
|
||||||
|
font-size: 0.75rem;
|
||||||
|
color: var(--color-text-secondary);
|
||||||
|
text-transform: uppercase;
|
||||||
|
letter-spacing: 0.05em;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Table styling */
|
||||||
|
table {
|
||||||
|
width: 100%;
|
||||||
|
border-collapse: collapse;
|
||||||
|
}
|
||||||
|
|
||||||
|
th {
|
||||||
|
text-align: left;
|
||||||
|
padding: 0.75rem;
|
||||||
|
font-size: 0.75rem;
|
||||||
|
font-weight: 500;
|
||||||
|
color: var(--color-text-secondary);
|
||||||
|
text-transform: uppercase;
|
||||||
|
letter-spacing: 0.05em;
|
||||||
|
border-bottom: 1px solid var(--color-border);
|
||||||
|
}
|
||||||
|
|
||||||
|
td {
|
||||||
|
padding: 0.75rem;
|
||||||
|
font-size: 0.875rem;
|
||||||
|
border-bottom: 1px solid var(--color-border-muted);
|
||||||
|
font-variant-numeric: tabular-nums;
|
||||||
|
}
|
||||||
|
|
||||||
|
tr:hover td {
|
||||||
|
background-color: var(--color-bg-hover);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Loading spinner */
|
||||||
|
.spinner {
|
||||||
|
width: 20px;
|
||||||
|
height: 20px;
|
||||||
|
border: 2px solid var(--color-border);
|
||||||
|
border-top-color: var(--color-accent-blue);
|
||||||
|
border-radius: 50%;
|
||||||
|
animation: spin 0.8s linear infinite;
|
||||||
|
}
|
||||||
|
|
||||||
|
@keyframes spin {
|
||||||
|
to {
|
||||||
|
transform: rotate(360deg);
|
||||||
|
}
|
||||||
|
}
|
||||||
4
frontend/src/types/plotly.d.ts
vendored
Normal file
4
frontend/src/types/plotly.d.ts
vendored
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
declare module 'plotly.js-dist-min' {
|
||||||
|
import Plotly from 'plotly.js'
|
||||||
|
export default Plotly
|
||||||
|
}
|
||||||
362
frontend/src/views/CompareView.vue
Normal file
362
frontend/src/views/CompareView.vue
Normal file
@@ -0,0 +1,362 @@
|
|||||||
|
<script setup lang="ts">
|
||||||
|
import { nextTick, onMounted, onUnmounted, ref, watch } from 'vue'
|
||||||
|
import { useRouter } from 'vue-router'
|
||||||
|
import Plotly from 'plotly.js-dist-min'
|
||||||
|
import { useBacktest } from '@/composables/useBacktest'
|
||||||
|
import { compareRuns } from '@/api/client'
|
||||||
|
import type { BacktestResult, CompareResult } from '@/api/types'
|
||||||
|
|
||||||
|
const router = useRouter()
|
||||||
|
const { selectedRuns, clearSelections } = useBacktest()
|
||||||
|
|
||||||
|
const chartRef = ref<HTMLDivElement | null>(null)
|
||||||
|
const loading = ref(false)
|
||||||
|
const error = ref<string | null>(null)
|
||||||
|
const compareResult = ref<CompareResult | null>(null)
|
||||||
|
|
||||||
|
const CHART_COLORS = [
|
||||||
|
'#58a6ff', // blue
|
||||||
|
'#a371f7', // purple
|
||||||
|
'#39d4e8', // cyan
|
||||||
|
'#f0883e', // orange
|
||||||
|
'#db61a2', // pink
|
||||||
|
]
|
||||||
|
|
||||||
|
async function loadComparison() {
|
||||||
|
if (selectedRuns.value.length < 2) {
|
||||||
|
router.push('/')
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
loading.value = true
|
||||||
|
error.value = null
|
||||||
|
|
||||||
|
try {
|
||||||
|
compareResult.value = await compareRuns(selectedRuns.value)
|
||||||
|
renderChart()
|
||||||
|
} catch (e) {
|
||||||
|
error.value = `Failed to load comparison: ${e}`
|
||||||
|
} finally {
|
||||||
|
loading.value = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function renderChart() {
|
||||||
|
if (!chartRef.value || !compareResult.value) return
|
||||||
|
|
||||||
|
const traces: Plotly.Data[] = compareResult.value.runs.map((run, idx) => {
|
||||||
|
// Normalize equity curves to start at 100 for comparison
|
||||||
|
const startValue = run.equity_curve[0]?.value || 1
|
||||||
|
const normalizedValues = run.equity_curve.map(p => (p.value / startValue) * 100)
|
||||||
|
|
||||||
|
return {
|
||||||
|
x: run.equity_curve.map(p => p.timestamp),
|
||||||
|
y: normalizedValues,
|
||||||
|
type: 'scatter',
|
||||||
|
mode: 'lines',
|
||||||
|
name: `${run.strategy} (${run.params.period || ''})`,
|
||||||
|
line: { color: CHART_COLORS[idx % CHART_COLORS.length], width: 2 },
|
||||||
|
hovertemplate: `%{x}<br>${run.strategy}: %{y:.2f}<extra></extra>`,
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
const layout: Partial<Plotly.Layout> = {
|
||||||
|
title: {
|
||||||
|
text: 'Normalized Equity Comparison (Base 100)',
|
||||||
|
font: { color: '#8b949e', size: 14 },
|
||||||
|
},
|
||||||
|
paper_bgcolor: 'transparent',
|
||||||
|
plot_bgcolor: 'transparent',
|
||||||
|
margin: { l: 60, r: 20, t: 50, b: 40 },
|
||||||
|
xaxis: {
|
||||||
|
showgrid: true,
|
||||||
|
gridcolor: '#30363d',
|
||||||
|
tickfont: { color: '#8b949e', size: 10 },
|
||||||
|
linecolor: '#30363d',
|
||||||
|
},
|
||||||
|
yaxis: {
|
||||||
|
showgrid: true,
|
||||||
|
gridcolor: '#30363d',
|
||||||
|
tickfont: { color: '#8b949e', size: 10 },
|
||||||
|
linecolor: '#30363d',
|
||||||
|
title: { text: 'Normalized Value', font: { color: '#8b949e' } },
|
||||||
|
},
|
||||||
|
legend: {
|
||||||
|
orientation: 'h',
|
||||||
|
yanchor: 'bottom',
|
||||||
|
y: 1.02,
|
||||||
|
xanchor: 'left',
|
||||||
|
x: 0,
|
||||||
|
font: { color: '#8b949e' },
|
||||||
|
},
|
||||||
|
hovermode: 'x unified',
|
||||||
|
}
|
||||||
|
|
||||||
|
Plotly.react(chartRef.value, traces, layout, { responsive: true, displayModeBar: false })
|
||||||
|
}
|
||||||
|
|
||||||
|
function formatPercent(val: number): string {
|
||||||
|
return (val >= 0 ? '+' : '') + val.toFixed(2) + '%'
|
||||||
|
}
|
||||||
|
|
||||||
|
function formatNumber(val: number | null): string {
|
||||||
|
if (val === null) return '-'
|
||||||
|
return val.toFixed(2)
|
||||||
|
}
|
||||||
|
|
||||||
|
function getBestIndex(runs: BacktestResult[], metric: keyof BacktestResult['metrics'], higher = true): number {
|
||||||
|
let bestIdx = 0
|
||||||
|
let bestVal = runs[0]?.metrics[metric] ?? 0
|
||||||
|
|
||||||
|
for (let i = 1; i < runs.length; i++) {
|
||||||
|
const val = runs[i]?.metrics[metric] ?? 0
|
||||||
|
const isBetter = higher ? (val as number) > (bestVal as number) : (val as number) < (bestVal as number)
|
||||||
|
if (isBetter) {
|
||||||
|
bestIdx = i
|
||||||
|
bestVal = val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return bestIdx
|
||||||
|
}
|
||||||
|
|
||||||
|
function handleClearAndBack() {
|
||||||
|
clearSelections()
|
||||||
|
router.push('/')
|
||||||
|
}
|
||||||
|
|
||||||
|
onMounted(() => {
|
||||||
|
loadComparison()
|
||||||
|
window.addEventListener('resize', renderChart)
|
||||||
|
})
|
||||||
|
|
||||||
|
onUnmounted(() => {
|
||||||
|
window.removeEventListener('resize', renderChart)
|
||||||
|
if (chartRef.value) {
|
||||||
|
Plotly.purge(chartRef.value)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
watch(selectedRuns, () => {
|
||||||
|
if (selectedRuns.value.length >= 2) {
|
||||||
|
loadComparison()
|
||||||
|
}
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<template>
|
||||||
|
<div class="p-6 space-y-6">
|
||||||
|
<!-- Header -->
|
||||||
|
<div class="flex items-center justify-between">
|
||||||
|
<div>
|
||||||
|
<h1 class="text-2xl font-bold">Compare Runs</h1>
|
||||||
|
<p class="text-text-secondary text-sm mt-1">
|
||||||
|
Comparing {{ selectedRuns.length }} backtest runs
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
<button @click="handleClearAndBack" class="btn btn-secondary">
|
||||||
|
Clear & Back
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Error -->
|
||||||
|
<div v-if="error" class="p-4 rounded-lg bg-loss/10 border border-loss/30 text-loss">
|
||||||
|
{{ error }}
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Loading -->
|
||||||
|
<div v-if="loading" class="card flex items-center justify-center h-[400px]">
|
||||||
|
<div class="spinner" style="width: 40px; height: 40px;"></div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Comparison Results -->
|
||||||
|
<template v-else-if="compareResult">
|
||||||
|
<!-- Equity Curve Comparison -->
|
||||||
|
<div class="card">
|
||||||
|
<div ref="chartRef" class="h-[400px]"></div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Metrics Comparison Table -->
|
||||||
|
<div class="card overflow-x-auto">
|
||||||
|
<h3 class="text-sm font-semibold text-text-secondary uppercase tracking-wide mb-4">
|
||||||
|
Metrics Comparison
|
||||||
|
</h3>
|
||||||
|
<table class="min-w-full">
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th>Metric</th>
|
||||||
|
<th
|
||||||
|
v-for="(run, idx) in compareResult.runs"
|
||||||
|
:key="run.run_id"
|
||||||
|
class="text-center"
|
||||||
|
>
|
||||||
|
<span
|
||||||
|
class="inline-block w-3 h-3 rounded-full mr-2"
|
||||||
|
:style="{ backgroundColor: CHART_COLORS[idx] }"
|
||||||
|
></span>
|
||||||
|
{{ run.strategy }}
|
||||||
|
</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody>
|
||||||
|
<!-- Total Return -->
|
||||||
|
<tr>
|
||||||
|
<td class="font-medium">Strategy Return</td>
|
||||||
|
<td
|
||||||
|
v-for="(run, idx) in compareResult.runs"
|
||||||
|
:key="run.run_id"
|
||||||
|
class="text-center"
|
||||||
|
:class="[
|
||||||
|
run.metrics.total_return >= 0 ? 'profit' : 'loss',
|
||||||
|
idx === getBestIndex(compareResult.runs, 'total_return') ? 'font-bold' : ''
|
||||||
|
]"
|
||||||
|
>
|
||||||
|
{{ formatPercent(run.metrics.total_return) }}
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<!-- Benchmark Return -->
|
||||||
|
<tr>
|
||||||
|
<td class="font-medium">Benchmark (B&H)</td>
|
||||||
|
<td
|
||||||
|
v-for="run in compareResult.runs"
|
||||||
|
:key="run.run_id"
|
||||||
|
class="text-center"
|
||||||
|
:class="run.metrics.benchmark_return >= 0 ? 'profit' : 'loss'"
|
||||||
|
>
|
||||||
|
{{ formatPercent(run.metrics.benchmark_return) }}
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<!-- Alpha -->
|
||||||
|
<tr>
|
||||||
|
<td class="font-medium">Alpha</td>
|
||||||
|
<td
|
||||||
|
v-for="(run, idx) in compareResult.runs"
|
||||||
|
:key="run.run_id"
|
||||||
|
class="text-center"
|
||||||
|
:class="[
|
||||||
|
run.metrics.alpha >= 0 ? 'profit' : 'loss',
|
||||||
|
idx === getBestIndex(compareResult.runs, 'alpha') ? 'font-bold' : ''
|
||||||
|
]"
|
||||||
|
>
|
||||||
|
{{ formatPercent(run.metrics.alpha) }}
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<!-- Sharpe Ratio -->
|
||||||
|
<tr>
|
||||||
|
<td class="font-medium">Sharpe Ratio</td>
|
||||||
|
<td
|
||||||
|
v-for="(run, idx) in compareResult.runs"
|
||||||
|
:key="run.run_id"
|
||||||
|
class="text-center"
|
||||||
|
:class="idx === getBestIndex(compareResult.runs, 'sharpe_ratio') ? 'font-bold text-accent-blue' : ''"
|
||||||
|
>
|
||||||
|
{{ formatNumber(run.metrics.sharpe_ratio) }}
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<!-- Max Drawdown -->
|
||||||
|
<tr>
|
||||||
|
<td class="font-medium">Max Drawdown</td>
|
||||||
|
<td
|
||||||
|
v-for="(run, idx) in compareResult.runs"
|
||||||
|
:key="run.run_id"
|
||||||
|
class="text-center loss"
|
||||||
|
:class="idx === getBestIndex(compareResult.runs, 'max_drawdown', false) ? 'font-bold' : ''"
|
||||||
|
>
|
||||||
|
{{ formatPercent(run.metrics.max_drawdown) }}
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<!-- Win Rate -->
|
||||||
|
<tr>
|
||||||
|
<td class="font-medium">Win Rate</td>
|
||||||
|
<td
|
||||||
|
v-for="(run, idx) in compareResult.runs"
|
||||||
|
:key="run.run_id"
|
||||||
|
class="text-center"
|
||||||
|
:class="idx === getBestIndex(compareResult.runs, 'win_rate') ? 'font-bold text-profit' : ''"
|
||||||
|
>
|
||||||
|
{{ formatNumber(run.metrics.win_rate) }}%
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<!-- Total Trades -->
|
||||||
|
<tr>
|
||||||
|
<td class="font-medium">Total Trades</td>
|
||||||
|
<td
|
||||||
|
v-for="run in compareResult.runs"
|
||||||
|
:key="run.run_id"
|
||||||
|
class="text-center"
|
||||||
|
>
|
||||||
|
{{ run.metrics.total_trades }}
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<!-- Profit Factor -->
|
||||||
|
<tr>
|
||||||
|
<td class="font-medium">Profit Factor</td>
|
||||||
|
<td
|
||||||
|
v-for="(run, idx) in compareResult.runs"
|
||||||
|
:key="run.run_id"
|
||||||
|
class="text-center"
|
||||||
|
:class="idx === getBestIndex(compareResult.runs, 'profit_factor') ? 'font-bold text-profit' : ''"
|
||||||
|
>
|
||||||
|
{{ formatNumber(run.metrics.profit_factor) }}
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Parameter Differences -->
|
||||||
|
<div v-if="Object.keys(compareResult.param_diff).length > 0" class="card">
|
||||||
|
<h3 class="text-sm font-semibold text-text-secondary uppercase tracking-wide mb-4">
|
||||||
|
Parameter Differences
|
||||||
|
</h3>
|
||||||
|
<table class="min-w-full">
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th>Parameter</th>
|
||||||
|
<th
|
||||||
|
v-for="(run, idx) in compareResult.runs"
|
||||||
|
:key="run.run_id"
|
||||||
|
class="text-center"
|
||||||
|
>
|
||||||
|
<span
|
||||||
|
class="inline-block w-3 h-3 rounded-full mr-2"
|
||||||
|
:style="{ backgroundColor: CHART_COLORS[idx] }"
|
||||||
|
></span>
|
||||||
|
Run {{ idx + 1 }}
|
||||||
|
</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody>
|
||||||
|
<tr v-for="(values, key) in compareResult.param_diff" :key="key">
|
||||||
|
<td class="font-medium font-mono">{{ key }}</td>
|
||||||
|
<td
|
||||||
|
v-for="(val, idx) in values"
|
||||||
|
:key="idx"
|
||||||
|
class="text-center font-mono"
|
||||||
|
>
|
||||||
|
{{ val ?? '-' }}
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
</div>
|
||||||
|
</template>
|
||||||
|
|
||||||
|
<!-- No Selection -->
|
||||||
|
<div v-else class="card flex items-center justify-center h-[400px]">
|
||||||
|
<div class="text-center text-text-muted">
|
||||||
|
<p>Select at least 2 runs from the history to compare.</p>
|
||||||
|
<button @click="router.push('/')" class="btn btn-secondary mt-4">
|
||||||
|
Go to Dashboard
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</template>
|
||||||
136
frontend/src/views/DashboardView.vue
Normal file
136
frontend/src/views/DashboardView.vue
Normal file
@@ -0,0 +1,136 @@
|
|||||||
|
<script setup lang="ts">
// Main dashboard view: backtest configuration panel on the left,
// results (equity curve, metrics, trades, params) on the right.
import { useBacktest } from '@/composables/useBacktest'
import BacktestConfig from '@/components/BacktestConfig.vue'
import EquityCurve from '@/components/EquityCurve.vue'
import MetricsPanel from '@/components/MetricsPanel.vue'
import TradeLog from '@/components/TradeLog.vue'

// Shared backtest state: latest result plus loading/error flags.
const { currentResult, loading, error } = useBacktest()
</script>

<template>
  <div class="p-6 space-y-6">
    <!-- Header -->
    <div class="flex items-center justify-between">
      <div>
        <h1 class="text-2xl font-bold">Lowkey Backtest</h1>
        <p class="text-text-secondary text-sm mt-1">
          Run and analyze trading strategy backtests
        </p>
      </div>
    </div>

    <!-- Error Banner -->
    <div
      v-if="error"
      class="p-4 rounded-lg bg-loss/10 border border-loss/30 text-loss"
    >
      {{ error }}
    </div>

    <!-- Main Grid -->
    <div class="grid grid-cols-1 lg:grid-cols-3 gap-6">
      <!-- Config Panel (Left) -->
      <div class="lg:col-span-1">
        <BacktestConfig />
      </div>

      <!-- Results (Right) -->
      <div class="lg:col-span-2 space-y-6">
        <!-- Loading State -->
        <div
          v-if="loading"
          class="card flex items-center justify-center h-[400px]"
        >
          <div class="text-center">
            <div class="spinner mx-auto mb-4" style="width: 40px; height: 40px;"></div>
            <p class="text-text-secondary">Running backtest...</p>
          </div>
        </div>

        <!-- Results Display -->
        <template v-else-if="currentResult">
          <!-- Result Header -->
          <div class="card">
            <div class="flex items-center justify-between">
              <div>
                <h2 class="text-lg font-semibold">
                  {{ currentResult.strategy }} on {{ currentResult.symbol }}
                </h2>
                <p class="text-sm text-text-secondary mt-1">
                  {{ currentResult.start_date }} - {{ currentResult.end_date }}
                  <span class="mx-2">|</span>
                  {{ currentResult.market_type.toUpperCase() }}
                  <!-- Leverage badge is shown only for leveraged runs -->
                  <span v-if="currentResult.leverage > 1" class="ml-2">
                    {{ currentResult.leverage }}x
                  </span>
                </p>
              </div>
              <div class="text-right">
                <div
                  class="text-2xl font-bold"
                  :class="currentResult.metrics.total_return >= 0 ? 'profit' : 'loss'"
                >
                  {{ currentResult.metrics.total_return >= 0 ? '+' : '' }}{{ currentResult.metrics.total_return.toFixed(2) }}%
                </div>
                <div class="text-sm text-text-secondary">
                  Sharpe: {{ currentResult.metrics.sharpe_ratio.toFixed(2) }}
                </div>
              </div>
            </div>
          </div>

          <!-- Equity Curve -->
          <div class="card">
            <h3 class="text-sm font-semibold text-text-secondary uppercase tracking-wide mb-4">
              Equity Curve
            </h3>
            <div class="h-[350px]">
              <EquityCurve :data="currentResult.equity_curve" />
            </div>
          </div>

          <!-- Metrics -->
          <MetricsPanel
            :metrics="currentResult.metrics"
            :leverage="currentResult.leverage"
            :market-type="currentResult.market_type"
          />

          <!-- Trade Log -->
          <TradeLog :trades="currentResult.trades" />

          <!-- Parameters Used -->
          <div class="card">
            <h3 class="text-sm font-semibold text-text-secondary uppercase tracking-wide mb-3">
              Parameters
            </h3>
            <div class="flex flex-wrap gap-2">
              <span
                v-for="(value, key) in currentResult.params"
                :key="key"
                class="px-2 py-1 rounded bg-bg-tertiary text-sm font-mono"
              >
                {{ key }}: {{ value }}
              </span>
            </div>
          </div>
        </template>

        <!-- Empty State -->
        <div
          v-else
          class="card flex items-center justify-center h-[400px]"
        >
          <div class="text-center text-text-muted">
            <svg class="w-16 h-16 mx-auto mb-4 opacity-50" fill="none" stroke="currentColor" viewBox="0 0 24 24">
              <path stroke-linecap="round" stroke-linejoin="round" stroke-width="1" d="M9 19v-6a2 2 0 00-2-2H5a2 2 0 00-2 2v6a2 2 0 002 2h2a2 2 0 002-2zm0 0V9a2 2 0 012-2h2a2 2 0 012 2v10m-6 0a2 2 0 002 2h2a2 2 0 002-2m0 0V5a2 2 0 012-2h2a2 2 0 012 2v14a2 2 0 01-2 2h-2a2 2 0 01-2-2z" />
            </svg>
            <p>Configure and run a backtest to see results.</p>
            <p class="text-xs mt-2">Or select a run from history.</p>
          </div>
        </div>
      </div>
    </div>
  </div>
</template>
|
||||||
20
frontend/tsconfig.app.json
Normal file
20
frontend/tsconfig.app.json
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
{
|
||||||
|
"extends": "@vue/tsconfig/tsconfig.dom.json",
|
||||||
|
"compilerOptions": {
|
||||||
|
"tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo",
|
||||||
|
"types": ["vite/client"],
|
||||||
|
"baseUrl": ".",
|
||||||
|
"paths": {
|
||||||
|
"@/*": ["./src/*"]
|
||||||
|
},
|
||||||
|
|
||||||
|
/* Linting */
|
||||||
|
"strict": true,
|
||||||
|
"noUnusedLocals": true,
|
||||||
|
"noUnusedParameters": true,
|
||||||
|
"erasableSyntaxOnly": true,
|
||||||
|
"noFallthroughCasesInSwitch": true,
|
||||||
|
"noUncheckedSideEffectImports": true
|
||||||
|
},
|
||||||
|
"include": ["src/**/*.ts", "src/**/*.tsx", "src/**/*.vue"]
|
||||||
|
}
|
||||||
7
frontend/tsconfig.json
Normal file
7
frontend/tsconfig.json
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
{
|
||||||
|
"files": [],
|
||||||
|
"references": [
|
||||||
|
{ "path": "./tsconfig.app.json" },
|
||||||
|
{ "path": "./tsconfig.node.json" }
|
||||||
|
]
|
||||||
|
}
|
||||||
26
frontend/tsconfig.node.json
Normal file
26
frontend/tsconfig.node.json
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
{
|
||||||
|
"compilerOptions": {
|
||||||
|
"tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo",
|
||||||
|
"target": "ES2023",
|
||||||
|
"lib": ["ES2023"],
|
||||||
|
"module": "ESNext",
|
||||||
|
"types": ["node"],
|
||||||
|
"skipLibCheck": true,
|
||||||
|
|
||||||
|
/* Bundler mode */
|
||||||
|
"moduleResolution": "bundler",
|
||||||
|
"allowImportingTsExtensions": true,
|
||||||
|
"verbatimModuleSyntax": true,
|
||||||
|
"moduleDetection": "force",
|
||||||
|
"noEmit": true,
|
||||||
|
|
||||||
|
/* Linting */
|
||||||
|
"strict": true,
|
||||||
|
"noUnusedLocals": true,
|
||||||
|
"noUnusedParameters": true,
|
||||||
|
"erasableSyntaxOnly": true,
|
||||||
|
"noFallthroughCasesInSwitch": true,
|
||||||
|
"noUncheckedSideEffectImports": true
|
||||||
|
},
|
||||||
|
"include": ["vite.config.ts"]
|
||||||
|
}
|
||||||
22
frontend/vite.config.ts
Normal file
22
frontend/vite.config.ts
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
import { defineConfig } from 'vite'
import vue from '@vitejs/plugin-vue'
import tailwindcss from '@tailwindcss/vite'
import { fileURLToPath, URL } from 'node:url'

// https://vite.dev/config/
export default defineConfig({
  plugins: [vue(), tailwindcss()],
  resolve: {
    alias: {
      // Map "@/..." imports to ./src (kept in sync with tsconfig "paths").
      '@': fileURLToPath(new URL('./src', import.meta.url))
    }
  },
  server: {
    // Forward /api calls to the local FastAPI backend during development,
    // sidestepping CORS in the dev server.
    proxy: {
      '/api': {
        target: 'http://127.0.0.1:8000',
        changeOrigin: true
      }
    }
  }
})
|
||||||
135
live_trading/README.md
Normal file
135
live_trading/README.md
Normal file
@@ -0,0 +1,135 @@
|
|||||||
|
# Live Trading - Regime Reversion Strategy
|
||||||
|
|
||||||
|
This module implements live trading for the ML-based regime detection and mean reversion strategy on OKX perpetual futures.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The strategy trades ETH perpetual futures based on:
|
||||||
|
1. **BTC/ETH Spread Z-Score**: Identifies when ETH is cheap or expensive relative to BTC
|
||||||
|
2. **Random Forest ML Model**: Predicts probability of successful mean reversion
|
||||||
|
3. **Funding Rate Filter**: Avoids trades in overheated/oversold market conditions
|
||||||
|
|
||||||
|
## Setup
|
||||||
|
|
||||||
|
### 1. API Keys
|
||||||
|
|
||||||
|
The bot loads OKX API credentials from `../BTC_spot_MVRV/.env`.
|
||||||
|
|
||||||
|
**IMPORTANT: OKX uses SEPARATE API keys for live vs demo trading!**
|
||||||
|
|
||||||
|
#### Option A: Demo Trading (Recommended for Testing)
|
||||||
|
1. Go to [OKX Demo Trading](https://www.okx.com/demo-trading)
|
||||||
|
2. Create a demo account if you haven't
|
||||||
|
3. Generate API keys from the demo environment
|
||||||
|
4. Set in `.env`:
|
||||||
|
```env
|
||||||
|
OKX_API_KEY=your_demo_api_key
|
||||||
|
OKX_SECRET=your_demo_secret
|
||||||
|
OKX_PASSWORD=your_demo_passphrase
|
||||||
|
OKX_DEMO_MODE=true
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Option B: Live Trading (Real Funds)
|
||||||
|
Use your existing live API keys with:
|
||||||
|
```env
|
||||||
|
OKX_API_KEY=your_live_api_key
|
||||||
|
OKX_SECRET=your_live_secret
|
||||||
|
OKX_PASSWORD=your_live_passphrase
|
||||||
|
OKX_DEMO_MODE=false
|
||||||
|
```
|
||||||
|
|
||||||
|
**Note:** You cannot use live API keys with `OKX_DEMO_MODE=true` or vice versa.
|
||||||
|
OKX will return error `50101: APIKey does not match current environment`.
|
||||||
|
|
||||||
|
### 2. Dependencies
|
||||||
|
|
||||||
|
All dependencies are already in the project's `pyproject.toml`. No additional installation needed.
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Run with Demo Account (Recommended First)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd /path/to/lowkey_backtest
|
||||||
|
uv run python -m live_trading.main
|
||||||
|
```
|
||||||
|
|
||||||
|
### Command Line Options
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Custom position size
|
||||||
|
uv run python -m live_trading.main --max-position 500
|
||||||
|
|
||||||
|
# Custom leverage
|
||||||
|
uv run python -m live_trading.main --leverage 2
|
||||||
|
|
||||||
|
# Custom cycle interval (in seconds)
|
||||||
|
uv run python -m live_trading.main --interval 1800
|
||||||
|
|
||||||
|
# Combine options
|
||||||
|
uv run python -m live_trading.main --max-position 1000 --leverage 3 --interval 3600
|
||||||
|
```
|
||||||
|
|
||||||
|
### Live Trading (Use with Caution)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Requires OKX_DEMO_MODE=false in .env
|
||||||
|
uv run python -m live_trading.main --live
|
||||||
|
```
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
```
|
||||||
|
live_trading/
|
||||||
|
__init__.py # Module initialization
|
||||||
|
config.py # Configuration loading
|
||||||
|
okx_client.py # OKX API wrapper
|
||||||
|
data_feed.py # Real-time OHLCV data
|
||||||
|
position_manager.py # Position tracking
|
||||||
|
live_regime_strategy.py # Strategy logic
|
||||||
|
main.py # Entry point
|
||||||
|
.env.example # Environment template
|
||||||
|
README.md # This file
|
||||||
|
```
|
||||||
|
|
||||||
|
## Strategy Parameters
|
||||||
|
|
||||||
|
| Parameter | Default | Description |
|
||||||
|
|-----------|---------|-------------|
|
||||||
|
| `z_entry_threshold` | 1.0 | Enter when \|Z-Score\| > threshold |
|
||||||
|
| `z_window` | 24 | Rolling window for Z-Score (hours) |
|
||||||
|
| `model_prob_threshold` | 0.5 | ML probability threshold for entry |
|
||||||
|
| `funding_threshold` | 0.005 | Funding rate filter threshold |
|
||||||
|
| `stop_loss_pct` | 6% | Stop-loss percentage |
|
||||||
|
| `take_profit_pct` | 5% | Take-profit percentage |
|
||||||
|
|
||||||
|
## Files Generated
|
||||||
|
|
||||||
|
- `live_trading/positions.json` - Open positions persistence
|
||||||
|
- `live_trading/trade_log.csv` - Trade history
|
||||||
|
- `live_trading/regime_model.pkl` - Trained ML model
|
||||||
|
- `logs/live_trading.log` - Trading logs
|
||||||
|
|
||||||
|
## Risk Warning
|
||||||
|
|
||||||
|
This is experimental trading software. Use at your own risk:
|
||||||
|
- Always start with demo trading
|
||||||
|
- Never risk more than you can afford to lose
|
||||||
|
- Monitor the bot regularly
|
||||||
|
- Have a kill switch ready (Ctrl+C)
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### API Key Issues
|
||||||
|
- Ensure API keys have trading permissions
|
||||||
|
- For demo trading, use demo-specific API keys
|
||||||
|
- Check that passphrase matches exactly
|
||||||
|
|
||||||
|
### No Signals Generated
|
||||||
|
- The strategy requires the ML model to be trained
|
||||||
|
- Need at least 200 candles of data
|
||||||
|
- Model trains automatically on first run
|
||||||
|
|
||||||
|
### Position Sync Issues
|
||||||
|
- The bot syncs with exchange positions on each cycle
|
||||||
|
- If positions are closed manually, the bot will detect this
|
||||||
6
live_trading/__init__.py
Normal file
6
live_trading/__init__.py
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
"""
|
||||||
|
Live Trading Module for Regime Reversion Strategy on OKX.
|
||||||
|
|
||||||
|
This module implements live trading using the ML-based regime detection
|
||||||
|
and mean reversion strategy on OKX perpetual futures.
|
||||||
|
"""
|
||||||
114
live_trading/config.py
Normal file
114
live_trading/config.py
Normal file
@@ -0,0 +1,114 @@
|
|||||||
|
"""
|
||||||
|
Configuration for Live Trading.
|
||||||
|
|
||||||
|
Loads OKX API credentials from environment variables.
|
||||||
|
Uses demo/sandbox mode by default for paper trading.
|
||||||
|
"""
|
||||||
|
import os
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional

from dotenv import load_dotenv
|
||||||
|
|
||||||
|
load_dotenv()
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class OKXConfig:
    """OKX API credentials, resolved from environment variables.

    ``OKX_DEMO_MODE`` (truthy values: "true", "1", "yes") selects the
    mode. In demo mode the ``OKX_DEMO_*`` variables take precedence,
    falling back to the live ``OKX_*`` variables when absent.
    """
    api_key: str = field(default_factory=lambda: "")
    secret: str = field(default_factory=lambda: "")
    password: str = field(default_factory=lambda: "")
    demo_mode: bool = field(default_factory=lambda: True)

    def __post_init__(self):
        """Resolve credentials from the environment for the active mode."""
        # The environment always decides the mode.
        flag = os.getenv("OKX_DEMO_MODE", "true").lower()
        self.demo_mode = flag in ("true", "1", "yes")

        if self.demo_mode:
            # Demo-specific keys win when present; otherwise reuse live keys.
            live_key = os.getenv("OKX_API_KEY", "")
            live_secret = os.getenv("OKX_SECRET", "")
            live_password = os.getenv("OKX_PASSWORD", "")
            self.api_key = os.getenv("OKX_DEMO_API_KEY", live_key)
            self.secret = os.getenv("OKX_DEMO_SECRET", live_secret)
            self.password = os.getenv("OKX_DEMO_PASSWORD", live_password)
        else:
            # Live mode: only the live variables are consulted.
            self.api_key = os.getenv("OKX_API_KEY", "")
            self.secret = os.getenv("OKX_SECRET", "")
            self.password = os.getenv("OKX_PASSWORD", "")

    def validate(self) -> None:
        """Raise ``ValueError`` for the first missing required credential."""
        mode = "demo" if self.demo_mode else "live"
        checks = (
            (self.api_key, f"OKX API key not set for {mode} mode"),
            (self.secret, f"OKX secret not set for {mode} mode"),
            (self.password, f"OKX password not set for {mode} mode"),
        )
        for value, message in checks:
            if not value:
                raise ValueError(message)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class TradingConfig:
    """Trading parameters configuration.

    Defaults mirror the researched regime-reversion strategy settings;
    attributes can be overridden after construction to customize a run.
    """
    # Trading pairs (perpetual swap symbols)
    eth_symbol: str = "ETH/USDT:USDT"  # ETH perpetual (primary trading asset)
    btc_symbol: str = "BTC/USDT:USDT"  # BTC perpetual (context asset)

    # Timeframe
    timeframe: str = "1h"
    candles_to_fetch: int = 500  # Enough history for feature calculation

    # Position sizing
    max_position_usdt: float = -1.0  # Max position size in USDT. If <= 0, use all available funds
    min_position_usdt: float = 10.0  # Min position size in USDT
    leverage: int = 1  # Leverage multiplier (1x = no leverage)
    margin_mode: str = "cross"  # "cross" or "isolated"

    # Risk management
    stop_loss_pct: float = 0.06  # 6% stop loss
    take_profit_pct: float = 0.05  # 5% take profit
    max_concurrent_positions: int = 1  # Max simultaneously open positions

    # Strategy parameters (from regime_strategy.py)
    z_entry_threshold: float = 1.0  # Enter when |Z| > 1.0
    z_window: int = 24  # 24h rolling Z-score window
    model_prob_threshold: float = 0.5  # ML model probability threshold
    funding_threshold: float = 0.005  # Funding rate filter threshold

    # Execution
    sleep_seconds: int = 3600  # Run every hour (matches 1h candles)
    slippage_pct: float = 0.001  # 0.1% slippage buffer
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class PathConfig:
    """File-system paths used by the live-trading module.

    Any path may be supplied explicitly via the constructor; paths left
    as ``None`` are derived from ``base_dir`` in ``__post_init__``.
    The data and logs directories are created on construction if missing.

    Fix vs. previous version: derived paths were unconditionally
    recomputed, silently discarding constructor-supplied values, and
    the ``None``-defaulted fields were annotated as plain ``Path``.
    """
    # Project root: two levels up from this file unless overridden.
    base_dir: Path = field(
        default_factory=lambda: Path(__file__).parent.parent
    )
    data_dir: Optional[Path] = None
    logs_dir: Optional[Path] = None
    model_path: Optional[Path] = None
    positions_file: Optional[Path] = None
    trade_log_file: Optional[Path] = None
    cq_data_path: Optional[Path] = None

    def __post_init__(self):
        """Derive unset paths from ``base_dir`` and create output dirs."""
        live_dir = self.base_dir / "live_trading"
        if self.data_dir is None:
            self.data_dir = self.base_dir / "data"
        if self.logs_dir is None:
            self.logs_dir = self.base_dir / "logs"
        if self.model_path is None:
            self.model_path = live_dir / "regime_model.pkl"
        if self.positions_file is None:
            self.positions_file = live_dir / "positions.json"
        if self.trade_log_file is None:
            self.trade_log_file = live_dir / "trade_log.csv"
        if self.cq_data_path is None:
            # Defaults under data_dir, so an overridden data_dir is honored.
            self.cq_data_path = self.data_dir / "cq_training_data.csv"

        # Ensure directories exist
        self.data_dir.mkdir(parents=True, exist_ok=True)
        self.logs_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
|
||||||
|
def get_config():
    """Build and return the (OKXConfig, TradingConfig, PathConfig) triple."""
    # Construction order matters only for side effects: PathConfig
    # creates the data/logs directories when instantiated.
    return OKXConfig(), TradingConfig(), PathConfig()
|
||||||
216
live_trading/data_feed.py
Normal file
216
live_trading/data_feed.py
Normal file
@@ -0,0 +1,216 @@
|
|||||||
|
"""
|
||||||
|
Data Feed for Live Trading.
|
||||||
|
|
||||||
|
Fetches real-time OHLCV data from OKX and prepares features
|
||||||
|
for the regime strategy.
|
||||||
|
"""
|
||||||
|
import logging
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
import pandas as pd
|
||||||
|
import numpy as np
|
||||||
|
import ta
|
||||||
|
|
||||||
|
from .okx_client import OKXClient
|
||||||
|
from .config import TradingConfig, PathConfig
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class DataFeed:
    """
    Real-time data feed for the regime strategy.

    Fetches BTC and ETH OHLCV data from OKX and calculates
    the spread-based features required by the ML model.

    All timestamps are normalized to tz-aware UTC. Optional CryptoQuant
    on-chain columns are merged in when the CSV at
    ``path_config.cq_data_path`` exists.
    """

    def __init__(
        self,
        okx_client: OKXClient,
        trading_config: TradingConfig,
        path_config: PathConfig
    ):
        # Exchange client used for all OHLCV / funding-rate requests.
        self.client = okx_client
        self.config = trading_config
        self.paths = path_config
        # CryptoQuant on-chain data; stays None when the CSV is missing.
        self.cq_data: Optional[pd.DataFrame] = None
        self._load_cq_data()

    def _load_cq_data(self) -> None:
        """Load CryptoQuant on-chain data if available.

        Best-effort: on any failure ``self.cq_data`` is left as None so
        the feed still works with price-derived features only.
        """
        try:
            if self.paths.cq_data_path.exists():
                self.cq_data = pd.read_csv(
                    self.paths.cq_data_path,
                    index_col='timestamp',
                    parse_dates=True
                )
                # Normalize to tz-aware UTC so joins against exchange data align.
                if self.cq_data.index.tz is None:
                    self.cq_data.index = self.cq_data.index.tz_localize('UTC')
                logger.info(f"Loaded CryptoQuant data: {len(self.cq_data)} rows")
        except Exception as e:
            logger.warning(f"Could not load CryptoQuant data: {e}")
            self.cq_data = None

    def fetch_ohlcv_data(self) -> tuple[pd.DataFrame, pd.DataFrame]:
        """
        Fetch OHLCV data for BTC and ETH.

        Both requests use the configured timeframe and candle count.

        Returns:
            Tuple of (btc_df, eth_df) DataFrames indexed by UTC timestamp.
        """
        # Fetch BTC data
        btc_ohlcv = self.client.fetch_ohlcv(
            self.config.btc_symbol,
            self.config.timeframe,
            self.config.candles_to_fetch
        )
        btc_df = self._ohlcv_to_dataframe(btc_ohlcv)

        # Fetch ETH data
        eth_ohlcv = self.client.fetch_ohlcv(
            self.config.eth_symbol,
            self.config.timeframe,
            self.config.candles_to_fetch
        )
        eth_df = self._ohlcv_to_dataframe(eth_ohlcv)

        logger.info(
            f"Fetched {len(btc_df)} BTC candles and {len(eth_df)} ETH candles"
        )

        return btc_df, eth_df

    def _ohlcv_to_dataframe(self, ohlcv: list) -> pd.DataFrame:
        """Convert OHLCV list (rows of [ms-timestamp, o, h, l, c, v]) to a
        DataFrame indexed by UTC timestamp."""
        df = pd.DataFrame(
            ohlcv,
            columns=['timestamp', 'open', 'high', 'low', 'close', 'volume']
        )
        df['timestamp'] = pd.to_datetime(df['timestamp'], unit='ms', utc=True)
        df.set_index('timestamp', inplace=True)
        return df

    def calculate_features(
        self,
        btc_df: pd.DataFrame,
        eth_df: pd.DataFrame
    ) -> pd.DataFrame:
        """
        Calculate spread-based features for the regime strategy.

        Args:
            btc_df: BTC OHLCV DataFrame
            eth_df: ETH OHLCV DataFrame

        Returns:
            DataFrame with calculated features; rows with any NaN
            (e.g. rolling-window warm-up) are dropped.
        """
        # Align indices: only keep candles present for both assets.
        common_idx = btc_df.index.intersection(eth_df.index)
        df_btc = btc_df.loc[common_idx].copy()
        df_eth = eth_df.loc[common_idx].copy()

        # Calculate spread (ETH/BTC ratio)
        spread = df_eth['close'] / df_btc['close']

        # Z-Score of spread over the configured rolling window.
        z_window = self.config.z_window
        rolling_mean = spread.rolling(window=z_window).mean()
        rolling_std = spread.rolling(window=z_window).std()
        z_score = (spread - rolling_mean) / rolling_std

        # Spread technicals: RSI, 5-bar rate of change, 1-bar change.
        spread_rsi = ta.momentum.RSIIndicator(spread, window=14).rsi()
        spread_roc = spread.pct_change(periods=5) * 100
        spread_change_1h = spread.pct_change(periods=1)

        # Volume ratio (ETH vs BTC) and its 12-bar moving average.
        vol_ratio = df_eth['volume'] / df_btc['volume']
        vol_ratio_ma = vol_ratio.rolling(window=12).mean()

        # Volatility of each asset's returns, and their ratio.
        ret_btc = df_btc['close'].pct_change()
        ret_eth = df_eth['close'].pct_change()
        vol_btc = ret_btc.rolling(window=z_window).std()
        vol_eth = ret_eth.rolling(window=z_window).std()
        vol_spread_ratio = vol_eth / vol_btc

        # Build features DataFrame
        features = pd.DataFrame(index=spread.index)
        features['spread'] = spread
        features['z_score'] = z_score
        features['spread_rsi'] = spread_rsi
        features['spread_roc'] = spread_roc
        features['spread_change_1h'] = spread_change_1h
        features['vol_ratio'] = vol_ratio
        # Current volume ratio relative to its own moving average.
        features['vol_ratio_rel'] = vol_ratio / vol_ratio_ma
        features['vol_diff_ratio'] = vol_spread_ratio

        # Add price data for reference (excluded from model inputs downstream).
        features['btc_close'] = df_btc['close']
        features['eth_close'] = df_eth['close']
        features['eth_volume'] = df_eth['volume']

        # Merge CryptoQuant data if available
        if self.cq_data is not None:
            # Forward-fill on-chain values onto the candle timestamps.
            cq_aligned = self.cq_data.reindex(features.index, method='ffill')

            # Calculate derived features when the source columns exist.
            if 'btc_funding' in cq_aligned.columns and 'eth_funding' in cq_aligned.columns:
                cq_aligned['funding_diff'] = (
                    cq_aligned['eth_funding'] - cq_aligned['btc_funding']
                )
            if 'btc_inflow' in cq_aligned.columns and 'eth_inflow' in cq_aligned.columns:
                # +1 in the denominator guards against division by zero.
                cq_aligned['inflow_ratio'] = (
                    cq_aligned['eth_inflow'] / (cq_aligned['btc_inflow'] + 1)
                )

            features = features.join(cq_aligned)

        return features.dropna()

    def get_latest_data(self) -> Optional[pd.DataFrame]:
        """
        Fetch and process latest market data.

        Returns:
            DataFrame with features, or None when fetching/feature
            calculation fails (errors are logged, not raised).
        """
        try:
            btc_df, eth_df = self.fetch_ohlcv_data()
            features = self.calculate_features(btc_df, eth_df)

            if features.empty:
                logger.warning("No valid features calculated")
                return None

            logger.info(
                f"Latest data: ETH={features['eth_close'].iloc[-1]:.2f}, "
                f"BTC={features['btc_close'].iloc[-1]:.2f}, "
                f"Z-Score={features['z_score'].iloc[-1]:.3f}"
            )

            return features

        except Exception as e:
            logger.error(f"Error fetching market data: {e}", exc_info=True)
            return None

    def get_current_funding_rates(self) -> dict:
        """
        Get current funding rates for BTC and ETH.

        Returns:
            Dictionary with 'btc_funding', 'eth_funding' rates and their
            difference under 'funding_diff'.
        """
        btc_funding = self.client.get_funding_rate(self.config.btc_symbol)
        eth_funding = self.client.get_funding_rate(self.config.eth_symbol)

        return {
            'btc_funding': btc_funding,
            'eth_funding': eth_funding,
            'funding_diff': eth_funding - btc_funding,
        }
|
||||||
15
live_trading/env.template
Normal file
15
live_trading/env.template
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
# OKX API Credentials Template
|
||||||
|
# Copy this file to .env and fill in your credentials
|
||||||
|
# For demo trading, use your OKX demo account API keys
|
||||||
|
# Generate keys at: https://www.okx.com/account/my-api (Demo Trading section)
|
||||||
|
|
||||||
|
OKX_API_KEY=your_api_key_here
|
||||||
|
OKX_SECRET=your_secret_key_here
|
||||||
|
OKX_PASSWORD=your_passphrase_here
|
||||||
|
|
||||||
|
# Demo Mode: Set to "true" for paper trading (sandbox)
|
||||||
|
# Set to "false" for live trading with real funds
|
||||||
|
OKX_DEMO_MODE=true
|
||||||
|
|
||||||
|
# CryptoQuant API (optional, for on-chain features)
|
||||||
|
CRYPTOQUANT_API_KEY=your_cryptoquant_api_key_here
|
||||||
287
live_trading/live_regime_strategy.py
Normal file
287
live_trading/live_regime_strategy.py
Normal file
@@ -0,0 +1,287 @@
|
|||||||
|
"""
|
||||||
|
Live Regime Reversion Strategy.
|
||||||
|
|
||||||
|
Adapts the backtest regime strategy for live trading.
|
||||||
|
Uses a pre-trained ML model or trains on historical data.
|
||||||
|
"""
|
||||||
|
import logging
|
||||||
|
import pickle
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
import pandas as pd
|
||||||
|
from sklearn.ensemble import RandomForestClassifier
|
||||||
|
|
||||||
|
from .config import TradingConfig, PathConfig
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class LiveRegimeStrategy:
|
||||||
|
"""
|
||||||
|
Live trading implementation of the ML-based regime detection
|
||||||
|
and mean reversion strategy.
|
||||||
|
|
||||||
|
Logic:
|
||||||
|
1. Calculates BTC/ETH spread Z-Score
|
||||||
|
2. Uses Random Forest to predict reversion probability
|
||||||
|
3. Applies funding rate filter
|
||||||
|
4. Generates long/short signals on ETH perpetual
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
trading_config: TradingConfig,
|
||||||
|
path_config: PathConfig
|
||||||
|
):
|
||||||
|
self.config = trading_config
|
||||||
|
self.paths = path_config
|
||||||
|
self.model: Optional[RandomForestClassifier] = None
|
||||||
|
self.feature_cols: Optional[list] = None
|
||||||
|
self._load_or_train_model()
|
||||||
|
|
||||||
|
def _load_or_train_model(self) -> None:
|
||||||
|
"""Load pre-trained model or train a new one."""
|
||||||
|
if self.paths.model_path.exists():
|
||||||
|
try:
|
||||||
|
with open(self.paths.model_path, 'rb') as f:
|
||||||
|
saved = pickle.load(f)
|
||||||
|
self.model = saved['model']
|
||||||
|
self.feature_cols = saved['feature_cols']
|
||||||
|
logger.info(f"Loaded model from {self.paths.model_path}")
|
||||||
|
return
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Could not load model: {e}")
|
||||||
|
|
||||||
|
logger.info("No pre-trained model found. Will train on first data batch.")
|
||||||
|
|
||||||
|
def save_model(self) -> None:
|
||||||
|
"""Save trained model to file."""
|
||||||
|
if self.model is None:
|
||||||
|
return
|
||||||
|
|
||||||
|
try:
|
||||||
|
with open(self.paths.model_path, 'wb') as f:
|
||||||
|
pickle.dump({
|
||||||
|
'model': self.model,
|
||||||
|
'feature_cols': self.feature_cols,
|
||||||
|
}, f)
|
||||||
|
logger.info(f"Saved model to {self.paths.model_path}")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Could not save model: {e}")
|
||||||
|
|
||||||
|
    def train_model(self, features: pd.DataFrame) -> None:
        """
        Train the Random Forest model on historical data.

        Each bar is labeled 1 ("reversion succeeded") when the spread
        Z-score was beyond the entry threshold AND the spread subsequently
        moved at least ``profit_target`` in the mean-reverting direction
        within ``horizon`` bars; 0 otherwise. A class-weighted Random
        Forest is fit on the remaining feature columns and persisted via
        ``save_model``. Training is skipped (with a warning) when fewer
        than 100 valid samples remain.

        Args:
            features: DataFrame with calculated features
        """
        logger.info(f"Training model on {len(features)} samples...")

        z_thresh = self.config.z_entry_threshold
        horizon = 102  # Optimal horizon from research
        profit_target = 0.005  # 0.5% profit threshold

        # Define targets: rolling(horizon) + shift(-horizon) yields the
        # min/max of the spread over the NEXT `horizon` bars at each row.
        future_min = features['spread'].rolling(window=horizon).min().shift(-horizon)
        future_max = features['spread'].rolling(window=horizon).max().shift(-horizon)

        # Profit levels for a short (spread falls) / long (spread rises) setup.
        target_short = features['spread'] * (1 - profit_target)
        target_long = features['spread'] * (1 + profit_target)

        # A setup "succeeds" if the Z-score was stretched AND the spread
        # later crossed the corresponding profit level.
        success_short = (features['z_score'] > z_thresh) & (future_min < target_short)
        success_long = (features['z_score'] < -z_thresh) & (future_max > target_long)

        # Both directions map to label 1: the model predicts "reversion
        # works"; trade direction comes from the sign of the Z-score.
        targets = np.select([success_short, success_long], [1, 1], default=0)

        # Exclude non-feature columns (raw prices/volume are reference only).
        exclude = ['spread', 'btc_close', 'eth_close', 'eth_volume']
        self.feature_cols = [c for c in features.columns if c not in exclude]

        # Clean features: the model cannot handle NaN or infinite values.
        X = features[self.feature_cols].fillna(0)
        X = X.replace([np.inf, -np.inf], 0)

        # Remove rows with invalid targets (the tail lacks a full future window).
        valid_mask = ~np.isnan(targets) & future_min.notna().values & future_max.notna().values
        X_clean = X[valid_mask]
        y_clean = targets[valid_mask]

        if len(X_clean) < 100:
            logger.warning("Not enough data to train model")
            return

        # Train model; class_weight up-weights the rarer "success" class.
        self.model = RandomForestClassifier(
            n_estimators=300,
            max_depth=5,
            min_samples_leaf=30,
            class_weight={0: 1, 1: 3},
            random_state=42
        )
        self.model.fit(X_clean, y_clean)

        logger.info(f"Model trained on {len(X_clean)} samples")
        self.save_model()
|
||||||
|
|
||||||
|
def generate_signal(
    self,
    features: pd.DataFrame,
    current_funding: dict
) -> dict:
    """
    Produce a trading signal from the most recent feature row.

    Args:
        features: DataFrame with calculated features
        current_funding: Dictionary with funding rate data

    Returns:
        Signal dictionary with action, side, confidence, etc.
    """
    # Lazily train the model once enough history has accumulated.
    if self.model is None:
        if len(features) >= 200:
            self.train_model(features)
        else:
            return {'action': 'hold', 'reason': 'model_not_trained'}

    # Training may have declined to fit (too few valid rows).
    if self.model is None:
        return {'action': 'hold', 'reason': 'insufficient_data_for_training'}

    # Snapshot of the newest bar.
    last_row = features.iloc[-1]
    z_score = last_row['z_score']
    eth_price = last_row['eth_close']
    btc_price = last_row['btc_close']

    # Sanitize the newest feature vector before scoring it.
    model_input = features[self.feature_cols].iloc[[-1]].fillna(0)
    model_input = model_input.replace([np.inf, -np.inf], 0)

    # Probability of a successful mean reversion per the classifier.
    prob = self.model.predict_proba(model_input)[0, 1]

    z_thresh = self.config.z_entry_threshold
    prob_thresh = self.config.model_prob_threshold

    # Default to holding; entry/exit logic below mutates this in place.
    signal = dict(
        action='hold',
        side=None,
        probability=prob,
        z_score=z_score,
        eth_price=eth_price,
        btc_price=btc_price,
        reason='',
    )

    # Entry logic: model confidence gate first, then spread direction.
    if prob <= prob_thresh:
        signal['reason'] = f'prob={prob:.2f}<threshold'
    elif z_score > z_thresh:
        # Spread high (ETH expensive relative to BTC) -> Short ETH
        signal['action'] = 'entry'
        signal['side'] = 'short'
        signal['reason'] = f'z_score={z_score:.2f}>threshold, prob={prob:.2f}'
    elif z_score < -z_thresh:
        # Spread low (ETH cheap relative to BTC) -> Long ETH
        signal['action'] = 'entry'
        signal['side'] = 'long'
        signal['reason'] = f'z_score={z_score:.2f}<-threshold, prob={prob:.2f}'
    else:
        signal['reason'] = f'z_score={z_score:.2f} within threshold'

    # Funding-rate filter: veto entries into crowded/overheated trades.
    if signal['action'] == 'entry':
        btc_funding = current_funding.get('btc_funding', 0)
        funding_thresh = self.config.funding_threshold

        long_blocked = signal['side'] == 'long' and btc_funding > funding_thresh
        short_blocked = signal['side'] == 'short' and btc_funding < -funding_thresh

        if long_blocked:
            # High positive funding = overheated, don't go long
            signal['action'] = 'hold'
            signal['reason'] = f'funding_filter_blocked_long (funding={btc_funding:.4f})'
        elif short_blocked:
            # High negative funding = oversold, don't go short
            signal['action'] = 'hold'
            signal['reason'] = f'funding_filter_blocked_short (funding={btc_funding:.4f})'

    # Flag a potential exit once the spread has reverted toward its mean.
    if signal['action'] == 'hold' and abs(z_score) < 0.3:
        signal['action'] = 'check_exit'
        signal['reason'] = f'z_score_reverted_to_mean ({z_score:.2f})'

    logger.info(
        f"Signal: {signal['action']} {signal['side'] or ''} "
        f"(prob={prob:.2f}, z={z_score:.2f}, reason={signal['reason']})"
    )

    return signal
||||||
|
|
||||||
|
def calculate_position_size(
|
||||||
|
self,
|
||||||
|
signal: dict,
|
||||||
|
available_usdt: float
|
||||||
|
) -> float:
|
||||||
|
"""
|
||||||
|
Calculate position size based on signal confidence.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
signal: Signal dictionary with probability
|
||||||
|
available_usdt: Available USDT balance
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Position size in USDT
|
||||||
|
"""
|
||||||
|
prob = signal.get('probability', 0.5)
|
||||||
|
|
||||||
|
# Base size: if max_position_usdt <= 0, use all available funds
|
||||||
|
if self.config.max_position_usdt <= 0:
|
||||||
|
base_size = available_usdt
|
||||||
|
else:
|
||||||
|
base_size = min(available_usdt, self.config.max_position_usdt)
|
||||||
|
|
||||||
|
# Scale by probability (1.0x at 0.5 prob, up to 1.6x at 0.8 prob)
|
||||||
|
scale = 1.0 + (prob - 0.5) * 2.0
|
||||||
|
scale = max(1.0, min(scale, 2.0)) # Clamp between 1x and 2x
|
||||||
|
|
||||||
|
size = base_size * scale
|
||||||
|
|
||||||
|
# Ensure minimum position size
|
||||||
|
if size < self.config.min_position_usdt:
|
||||||
|
return 0.0
|
||||||
|
|
||||||
|
return min(size, available_usdt * 0.95) # Leave 5% buffer
|
||||||
|
|
||||||
|
def calculate_sl_tp(
    self,
    entry_price: float,
    side: str
) -> tuple[float, float]:
    """
    Derive stop-loss and take-profit levels for an entry.

    Args:
        entry_price: Entry price
        side: "long" or "short"

    Returns:
        Tuple of (stop_loss_price, take_profit_price)
    """
    sl_pct = self.config.stop_loss_pct
    tp_pct = self.config.take_profit_pct

    # Longs stop below / take profit above the entry; shorts are mirrored.
    direction = 1.0 if side == "long" else -1.0
    stop_loss = entry_price * (1 - direction * sl_pct)
    take_profit = entry_price * (1 + direction * tp_pct)

    return stop_loss, take_profit
|
||||||
390
live_trading/main.py
Normal file
390
live_trading/main.py
Normal file
@@ -0,0 +1,390 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Live Trading Bot for Regime Reversion Strategy on OKX.
|
||||||
|
|
||||||
|
This script runs the regime-based mean reversion strategy
|
||||||
|
on ETH perpetual futures using OKX exchange.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
# Run with demo account (default)
|
||||||
|
uv run python -m live_trading.main
|
||||||
|
|
||||||
|
# Run with specific settings
|
||||||
|
uv run python -m live_trading.main --max-position 500 --leverage 2
|
||||||
|
"""
|
||||||
|
import argparse
|
||||||
|
import logging
|
||||||
|
import signal
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
# Add parent directory to path for imports
|
||||||
|
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||||
|
|
||||||
|
from live_trading.config import get_config, OKXConfig, TradingConfig, PathConfig
|
||||||
|
from live_trading.okx_client import OKXClient
|
||||||
|
from live_trading.data_feed import DataFeed
|
||||||
|
from live_trading.position_manager import PositionManager
|
||||||
|
from live_trading.live_regime_strategy import LiveRegimeStrategy
|
||||||
|
|
||||||
|
|
||||||
|
def setup_logging(log_dir: Path) -> logging.Logger:
    """
    Configure root logging to write to both a log file and stdout.

    Args:
        log_dir: Directory that will hold ``live_trading.log``.
            Created (including parents) if it does not exist yet.

    Returns:
        A logger named after this module for the caller to use.
    """
    # Fix: FileHandler raises FileNotFoundError if the directory is missing,
    # so make sure it exists before configuring handlers (first-run safety).
    log_dir.mkdir(parents=True, exist_ok=True)
    log_file = log_dir / "live_trading.log"

    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
        handlers=[
            logging.FileHandler(log_file),
            logging.StreamHandler(sys.stdout),
        ],
        # force=True replaces any handlers installed by earlier imports.
        force=True
    )

    return logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class LiveTradingBot:
    """
    Main trading bot orchestrator.

    Coordinates data fetching, signal generation, and order execution
    in a continuous loop. One instance owns the exchange client, the
    data feed, the position manager, and the strategy, and drives them
    from `run()` until a shutdown signal flips `self.running`.
    """

    def __init__(
        self,
        okx_config: OKXConfig,
        trading_config: TradingConfig,
        path_config: PathConfig
    ):
        # Keep raw configs for component construction and the banner.
        self.okx_config = okx_config
        self.trading_config = trading_config
        self.path_config = path_config

        self.logger = logging.getLogger(__name__)
        # Loop flag; flipped to False by _handle_shutdown for graceful exit.
        self.running = True

        # Initialize components
        self.logger.info("Initializing trading bot components...")

        self.okx_client = OKXClient(okx_config, trading_config)
        self.data_feed = DataFeed(self.okx_client, trading_config, path_config)
        self.position_manager = PositionManager(
            self.okx_client, trading_config, path_config
        )
        self.strategy = LiveRegimeStrategy(trading_config, path_config)

        # Register signal handlers for graceful shutdown
        signal.signal(signal.SIGINT, self._handle_shutdown)
        signal.signal(signal.SIGTERM, self._handle_shutdown)

        self._print_startup_banner()

    def _print_startup_banner(self) -> None:
        """Print startup information (mode, pair, risk settings) to stdout."""
        mode = "DEMO/SANDBOX" if self.okx_config.demo_mode else "LIVE"

        print("=" * 60)
        print(f" Regime Reversion Strategy - Live Trading Bot")
        print("=" * 60)
        print(f" Mode: {mode}")
        print(f" Trading Pair: {self.trading_config.eth_symbol}")
        print(f" Context Pair: {self.trading_config.btc_symbol}")
        print(f" Timeframe: {self.trading_config.timeframe}")
        print(f" Max Position: ${self.trading_config.max_position_usdt if self.trading_config.max_position_usdt > 0 else 'All available'}")
        print(f" Leverage: {self.trading_config.leverage}x")
        print(f" Stop Loss: {self.trading_config.stop_loss_pct * 100:.1f}%")
        print(f" Take Profit: {self.trading_config.take_profit_pct * 100:.1f}%")
        print(f" Cycle Interval: {self.trading_config.sleep_seconds // 60} minutes")
        print("=" * 60)

        if not self.okx_config.demo_mode:
            print("\n *** WARNING: LIVE TRADING MODE - REAL FUNDS AT RISK ***\n")

    def _handle_shutdown(self, signum, frame) -> None:
        """Handle SIGINT/SIGTERM: stop the loop after the current cycle."""
        self.logger.info("Shutdown signal received, stopping...")
        self.running = False

    def run_trading_cycle(self) -> None:
        """
        Execute one trading cycle.

        1. Fetch latest market data
        2. Update open positions
        3. Generate trading signal
        4. Execute trades if signal triggers

        Any exception is caught so one bad cycle cannot kill the loop;
        positions are persisted on the error path.
        """
        cycle_start = datetime.now(timezone.utc)
        self.logger.info(f"--- Trading Cycle Start: {cycle_start.isoformat()} ---")

        try:
            # 1. Fetch market data
            features = self.data_feed.get_latest_data()
            if features is None or features.empty:
                self.logger.warning("No market data available, skipping cycle")
                return

            # Get current prices from the most recent feature row.
            eth_price = features['eth_close'].iloc[-1]
            btc_price = features['btc_close'].iloc[-1]

            current_prices = {
                self.trading_config.eth_symbol: eth_price,
                self.trading_config.btc_symbol: btc_price,
            }

            # 2. Update existing positions (check SL/TP)
            closed_trades = self.position_manager.update_positions(current_prices)
            if closed_trades:
                for trade in closed_trades:
                    self.logger.info(
                        f"Trade closed: {trade['trade_id']} "
                        f"PnL=${trade['pnl_usd']:.2f} ({trade['reason']})"
                    )

            # 3. Sync with exchange positions
            self.position_manager.sync_with_exchange()

            # 4. Get current funding rates
            funding = self.data_feed.get_current_funding_rates()

            # 5. Generate trading signal
            # NOTE(review): this local 'signal' shadows the imported `signal`
            # module within this method's scope (harmless here, but fragile).
            signal = self.strategy.generate_signal(features, funding)

            # 6. Execute trades based on signal
            if signal['action'] == 'entry':
                self._execute_entry(signal, eth_price)
            elif signal['action'] == 'check_exit':
                self._execute_exit(signal)

            # 7. Log portfolio summary
            summary = self.position_manager.get_portfolio_summary()
            self.logger.info(
                f"Portfolio: {summary['open_positions']} positions, "
                f"exposure=${summary['total_exposure_usdt']:.2f}, "
                f"unrealized_pnl=${summary['total_unrealized_pnl']:.2f}"
            )

        except Exception as e:
            self.logger.error(f"Trading cycle error: {e}", exc_info=True)
            # Save positions on error
            self.position_manager.save_positions()

        cycle_duration = (datetime.now(timezone.utc) - cycle_start).total_seconds()
        self.logger.info(f"--- Cycle completed in {cycle_duration:.1f}s ---")

    def _execute_entry(self, signal: dict, current_price: float) -> None:
        """
        Execute an entry trade for the configured ETH symbol.

        Args:
            signal: Strategy signal dict; must contain 'side' ("long"/"short").
            current_price: Latest ETH price, used for sizing and as the
                fallback fill price when the order response lacks one.
        """
        symbol = self.trading_config.eth_symbol
        side = signal['side']

        # Check if we can open a position
        if not self.position_manager.can_open_position():
            self.logger.info("Cannot open position: max positions reached")
            return

        # Get account balance
        # assumes get_balance() returns a dict with a 'free' USDT amount —
        # TODO confirm against OKXClient.
        balance = self.okx_client.get_balance()
        available_usdt = balance['free']

        # Calculate position size
        size_usdt = self.strategy.calculate_position_size(signal, available_usdt)
        if size_usdt <= 0:
            self.logger.info("Position size too small, skipping entry")
            return

        size_eth = size_usdt / current_price

        # Calculate SL/TP
        stop_loss, take_profit = self.strategy.calculate_sl_tp(current_price, side)

        self.logger.info(
            f"Executing {side.upper()} entry: {size_eth:.4f} ETH @ {current_price:.2f} "
            f"(${size_usdt:.2f}), SL={stop_loss:.2f}, TP={take_profit:.2f}"
        )

        try:
            # Place market order
            order_side = "buy" if side == "long" else "sell"
            order = self.okx_client.place_market_order(symbol, order_side, size_eth)

            # Get filled price (handle None values from OKX response)
            filled_price = order.get('average') or order.get('price') or current_price
            filled_amount = order.get('filled') or order.get('amount') or size_eth

            # Ensure we have valid numeric values
            if filled_price is None or filled_price == 0:
                self.logger.warning(f"No fill price in order response, using current price: {current_price}")
                filled_price = current_price
            if filled_amount is None or filled_amount == 0:
                self.logger.warning(f"No fill amount in order response, using requested: {size_eth}")
                filled_amount = size_eth

            # Recalculate SL/TP with filled price
            stop_loss, take_profit = self.strategy.calculate_sl_tp(filled_price, side)

            # Get order ID from response
            order_id = order.get('id', '')

            # Record position locally
            position = self.position_manager.open_position(
                symbol=symbol,
                side=side,
                entry_price=filled_price,
                size=filled_amount,
                stop_loss_price=stop_loss,
                take_profit_price=take_profit,
                order_id=order_id,
            )

            if position:
                self.logger.info(
                    f"Position opened: {position.trade_id}, "
                    f"{filled_amount:.4f} ETH @ {filled_price:.2f}"
                )

                # Try to set SL/TP on exchange; a failure here is non-fatal
                # because update_positions() also enforces SL/TP locally.
                try:
                    self.okx_client.set_stop_loss_take_profit(
                        symbol, side, filled_amount, stop_loss, take_profit
                    )
                except Exception as e:
                    self.logger.warning(f"Could not set SL/TP on exchange: {e}")

        except Exception as e:
            self.logger.error(f"Order execution failed: {e}", exc_info=True)

    def _execute_exit(self, signal: dict) -> None:
        """
        Close the open ETH position after a mean-reversion exit signal.

        Args:
            signal: Strategy signal dict; 'eth_price' is used as the exit
                mark, falling back to the position's last known price.
        """
        symbol = self.trading_config.eth_symbol

        # Get position for ETH; nothing to do if flat.
        position = self.position_manager.get_position_for_symbol(symbol)
        if not position:
            return

        current_price = signal.get('eth_price', position.current_price)

        self.logger.info(
            f"Mean reversion exit signal: closing {position.trade_id} "
            f"@ {current_price:.2f}"
        )

        try:
            # Close position on exchange
            exit_order = self.okx_client.close_position(symbol)
            exit_order_id = exit_order.get('id', '') if exit_order else ''

            # Record closure locally
            self.position_manager.close_position(
                position.trade_id,
                current_price,
                reason="mean_reversion_complete",
                exit_order_id=exit_order_id,
            )

        except Exception as e:
            self.logger.error(f"Exit execution failed: {e}", exc_info=True)

    def run(self) -> None:
        """
        Main trading loop: cycle, then sleep, until shutdown is requested.

        Unexpected errors are logged and the loop retries after 60s;
        positions are saved once on the way out.
        """
        self.logger.info("Starting trading loop...")

        while self.running:
            try:
                self.run_trading_cycle()

                if self.running:
                    sleep_seconds = self.trading_config.sleep_seconds
                    minutes = sleep_seconds // 60
                    self.logger.info(f"Sleeping for {minutes} minutes...")

                    # Sleep in smaller chunks to allow faster shutdown
                    for _ in range(sleep_seconds):
                        if not self.running:
                            break
                        time.sleep(1)

            except KeyboardInterrupt:
                self.logger.info("Keyboard interrupt received")
                break
            except Exception as e:
                self.logger.error(f"Unexpected error in main loop: {e}", exc_info=True)
                time.sleep(60)  # Wait before retry

        # Cleanup: persist open positions so a restart can resume them.
        self.logger.info("Shutting down...")
        self.position_manager.save_positions()
        self.logger.info("Shutdown complete")
|
||||||
|
|
||||||
|
|
||||||
|
def parse_args():
    """Parse command line arguments for the trading bot."""
    parser = argparse.ArgumentParser(
        description="Live Trading Bot for Regime Reversion Strategy"
    )

    # Numeric overrides for config defaults; None means "keep config value".
    for flag, value_type, text in (
        ("--max-position", float, "Maximum position size in USDT"),
        ("--leverage", int, "Trading leverage (1-125)"),
        ("--interval", int, "Trading cycle interval in seconds"),
    ):
        parser.add_argument(flag, type=value_type, default=None, help=text)

    parser.add_argument(
        "--live",
        action="store_true",
        help="Use live trading mode (requires OKX_DEMO_MODE=false)",
    )

    return parser.parse_args()
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Main entry point: wire config, logging, and the bot together."""
    args = parse_args()

    # Load configuration
    okx_config, trading_config, path_config = get_config()

    # Command-line flags win over environment/config defaults.
    for attr, value in (
        ("max_position_usdt", args.max_position),
        ("leverage", args.leverage),
        ("sleep_seconds", args.interval),
    ):
        if value is not None:
            setattr(trading_config, attr, value)
    if args.live:
        okx_config.demo_mode = False

    # Setup logging
    logger = setup_logging(path_config.logs_dir)

    try:
        # Create and run bot
        LiveTradingBot(okx_config, trading_config, path_config).run()
    except ValueError as e:
        logger.error(f"Configuration error: {e}")
        sys.exit(1)
    except Exception as e:
        logger.error(f"Fatal error: {e}", exc_info=True)
        sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
# Script entry point: `python -m live_trading.main` lands here.
if __name__ == "__main__":
    main()
|
||||||
145
live_trading/multi_pair/README.md
Normal file
145
live_trading/multi_pair/README.md
Normal file
@@ -0,0 +1,145 @@
|
|||||||
|
# Multi-Pair Divergence Live Trading
|
||||||
|
|
||||||
|
This module implements live trading for the Multi-Pair Divergence Selection Strategy on OKX perpetual futures.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The strategy scans 10 cryptocurrency assets (45 pair combinations) for spread divergence opportunities:
|
||||||
|
|
||||||
|
1. **Pair Universe**: Top 10 assets by market cap (BTC, ETH, SOL, XRP, BNB, DOGE, ADA, AVAX, LINK, DOT)
|
||||||
|
2. **Spread Z-Score**: Identifies when pairs are divergent from their historical mean
|
||||||
|
3. **Universal ML Model**: Predicts probability of successful mean reversion
|
||||||
|
4. **Dynamic Selection**: Trades the pair with highest divergence score
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
Before running live trading, you must train the model via backtesting:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
uv run python scripts/run_multi_pair_backtest.py
|
||||||
|
```
|
||||||
|
|
||||||
|
This creates `data/multi_pair_model.pkl` which the live trading bot requires.
|
||||||
|
|
||||||
|
## Setup
|
||||||
|
|
||||||
|
### 1. API Keys
|
||||||
|
|
||||||
|
Same as single-pair trading. Set in `.env`:
|
||||||
|
|
||||||
|
```env
|
||||||
|
OKX_API_KEY=your_api_key
|
||||||
|
OKX_SECRET=your_secret
|
||||||
|
OKX_PASSWORD=your_passphrase
|
||||||
|
OKX_DEMO_MODE=true # Use demo for testing
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Dependencies
|
||||||
|
|
||||||
|
All dependencies are in `pyproject.toml`. No additional installation needed.
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Run with Demo Account (Recommended First)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
uv run python -m live_trading.multi_pair.main
|
||||||
|
```
|
||||||
|
|
||||||
|
### Command Line Options
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Custom position size
|
||||||
|
uv run python -m live_trading.multi_pair.main --max-position 500
|
||||||
|
|
||||||
|
# Custom leverage
|
||||||
|
uv run python -m live_trading.multi_pair.main --leverage 2
|
||||||
|
|
||||||
|
# Custom cycle interval (in seconds)
|
||||||
|
uv run python -m live_trading.multi_pair.main --interval 1800
|
||||||
|
|
||||||
|
# Combine options
|
||||||
|
uv run python -m live_trading.multi_pair.main --max-position 1000 --leverage 3 --interval 3600
|
||||||
|
```
|
||||||
|
|
||||||
|
### Live Trading (Use with Caution)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
uv run python -m live_trading.multi_pair.main --live
|
||||||
|
```
|
||||||
|
|
||||||
|
## How It Works
|
||||||
|
|
||||||
|
### Each Trading Cycle
|
||||||
|
|
||||||
|
1. **Fetch Data**: Gets OHLCV for all 10 assets from OKX
|
||||||
|
2. **Calculate Features**: Computes Z-Score, RSI, volatility for all 45 pair combinations
|
||||||
|
3. **Score Pairs**: Uses ML model to rank pairs by divergence score (|Z| x probability)
|
||||||
|
4. **Check Exits**: If holding, check mean reversion or SL/TP
|
||||||
|
5. **Enter Best**: If no position, enter the highest-scoring divergent pair
|
||||||
|
|
||||||
|
### Entry Conditions
|
||||||
|
|
||||||
|
- |Z-Score| > 1.0 (spread diverged from mean)
|
||||||
|
- ML probability > 0.5 (model predicts successful reversion)
|
||||||
|
- Funding rate filter passes (avoid crowded trades)
|
||||||
|
|
||||||
|
### Exit Conditions
|
||||||
|
|
||||||
|
- Mean reversion: |Z-Score| returns to ~0
|
||||||
|
- Stop-loss: ATR-based (default ~6%)
|
||||||
|
- Take-profit: ATR-based (default ~5%)
|
||||||
|
|
||||||
|
## Strategy Parameters
|
||||||
|
|
||||||
|
| Parameter | Default | Description |
|
||||||
|
|-----------|---------|-------------|
|
||||||
|
| `z_entry_threshold` | 1.0 | Enter when \|Z-Score\| > threshold |
|
||||||
|
| `z_exit_threshold` | 0.0 | Exit when Z reverts to mean |
|
||||||
|
| `z_window` | 24 | Rolling window for Z-Score (hours) |
|
||||||
|
| `prob_threshold` | 0.5 | ML probability threshold for entry |
|
||||||
|
| `funding_threshold` | 0.0005 | Funding rate filter (0.05%) |
|
||||||
|
| `sl_atr_multiplier` | 10.0 | Stop-loss as ATR multiple |
|
||||||
|
| `tp_atr_multiplier` | 8.0 | Take-profit as ATR multiple |
|
||||||
|
|
||||||
|
## Files
|
||||||
|
|
||||||
|
### Input
|
||||||
|
|
||||||
|
- `data/multi_pair_model.pkl` - Pre-trained ML model (required)
|
||||||
|
|
||||||
|
### Output
|
||||||
|
|
||||||
|
- `logs/multi_pair_live.log` - Trading logs
|
||||||
|
- `live_trading/multi_pair_positions.json` - Position persistence
|
||||||
|
- `live_trading/multi_pair_trade_log.csv` - Trade history
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
```
|
||||||
|
live_trading/multi_pair/
|
||||||
|
__init__.py # Module exports
|
||||||
|
config.py # Configuration classes
|
||||||
|
data_feed.py # Multi-asset OHLCV fetcher
|
||||||
|
strategy.py # ML scoring and signal generation
|
||||||
|
main.py # Bot orchestrator
|
||||||
|
README.md # This file
|
||||||
|
```
|
||||||
|
|
||||||
|
## Differences from Single-Pair
|
||||||
|
|
||||||
|
| Aspect | Single-Pair | Multi-Pair |
|
||||||
|
|--------|-------------|------------|
|
||||||
|
| Assets | ETH only (BTC context) | 10 assets, 45 pairs |
|
||||||
|
| Model | ETH-specific | Universal across pairs |
|
||||||
|
| Selection | Fixed pair | Dynamic best pair |
|
||||||
|
| Stops | Fixed 6%/5% | ATR-based dynamic |
|
||||||
|
|
||||||
|
## Risk Warning
|
||||||
|
|
||||||
|
This is experimental trading software. Use at your own risk:
|
||||||
|
|
||||||
|
- Always start with demo trading
|
||||||
|
- Never risk more than you can afford to lose
|
||||||
|
- Monitor the bot regularly
|
||||||
|
- The model was trained on historical data and may not predict future performance
|
||||||
11
live_trading/multi_pair/__init__.py
Normal file
11
live_trading/multi_pair/__init__.py
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
"""Multi-Pair Divergence Live Trading Module."""
|
||||||
|
from .config import MultiPairLiveConfig, get_multi_pair_config
|
||||||
|
from .data_feed import MultiPairDataFeed
|
||||||
|
from .strategy import LiveMultiPairStrategy
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
"MultiPairLiveConfig",
|
||||||
|
"get_multi_pair_config",
|
||||||
|
"MultiPairDataFeed",
|
||||||
|
"LiveMultiPairStrategy",
|
||||||
|
]
|
||||||
145
live_trading/multi_pair/config.py
Normal file
145
live_trading/multi_pair/config.py
Normal file
@@ -0,0 +1,145 @@
|
|||||||
|
"""
|
||||||
|
Configuration for Multi-Pair Live Trading.
|
||||||
|
|
||||||
|
Extends the base live trading config with multi-pair specific settings.
|
||||||
|
"""
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from dotenv import load_dotenv
|
||||||
|
|
||||||
|
load_dotenv()
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class OKXConfig:
    """OKX API credentials, resolved from environment variables."""
    # These defaults are placeholders: __post_init__ always overwrites
    # every field from the environment.
    api_key: str = ""
    secret: str = ""
    password: str = ""
    demo_mode: bool = True

    def __post_init__(self):
        """Resolve credentials from the environment per the demo-mode flag."""
        self.demo_mode = os.getenv("OKX_DEMO_MODE", "true").lower() in ("true", "1", "yes")

        if self.demo_mode:
            # Demo keys fall back to the live keys when unset.
            self.api_key = os.getenv("OKX_DEMO_API_KEY", os.getenv("OKX_API_KEY", ""))
            self.secret = os.getenv("OKX_DEMO_SECRET", os.getenv("OKX_SECRET", ""))
            self.password = os.getenv("OKX_DEMO_PASSWORD", os.getenv("OKX_PASSWORD", ""))
        else:
            self.api_key = os.getenv("OKX_API_KEY", "")
            self.secret = os.getenv("OKX_SECRET", "")
            self.password = os.getenv("OKX_PASSWORD", "")

    def validate(self) -> None:
        """Raise ValueError for the first missing required credential."""
        mode = "demo" if self.demo_mode else "live"
        checks = (
            (self.api_key, f"OKX API key not set for {mode} mode"),
            (self.secret, f"OKX secret not set for {mode} mode"),
            (self.password, f"OKX password not set for {mode} mode"),
        )
        for value, message in checks:
            if not value:
                raise ValueError(message)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class MultiPairLiveConfig:
    """
    Configuration for multi-pair live trading.

    Combines trading parameters, strategy settings, and risk management.
    """
    # Asset universe: top-10 market-cap USDT perpetuals on OKX.
    assets: list[str] = field(default_factory=lambda: [
        "BTC/USDT:USDT", "ETH/USDT:USDT", "SOL/USDT:USDT", "XRP/USDT:USDT",
        "BNB/USDT:USDT", "DOGE/USDT:USDT", "ADA/USDT:USDT", "AVAX/USDT:USDT",
        "LINK/USDT:USDT", "DOT/USDT:USDT"
    ])

    # Candle settings.
    timeframe: str = "1h"
    candles_to_fetch: int = 500  # Enough history for feature calculation.

    # Spread Z-Score parameters.
    z_window: int = 24
    z_entry_threshold: float = 1.0
    z_exit_threshold: float = 0.0  # Exit when the spread reverts to mean.

    # ML model gate.
    prob_threshold: float = 0.5

    # Position sizing.
    max_position_usdt: float = -1.0  # If <= 0, use all available funds.
    min_position_usdt: float = 10.0
    leverage: int = 1
    margin_mode: str = "cross"
    max_concurrent_positions: int = 1  # Trade one pair at a time.

    # Risk management: ATR-based stop/target distances.
    atr_period: int = 14
    sl_atr_multiplier: float = 10.0
    tp_atr_multiplier: float = 8.0

    # Fixed-percentage fallbacks when ATR is unavailable.
    base_sl_pct: float = 0.06
    base_tp_pct: float = 0.05

    # Clamp bounds for the ATR-derived percentages.
    min_sl_pct: float = 0.02
    max_sl_pct: float = 0.10
    min_tp_pct: float = 0.02
    max_tp_pct: float = 0.15

    # Funding-rate filter (0.05%).
    funding_threshold: float = 0.0005

    # Trade management.
    min_hold_bars: int = 0
    cooldown_bars: int = 0

    # Execution cadence and cost assumptions.
    sleep_seconds: int = 3600  # Run every hour.
    slippage_pct: float = 0.001

    def get_asset_short_name(self, symbol: str) -> str:
        """Convert symbol to short name (e.g., BTC/USDT:USDT -> btc)."""
        base, _, _ = symbol.partition("/")
        return base.lower()

    def get_pair_count(self) -> int:
        """Calculate number of unique pairs from asset list."""
        asset_count = len(self.assets)
        return asset_count * (asset_count - 1) // 2
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class PathConfig:
    """Filesystem layout for multi-pair live trading artifacts."""
    # Repository root: three levels up from this module.
    base_dir: Path = field(
        default_factory=lambda: Path(__file__).parent.parent.parent
    )
    # The remaining paths are all derived from base_dir in __post_init__.
    data_dir: Path = None
    logs_dir: Path = None
    model_path: Path = None
    positions_file: Path = None
    trade_log_file: Path = None

    def __post_init__(self):
        live_dir = self.base_dir / "live_trading"
        self.data_dir = self.base_dir / "data"
        self.logs_dir = self.base_dir / "logs"
        # Shared artifact: backtesting trains and writes this same model.
        self.model_path = self.data_dir / "multi_pair_model.pkl"
        self.positions_file = live_dir / "multi_pair_positions.json"
        self.trade_log_file = live_dir / "multi_pair_trade_log.csv"

        # Ensure output directories exist up front.
        for directory in (self.data_dir, self.logs_dir):
            directory.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
|
||||||
|
def get_multi_pair_config() -> tuple[OKXConfig, MultiPairLiveConfig, PathConfig]:
    """Get all configuration objects for multi-pair trading."""
    # Construct the three config objects in the same order callers unpack them.
    return OKXConfig(), MultiPairLiveConfig(), PathConfig()
|
||||||
336
live_trading/multi_pair/data_feed.py
Normal file
336
live_trading/multi_pair/data_feed.py
Normal file
@@ -0,0 +1,336 @@
|
|||||||
|
"""
|
||||||
|
Multi-Pair Data Feed for Live Trading.
|
||||||
|
|
||||||
|
Fetches real-time OHLCV and funding data for all assets in the universe.
|
||||||
|
"""
|
||||||
|
import logging
|
||||||
|
from itertools import combinations
|
||||||
|
|
||||||
|
import pandas as pd
|
||||||
|
import numpy as np
|
||||||
|
import ta
|
||||||
|
|
||||||
|
from live_trading.okx_client import OKXClient
|
||||||
|
from .config import MultiPairLiveConfig, PathConfig
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class TradingPair:
    """
    Represents a tradeable pair for spread analysis.

    Attributes:
        base_asset: First asset symbol (e.g., ETH/USDT:USDT)
        quote_asset: Second asset symbol (e.g., BTC/USDT:USDT)
        pair_id: Unique identifier derived from both symbols

    Instances hash and compare by ``pair_id``, so they can be used in
    sets and as dictionary keys.
    """
    def __init__(self, base_asset: str, quote_asset: str):
        self.base_asset = base_asset
        self.quote_asset = quote_asset
        self.pair_id = f"{base_asset}__{quote_asset}"

    @property
    def name(self) -> str:
        """Human-readable pair name."""
        short_names = (s.split("/")[0] for s in (self.base_asset, self.quote_asset))
        return "/".join(short_names)

    def __hash__(self):
        return hash(self.pair_id)

    def __eq__(self, other):
        return isinstance(other, TradingPair) and self.pair_id == other.pair_id
|
||||||
|
|
||||||
|
|
||||||
|
class MultiPairDataFeed:
    """
    Real-time data feed for multi-pair strategy.

    Fetches OHLCV data for all assets and calculates spread features
    for all pair combinations.
    """

    def __init__(
        self,
        okx_client: OKXClient,
        config: MultiPairLiveConfig,
        path_config: PathConfig
    ):
        """Store dependencies, initialize caches, and pre-build the pair list.

        Args:
            okx_client: Exchange client used for OHLCV and funding requests.
            config: Strategy configuration (asset universe, windows, thresholds).
            path_config: File-system paths (held but not read in this class).
        """
        self.client = okx_client
        self.config = config
        self.paths = path_config

        # Cache for asset data; rebuilt from scratch on every fetch cycle
        self._asset_data: dict[str, pd.DataFrame] = {}
        self._funding_rates: dict[str, float] = {}
        self._pairs: list[TradingPair] = []

        # Generate pairs once — the asset universe is fixed for the session
        self._generate_pairs()

    def _generate_pairs(self) -> None:
        """Generate all unique pairs from asset universe."""
        self._pairs = []
        # combinations() yields each unordered pair exactly once -> n*(n-1)/2 pairs
        for base, quote in combinations(self.config.assets, 2):
            pair = TradingPair(base_asset=base, quote_asset=quote)
            self._pairs.append(pair)

        logger.info("Generated %d pairs from %d assets",
                    len(self._pairs), len(self.config.assets))

    @property
    def pairs(self) -> list[TradingPair]:
        """Get list of trading pairs."""
        return self._pairs

    def fetch_all_ohlcv(self) -> dict[str, pd.DataFrame]:
        """
        Fetch OHLCV data for all assets.

        Resets and repopulates the internal cache. Assets that fail to fetch,
        or return fewer than 200 candles, are skipped rather than raising.

        Returns:
            Dictionary mapping symbol to OHLCV DataFrame
        """
        self._asset_data = {}

        for symbol in self.config.assets:
            try:
                ohlcv = self.client.fetch_ohlcv(
                    symbol,
                    self.config.timeframe,
                    self.config.candles_to_fetch
                )
                df = self._ohlcv_to_dataframe(ohlcv)

                # 200 candles is the minimum history required by the rolling
                # features in calculate_pair_features — NOTE(review): hard-coded;
                # presumably should be derived from the config windows. Confirm.
                if len(df) >= 200:
                    self._asset_data[symbol] = df
                    logger.debug("Fetched %s: %d candles", symbol, len(df))
                else:
                    logger.warning("Skipping %s: insufficient data (%d)",
                                   symbol, len(df))
            except Exception as e:
                logger.error("Error fetching %s: %s", symbol, e)

        logger.info("Fetched data for %d/%d assets",
                    len(self._asset_data), len(self.config.assets))
        return self._asset_data

    def _ohlcv_to_dataframe(self, ohlcv: list) -> pd.DataFrame:
        """Convert OHLCV list to DataFrame indexed by UTC timestamp."""
        df = pd.DataFrame(
            ohlcv,
            columns=['timestamp', 'open', 'high', 'low', 'close', 'volume']
        )
        # Exchange timestamps arrive as epoch milliseconds
        df['timestamp'] = pd.to_datetime(df['timestamp'], unit='ms', utc=True)
        df.set_index('timestamp', inplace=True)
        return df

    def fetch_all_funding_rates(self) -> dict[str, float]:
        """
        Fetch current funding rates for all assets.

        Failures are non-fatal: a symbol whose rate cannot be read defaults
        to 0.0 so downstream feature math still works.

        Returns:
            Dictionary mapping symbol to funding rate
        """
        self._funding_rates = {}

        for symbol in self.config.assets:
            try:
                rate = self.client.get_funding_rate(symbol)
                self._funding_rates[symbol] = rate
            except Exception as e:
                logger.warning("Could not get funding for %s: %s", symbol, e)
                self._funding_rates[symbol] = 0.0

        return self._funding_rates

    def calculate_pair_features(
        self,
        pair: TradingPair
    ) -> pd.DataFrame | None:
        """
        Calculate features for a single pair.

        Aligns the two assets' candles on their common timestamps, builds the
        price-ratio spread, then derives z-score, momentum, volume, volatility
        and ATR features plus per-asset funding columns.

        Args:
            pair: Trading pair

        Returns:
            DataFrame with features, or None if insufficient data
        """
        base = pair.base_asset
        quote = pair.quote_asset

        if base not in self._asset_data or quote not in self._asset_data:
            return None

        df_base = self._asset_data[base]
        df_quote = self._asset_data[quote]

        # Align indices — only timestamps present for BOTH assets are used
        common_idx = df_base.index.intersection(df_quote.index)
        if len(common_idx) < 200:
            return None

        df_a = df_base.loc[common_idx]
        df_b = df_quote.loc[common_idx]

        # Calculate spread (base / quote) — a price ratio, not a difference
        spread = df_a['close'] / df_b['close']

        # Z-Score of the spread against its rolling mean/std
        z_window = self.config.z_window
        rolling_mean = spread.rolling(window=z_window).mean()
        rolling_std = spread.rolling(window=z_window).std()
        z_score = (spread - rolling_mean) / rolling_std

        # Spread Technicals (RSI / rate-of-change / 1-bar change)
        spread_rsi = ta.momentum.RSIIndicator(spread, window=14).rsi()
        spread_roc = spread.pct_change(periods=5) * 100
        spread_change_1h = spread.pct_change(periods=1)

        # Volume Analysis (epsilon guards against divide-by-zero)
        vol_ratio = df_a['volume'] / (df_b['volume'] + 1e-10)
        vol_ratio_ma = vol_ratio.rolling(window=12).mean()
        vol_ratio_rel = vol_ratio / (vol_ratio_ma + 1e-10)

        # Volatility of each leg's returns over the z-score window
        ret_a = df_a['close'].pct_change()
        ret_b = df_b['close'].pct_change()
        vol_a = ret_a.rolling(window=z_window).std()
        vol_b = ret_b.rolling(window=z_window).std()
        vol_spread_ratio = vol_a / (vol_b + 1e-10)

        # Realized Volatility over a fixed 24-bar window
        realized_vol_a = ret_a.rolling(window=24).std()
        realized_vol_b = ret_b.rolling(window=24).std()

        # ATR (Average True Range) — computed for the base asset only,
        # since that is the leg actually traded
        high_a, low_a, close_a = df_a['high'], df_a['low'], df_a['close']

        tr_a = pd.concat([
            high_a - low_a,
            (high_a - close_a.shift(1)).abs(),
            (low_a - close_a.shift(1)).abs()
        ], axis=1).max(axis=1)
        atr_a = tr_a.rolling(window=self.config.atr_period).mean()
        atr_pct_a = atr_a / close_a

        # Build feature DataFrame
        features = pd.DataFrame(index=common_idx)
        features['pair_id'] = pair.pair_id
        features['base_asset'] = base
        features['quote_asset'] = quote

        # Price data
        features['spread'] = spread
        features['base_close'] = df_a['close']
        features['quote_close'] = df_b['close']
        features['base_volume'] = df_a['volume']

        # Core Features
        features['z_score'] = z_score
        features['spread_rsi'] = spread_rsi
        features['spread_roc'] = spread_roc
        features['spread_change_1h'] = spread_change_1h
        features['vol_ratio'] = vol_ratio
        features['vol_ratio_rel'] = vol_ratio_rel
        features['vol_diff_ratio'] = vol_spread_ratio

        # Volatility
        features['realized_vol_base'] = realized_vol_a
        features['realized_vol_quote'] = realized_vol_b
        features['realized_vol_avg'] = (realized_vol_a + realized_vol_b) / 2

        # ATR
        features['atr_base'] = atr_a
        features['atr_pct_base'] = atr_pct_a

        # Pair encoding — integer index of each leg in the asset universe,
        # presumably matching the encoding used at model-training time (verify)
        assets = self.config.assets
        features['base_idx'] = assets.index(base) if base in assets else -1
        features['quote_idx'] = assets.index(quote) if quote in assets else -1

        # Funding rates — scalar snapshot values broadcast to every row
        base_funding = self._funding_rates.get(base, 0.0)
        quote_funding = self._funding_rates.get(quote, 0.0)
        features['base_funding'] = base_funding
        features['quote_funding'] = quote_funding
        features['funding_diff'] = base_funding - quote_funding
        features['funding_avg'] = (base_funding + quote_funding) / 2

        # Drop NaN rows in core features (rolling warm-up periods)
        core_cols = [
            'z_score', 'spread_rsi', 'spread_roc', 'spread_change_1h',
            'vol_ratio', 'vol_ratio_rel', 'vol_diff_ratio',
            'realized_vol_base', 'atr_base', 'atr_pct_base'
        ]
        features = features.dropna(subset=core_cols)

        return features

    def calculate_all_pair_features(self) -> dict[str, pd.DataFrame]:
        """
        Calculate features for all pairs.

        Pairs with missing data or empty feature frames are silently omitted.

        Returns:
            Dictionary mapping pair_id to feature DataFrame
        """
        all_features = {}

        for pair in self._pairs:
            features = self.calculate_pair_features(pair)
            if features is not None and len(features) > 0:
                all_features[pair.pair_id] = features

        logger.info("Calculated features for %d/%d pairs",
                    len(all_features), len(self._pairs))

        return all_features

    def get_latest_data(self) -> dict[str, pd.DataFrame] | None:
        """
        Fetch and process latest market data for all pairs.

        Orchestrates one full refresh: OHLCV for all assets, funding rates,
        then features for every pair.

        Returns:
            Dictionary of pair features or None on error
        """
        try:
            # Fetch OHLCV for all assets
            self.fetch_all_ohlcv()

            # Need at least two assets to form any pair
            if len(self._asset_data) < 2:
                logger.warning("Insufficient assets fetched")
                return None

            # Fetch funding rates
            self.fetch_all_funding_rates()

            # Calculate features for all pairs
            pair_features = self.calculate_all_pair_features()

            if not pair_features:
                logger.warning("No pair features calculated")
                return None

            logger.info("Processed %d pairs with valid features", len(pair_features))
            return pair_features

        except Exception as e:
            logger.error("Error fetching market data: %s", e, exc_info=True)
            return None

    def get_pair_by_id(self, pair_id: str) -> TradingPair | None:
        """Get pair object by ID."""
        # Linear scan is fine: the universe is small (tens of pairs)
        for pair in self._pairs:
            if pair.pair_id == pair_id:
                return pair
        return None

    def get_current_price(self, symbol: str) -> float | None:
        """Get current price for a symbol (close of last cached candle)."""
        if symbol in self._asset_data:
            return self._asset_data[symbol]['close'].iloc[-1]
        return None
|
||||||
609
live_trading/multi_pair/main.py
Normal file
609
live_trading/multi_pair/main.py
Normal file
@@ -0,0 +1,609 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Multi-Pair Divergence Live Trading Bot.
|
||||||
|
|
||||||
|
Trades the top 10 cryptocurrency pairs based on spread divergence
|
||||||
|
using a universal ML model for signal generation.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
# Run with demo account (default)
|
||||||
|
uv run python -m live_trading.multi_pair.main
|
||||||
|
|
||||||
|
# Run with specific settings
|
||||||
|
uv run python -m live_trading.multi_pair.main --max-position 500 --leverage 2
|
||||||
|
"""
|
||||||
|
import argparse
|
||||||
|
import logging
|
||||||
|
import signal
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
||||||
|
|
||||||
|
from live_trading.okx_client import OKXClient
|
||||||
|
from live_trading.position_manager import PositionManager
|
||||||
|
from live_trading.multi_pair.config import (
|
||||||
|
OKXConfig, MultiPairLiveConfig, PathConfig, get_multi_pair_config
|
||||||
|
)
|
||||||
|
from live_trading.multi_pair.data_feed import MultiPairDataFeed, TradingPair
|
||||||
|
from live_trading.multi_pair.strategy import LiveMultiPairStrategy
|
||||||
|
|
||||||
|
|
||||||
|
def setup_logging(log_dir: Path) -> logging.Logger:
    """Configure root logging to a file and stdout; return this module's logger."""
    log_file = log_dir / "multi_pair_live.log"

    # Duplicate every record to the log file and to the console.
    handlers = [
        logging.FileHandler(log_file),
        logging.StreamHandler(sys.stdout),
    ]
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
        handlers=handlers,
        force=True,
    )

    return logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class PositionState:
    """Track current position state for multi-pair.

    A default-constructed instance (pair is None) represents "flat".
    """
    pair: TradingPair | None = None      # pair being traded; None when no position is open
    pair_id: str | None = None           # cached pair.pair_id for signal lookups
    direction: str | None = None         # 'long' or 'short' (side taken on the base asset)
    entry_price: float = 0.0             # fill price of the entry order
    size: float = 0.0                    # position size (base-asset amount / contracts)
    stop_loss: float = 0.0               # absolute stop-loss price level
    take_profit: float = 0.0             # absolute take-profit price level
    entry_time: datetime | None = None   # UTC entry time; None for positions synced from exchange
|
||||||
|
|
||||||
|
|
||||||
|
class MultiPairLiveTradingBot:
|
||||||
|
"""
|
||||||
|
Main trading bot for multi-pair divergence strategy.
|
||||||
|
|
||||||
|
Coordinates data fetching, pair scoring, and order execution.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
okx_config: OKXConfig,
|
||||||
|
trading_config: MultiPairLiveConfig,
|
||||||
|
path_config: PathConfig
|
||||||
|
):
|
||||||
|
self.okx_config = okx_config
|
||||||
|
self.trading_config = trading_config
|
||||||
|
self.path_config = path_config
|
||||||
|
|
||||||
|
self.logger = logging.getLogger(__name__)
|
||||||
|
self.running = True
|
||||||
|
|
||||||
|
# Initialize components
|
||||||
|
self.logger.info("Initializing multi-pair trading bot...")
|
||||||
|
|
||||||
|
# Create OKX client with adapted config
|
||||||
|
self._adapted_trading_config = self._adapt_config_for_okx_client()
|
||||||
|
self.okx_client = OKXClient(okx_config, self._adapted_trading_config)
|
||||||
|
|
||||||
|
# Initialize data feed
|
||||||
|
self.data_feed = MultiPairDataFeed(
|
||||||
|
self.okx_client, trading_config, path_config
|
||||||
|
)
|
||||||
|
|
||||||
|
# Initialize position manager (reuse from single-pair)
|
||||||
|
self.position_manager = PositionManager(
|
||||||
|
self.okx_client, self._adapted_trading_config, path_config
|
||||||
|
)
|
||||||
|
|
||||||
|
# Initialize strategy
|
||||||
|
self.strategy = LiveMultiPairStrategy(trading_config, path_config)
|
||||||
|
|
||||||
|
# Current position state
|
||||||
|
self.position = PositionState()
|
||||||
|
|
||||||
|
# Register signal handlers
|
||||||
|
signal.signal(signal.SIGINT, self._handle_shutdown)
|
||||||
|
signal.signal(signal.SIGTERM, self._handle_shutdown)
|
||||||
|
|
||||||
|
self._print_startup_banner()
|
||||||
|
|
||||||
|
# Sync with exchange positions on startup
|
||||||
|
self._sync_position_from_exchange()
|
||||||
|
|
||||||
|
def _adapt_config_for_okx_client(self):
|
||||||
|
"""Create config compatible with OKXClient."""
|
||||||
|
# OKXClient expects specific attributes
|
||||||
|
@dataclass
|
||||||
|
class AdaptedConfig:
|
||||||
|
eth_symbol: str = "ETH/USDT:USDT"
|
||||||
|
btc_symbol: str = "BTC/USDT:USDT"
|
||||||
|
timeframe: str = "1h"
|
||||||
|
candles_to_fetch: int = 500
|
||||||
|
max_position_usdt: float = -1.0
|
||||||
|
min_position_usdt: float = 10.0
|
||||||
|
leverage: int = 1
|
||||||
|
margin_mode: str = "cross"
|
||||||
|
stop_loss_pct: float = 0.06
|
||||||
|
take_profit_pct: float = 0.05
|
||||||
|
max_concurrent_positions: int = 1
|
||||||
|
z_entry_threshold: float = 1.0
|
||||||
|
z_window: int = 24
|
||||||
|
model_prob_threshold: float = 0.5
|
||||||
|
funding_threshold: float = 0.0005
|
||||||
|
sleep_seconds: int = 3600
|
||||||
|
slippage_pct: float = 0.001
|
||||||
|
|
||||||
|
adapted = AdaptedConfig()
|
||||||
|
adapted.timeframe = self.trading_config.timeframe
|
||||||
|
adapted.candles_to_fetch = self.trading_config.candles_to_fetch
|
||||||
|
adapted.max_position_usdt = self.trading_config.max_position_usdt
|
||||||
|
adapted.min_position_usdt = self.trading_config.min_position_usdt
|
||||||
|
adapted.leverage = self.trading_config.leverage
|
||||||
|
adapted.margin_mode = self.trading_config.margin_mode
|
||||||
|
adapted.max_concurrent_positions = self.trading_config.max_concurrent_positions
|
||||||
|
adapted.sleep_seconds = self.trading_config.sleep_seconds
|
||||||
|
adapted.slippage_pct = self.trading_config.slippage_pct
|
||||||
|
|
||||||
|
return adapted
|
||||||
|
|
||||||
|
def _print_startup_banner(self) -> None:
|
||||||
|
"""Print startup information."""
|
||||||
|
mode = "DEMO/SANDBOX" if self.okx_config.demo_mode else "LIVE"
|
||||||
|
|
||||||
|
print("=" * 60)
|
||||||
|
print(" Multi-Pair Divergence Strategy - Live Trading Bot")
|
||||||
|
print("=" * 60)
|
||||||
|
print(f" Mode: {mode}")
|
||||||
|
print(f" Assets: {len(self.trading_config.assets)} assets")
|
||||||
|
print(f" Pairs: {self.trading_config.get_pair_count()} pairs")
|
||||||
|
print(f" Timeframe: {self.trading_config.timeframe}")
|
||||||
|
print(f" Max Position: ${self.trading_config.max_position_usdt if self.trading_config.max_position_usdt > 0 else 'All available'}")
|
||||||
|
print(f" Leverage: {self.trading_config.leverage}x")
|
||||||
|
print(f" Z-Entry: > {self.trading_config.z_entry_threshold}")
|
||||||
|
print(f" Prob Threshold: > {self.trading_config.prob_threshold}")
|
||||||
|
print(f" Cycle Interval: {self.trading_config.sleep_seconds // 60} minutes")
|
||||||
|
print("=" * 60)
|
||||||
|
print(f" Assets: {', '.join([a.split('/')[0] for a in self.trading_config.assets])}")
|
||||||
|
print("=" * 60)
|
||||||
|
|
||||||
|
if not self.okx_config.demo_mode:
|
||||||
|
print("\n *** WARNING: LIVE TRADING MODE - REAL FUNDS AT RISK ***\n")
|
||||||
|
|
||||||
|
def _handle_shutdown(self, signum, frame) -> None:
|
||||||
|
"""Handle shutdown signals gracefully."""
|
||||||
|
self.logger.info("Shutdown signal received, stopping...")
|
||||||
|
self.running = False
|
||||||
|
|
||||||
|
def _sync_position_from_exchange(self) -> bool:
|
||||||
|
"""
|
||||||
|
Sync internal position state with exchange positions.
|
||||||
|
|
||||||
|
Checks for existing open positions on the exchange and updates
|
||||||
|
internal state to match. This prevents stacking positions when
|
||||||
|
the bot is restarted.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if a position was synced, False otherwise
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
positions = self.okx_client.get_positions()
|
||||||
|
|
||||||
|
if not positions:
|
||||||
|
if self.position.pair is not None:
|
||||||
|
# Position was closed externally (e.g., SL/TP hit)
|
||||||
|
self.logger.info(
|
||||||
|
"Position %s was closed externally, resetting state",
|
||||||
|
self.position.pair.name if self.position.pair else "unknown"
|
||||||
|
)
|
||||||
|
self.position = PositionState()
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Check each position against our tradeable assets
|
||||||
|
our_assets = set(self.trading_config.assets)
|
||||||
|
|
||||||
|
for pos in positions:
|
||||||
|
pos_symbol = pos.get('symbol', '')
|
||||||
|
contracts = abs(float(pos.get('contracts', 0)))
|
||||||
|
|
||||||
|
if contracts == 0:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Check if this position is for one of our assets
|
||||||
|
if pos_symbol not in our_assets:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Found a position for one of our assets
|
||||||
|
side = pos.get('side', 'long')
|
||||||
|
entry_price = float(pos.get('entryPrice', 0))
|
||||||
|
unrealized_pnl = float(pos.get('unrealizedPnl', 0))
|
||||||
|
|
||||||
|
# If we already track this position, just update
|
||||||
|
if (self.position.pair is not None and
|
||||||
|
self.position.pair.base_asset == pos_symbol):
|
||||||
|
self.logger.debug(
|
||||||
|
"Position already tracked: %s %s %.2f contracts",
|
||||||
|
side, pos_symbol, contracts
|
||||||
|
)
|
||||||
|
return True
|
||||||
|
|
||||||
|
# New position found - sync it
|
||||||
|
# Find or create a TradingPair for this position
|
||||||
|
matched_pair = None
|
||||||
|
for pair in self.data_feed.pairs:
|
||||||
|
if pair.base_asset == pos_symbol:
|
||||||
|
matched_pair = pair
|
||||||
|
break
|
||||||
|
|
||||||
|
if matched_pair is None:
|
||||||
|
# Create a placeholder pair (we don't know the quote asset)
|
||||||
|
matched_pair = TradingPair(
|
||||||
|
base_asset=pos_symbol,
|
||||||
|
quote_asset="UNKNOWN"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Calculate approximate SL/TP based on config defaults
|
||||||
|
sl_pct = self.trading_config.base_sl_pct
|
||||||
|
tp_pct = self.trading_config.base_tp_pct
|
||||||
|
|
||||||
|
if side == 'long':
|
||||||
|
stop_loss = entry_price * (1 - sl_pct)
|
||||||
|
take_profit = entry_price * (1 + tp_pct)
|
||||||
|
else:
|
||||||
|
stop_loss = entry_price * (1 + sl_pct)
|
||||||
|
take_profit = entry_price * (1 - tp_pct)
|
||||||
|
|
||||||
|
self.position = PositionState(
|
||||||
|
pair=matched_pair,
|
||||||
|
pair_id=matched_pair.pair_id,
|
||||||
|
direction=side,
|
||||||
|
entry_price=entry_price,
|
||||||
|
size=contracts,
|
||||||
|
stop_loss=stop_loss,
|
||||||
|
take_profit=take_profit,
|
||||||
|
entry_time=None # Unknown for synced positions
|
||||||
|
)
|
||||||
|
|
||||||
|
self.logger.info(
|
||||||
|
"Synced existing position from exchange: %s %s %.4f @ %.4f (PnL: %.2f)",
|
||||||
|
side.upper(),
|
||||||
|
pos_symbol,
|
||||||
|
contracts,
|
||||||
|
entry_price,
|
||||||
|
unrealized_pnl
|
||||||
|
)
|
||||||
|
return True
|
||||||
|
|
||||||
|
# No matching positions found
|
||||||
|
if self.position.pair is not None:
|
||||||
|
self.logger.info(
|
||||||
|
"Position %s no longer exists on exchange, resetting state",
|
||||||
|
self.position.pair.name
|
||||||
|
)
|
||||||
|
self.position = PositionState()
|
||||||
|
|
||||||
|
return False
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
self.logger.error("Failed to sync position from exchange: %s", e)
|
||||||
|
return False
|
||||||
|
|
||||||
|
def run_trading_cycle(self) -> None:
|
||||||
|
"""
|
||||||
|
Execute one trading cycle.
|
||||||
|
|
||||||
|
1. Sync position state with exchange
|
||||||
|
2. Fetch latest market data for all assets
|
||||||
|
3. Calculate features for all pairs
|
||||||
|
4. Score pairs and find best opportunity
|
||||||
|
5. Check exit conditions for current position
|
||||||
|
6. Execute trades if needed
|
||||||
|
"""
|
||||||
|
cycle_start = datetime.now(timezone.utc)
|
||||||
|
self.logger.info("--- Trading Cycle Start: %s ---", cycle_start.isoformat())
|
||||||
|
|
||||||
|
try:
|
||||||
|
# 1. Sync position state with exchange (detect SL/TP closures)
|
||||||
|
self._sync_position_from_exchange()
|
||||||
|
|
||||||
|
# 2. Fetch all market data
|
||||||
|
pair_features = self.data_feed.get_latest_data()
|
||||||
|
if pair_features is None:
|
||||||
|
self.logger.warning("No market data available, skipping cycle")
|
||||||
|
return
|
||||||
|
|
||||||
|
# 2. Check exit conditions for current position
|
||||||
|
if self.position.pair is not None:
|
||||||
|
exit_signal = self.strategy.check_exit_signal(
|
||||||
|
pair_features,
|
||||||
|
self.position.pair_id
|
||||||
|
)
|
||||||
|
|
||||||
|
if exit_signal['action'] == 'exit':
|
||||||
|
self._execute_exit(exit_signal)
|
||||||
|
else:
|
||||||
|
# Check SL/TP
|
||||||
|
current_price = self.data_feed.get_current_price(
|
||||||
|
self.position.pair.base_asset
|
||||||
|
)
|
||||||
|
if current_price:
|
||||||
|
sl_tp_exit = self._check_sl_tp(current_price)
|
||||||
|
if sl_tp_exit:
|
||||||
|
self._execute_exit({'reason': sl_tp_exit})
|
||||||
|
|
||||||
|
# 3. Generate entry signal if no position
|
||||||
|
if self.position.pair is None:
|
||||||
|
entry_signal = self.strategy.generate_signal(
|
||||||
|
pair_features,
|
||||||
|
self.data_feed.pairs
|
||||||
|
)
|
||||||
|
|
||||||
|
if entry_signal['action'] == 'entry':
|
||||||
|
self._execute_entry(entry_signal)
|
||||||
|
|
||||||
|
# 4. Log status
|
||||||
|
if self.position.pair:
|
||||||
|
self.logger.info(
|
||||||
|
"Position: %s %s, entry=%.4f, current PnL check pending",
|
||||||
|
self.position.direction,
|
||||||
|
self.position.pair.name,
|
||||||
|
self.position.entry_price
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
self.logger.info("No open position")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
self.logger.error("Trading cycle error: %s", e, exc_info=True)
|
||||||
|
|
||||||
|
cycle_duration = (datetime.now(timezone.utc) - cycle_start).total_seconds()
|
||||||
|
self.logger.info("--- Cycle completed in %.1fs ---", cycle_duration)
|
||||||
|
|
||||||
|
def _check_sl_tp(self, current_price: float) -> str | None:
|
||||||
|
"""Check stop-loss and take-profit levels."""
|
||||||
|
if self.position.direction == 'long':
|
||||||
|
if current_price <= self.position.stop_loss:
|
||||||
|
return f"stop_loss ({current_price:.4f} <= {self.position.stop_loss:.4f})"
|
||||||
|
if current_price >= self.position.take_profit:
|
||||||
|
return f"take_profit ({current_price:.4f} >= {self.position.take_profit:.4f})"
|
||||||
|
else: # short
|
||||||
|
if current_price >= self.position.stop_loss:
|
||||||
|
return f"stop_loss ({current_price:.4f} >= {self.position.stop_loss:.4f})"
|
||||||
|
if current_price <= self.position.take_profit:
|
||||||
|
return f"take_profit ({current_price:.4f} <= {self.position.take_profit:.4f})"
|
||||||
|
return None
|
||||||
|
|
||||||
|
def _execute_entry(self, signal: dict) -> None:
|
||||||
|
"""Execute entry trade."""
|
||||||
|
pair = signal['pair']
|
||||||
|
symbol = pair.base_asset # Trade the base asset
|
||||||
|
direction = signal['direction']
|
||||||
|
|
||||||
|
self.logger.info(
|
||||||
|
"Entry signal: %s %s (z=%.2f, p=%.2f, score=%.3f)",
|
||||||
|
direction.upper(),
|
||||||
|
pair.name,
|
||||||
|
signal['z_score'],
|
||||||
|
signal['probability'],
|
||||||
|
signal['divergence_score']
|
||||||
|
)
|
||||||
|
|
||||||
|
# Get account balance
|
||||||
|
try:
|
||||||
|
balance = self.okx_client.get_balance()
|
||||||
|
available_usdt = balance['free']
|
||||||
|
except Exception as e:
|
||||||
|
self.logger.error("Could not get balance: %s", e)
|
||||||
|
return
|
||||||
|
|
||||||
|
# Calculate position size
|
||||||
|
size_usdt = self.strategy.calculate_position_size(
|
||||||
|
signal['divergence_score'],
|
||||||
|
available_usdt
|
||||||
|
)
|
||||||
|
|
||||||
|
if size_usdt <= 0:
|
||||||
|
self.logger.info("Position size too small, skipping entry")
|
||||||
|
return
|
||||||
|
|
||||||
|
current_price = signal['base_price']
|
||||||
|
size_asset = size_usdt / current_price
|
||||||
|
|
||||||
|
# Calculate SL/TP
|
||||||
|
stop_loss, take_profit = self.strategy.calculate_sl_tp(
|
||||||
|
current_price,
|
||||||
|
direction,
|
||||||
|
signal['atr'],
|
||||||
|
signal['atr_pct']
|
||||||
|
)
|
||||||
|
|
||||||
|
self.logger.info(
|
||||||
|
"Executing %s entry: %.6f %s @ %.4f ($%.2f), SL=%.4f, TP=%.4f",
|
||||||
|
direction.upper(),
|
||||||
|
size_asset,
|
||||||
|
symbol.split('/')[0],
|
||||||
|
current_price,
|
||||||
|
size_usdt,
|
||||||
|
stop_loss,
|
||||||
|
take_profit
|
||||||
|
)
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Place market order
|
||||||
|
order_side = "buy" if direction == "long" else "sell"
|
||||||
|
order = self.okx_client.place_market_order(symbol, order_side, size_asset)
|
||||||
|
|
||||||
|
filled_price = order.get('average') or order.get('price') or current_price
|
||||||
|
filled_amount = order.get('filled') or order.get('amount') or size_asset
|
||||||
|
|
||||||
|
if filled_price is None or filled_price == 0:
|
||||||
|
filled_price = current_price
|
||||||
|
if filled_amount is None or filled_amount == 0:
|
||||||
|
filled_amount = size_asset
|
||||||
|
|
||||||
|
# Recalculate SL/TP with filled price
|
||||||
|
stop_loss, take_profit = self.strategy.calculate_sl_tp(
|
||||||
|
filled_price, direction, signal['atr'], signal['atr_pct']
|
||||||
|
)
|
||||||
|
|
||||||
|
# Update position state
|
||||||
|
self.position = PositionState(
|
||||||
|
pair=pair,
|
||||||
|
pair_id=pair.pair_id,
|
||||||
|
direction=direction,
|
||||||
|
entry_price=filled_price,
|
||||||
|
size=filled_amount,
|
||||||
|
stop_loss=stop_loss,
|
||||||
|
take_profit=take_profit,
|
||||||
|
entry_time=datetime.now(timezone.utc)
|
||||||
|
)
|
||||||
|
|
||||||
|
self.logger.info(
|
||||||
|
"Position opened: %s %s %.6f @ %.4f",
|
||||||
|
direction.upper(),
|
||||||
|
pair.name,
|
||||||
|
filled_amount,
|
||||||
|
filled_price
|
||||||
|
)
|
||||||
|
|
||||||
|
# Try to set SL/TP on exchange
|
||||||
|
try:
|
||||||
|
self.okx_client.set_stop_loss_take_profit(
|
||||||
|
symbol, direction, filled_amount, stop_loss, take_profit
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
self.logger.warning("Could not set SL/TP on exchange: %s", e)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
self.logger.error("Order execution failed: %s", e, exc_info=True)
|
||||||
|
|
||||||
|
def _execute_exit(self, signal: dict) -> None:
    """Close the current position in response to an exit signal.

    No-op when no position is open. On success the in-memory position
    state is reset; on failure the state is left untouched so the next
    cycle can retry.
    """
    pos = self.position
    if pos.pair is None:
        return

    exit_symbol = pos.pair.base_asset
    exit_reason = signal.get('reason', 'unknown')

    self.logger.info(
        "Exit signal: %s %s, reason: %s",
        pos.direction,
        pos.pair.name,
        exit_reason
    )

    try:
        # Close position on exchange
        self.okx_client.close_position(exit_symbol)

        self.logger.info(
            "Position closed: %s %s",
            pos.direction,
            pos.pair.name
        )

        # Reset local position state only after the exchange call succeeded
        self.position = PositionState()

    except Exception as e:
        self.logger.error("Exit execution failed: %s", e, exc_info=True)
|
||||||
|
|
||||||
|
def run(self) -> None:
    """Main trading loop.

    Runs trading cycles until `self.running` is cleared, sleeping
    between cycles in one-second slices so shutdown stays responsive.
    Unexpected errors are logged and retried after a short backoff.
    """
    self.logger.info("Starting multi-pair trading loop...")

    while self.running:
        try:
            self.run_trading_cycle()

            if self.running:
                pause = self.trading_config.sleep_seconds
                self.logger.info("Sleeping for %d minutes...", pause // 60)

                # Sleep one second at a time so a shutdown request is
                # noticed promptly instead of after the full interval.
                for _tick in range(pause):
                    if not self.running:
                        break
                    time.sleep(1)

        except KeyboardInterrupt:
            self.logger.info("Keyboard interrupt received")
            break
        except Exception as e:
            self.logger.error("Unexpected error in main loop: %s", e, exc_info=True)
            # Back off briefly before retrying after an unexpected failure
            time.sleep(60)

    self.logger.info("Shutting down...")
    self.logger.info("Shutdown complete")
|
||||||
|
|
||||||
|
|
||||||
|
def parse_args():
    """Parse command-line options for the trading bot.

    Returns:
        argparse.Namespace with max_position, leverage, interval, live.
    """
    parser = argparse.ArgumentParser(
        description="Multi-Pair Divergence Live Trading Bot"
    )

    # Numeric overrides share the same shape (optional, default None),
    # so declare them in one place.
    numeric_options = (
        ("--max-position", float, "Maximum position size in USDT"),
        ("--leverage", int, "Trading leverage (1-125)"),
        ("--interval", int, "Trading cycle interval in seconds"),
    )
    for flag, value_type, help_text in numeric_options:
        parser.add_argument(flag, type=value_type, default=None, help=help_text)

    parser.add_argument(
        "--live",
        action="store_true",
        help="Use live trading mode (requires OKX_DEMO_MODE=false)",
    )
    return parser.parse_args()
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Main entry point: load config, apply CLI overrides, run the bot."""
    args = parse_args()

    # Load configuration
    okx_config, trading_config, path_config = get_multi_pair_config()

    # CLI flags take precedence over config-file values
    overrides = (
        ('max_position_usdt', args.max_position),
        ('leverage', args.leverage),
        ('sleep_seconds', args.interval),
    )
    for attr, value in overrides:
        if value is not None:
            setattr(trading_config, attr, value)
    if args.live:
        okx_config.demo_mode = False

    # Setup logging
    logger = setup_logging(path_config.logs_dir)

    try:
        # Validate config before touching the exchange
        okx_config.validate()

        # Create and run bot
        bot = MultiPairLiveTradingBot(okx_config, trading_config, path_config)
        bot.run()
    except ValueError as e:
        logger.error("Configuration error: %s", e)
        sys.exit(1)
    except Exception as e:
        logger.error("Fatal error: %s", e, exc_info=True)
        sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
# Script entry point: run the bot only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
|
||||||
396
live_trading/multi_pair/strategy.py
Normal file
396
live_trading/multi_pair/strategy.py
Normal file
@@ -0,0 +1,396 @@
|
|||||||
|
"""
|
||||||
|
Live Multi-Pair Divergence Strategy.
|
||||||
|
|
||||||
|
Scores all pairs and selects the best divergence opportunity for trading.
|
||||||
|
Uses the pre-trained universal ML model from backtesting.
|
||||||
|
"""
|
||||||
|
import logging
|
||||||
|
import pickle
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
import pandas as pd
|
||||||
|
import numpy as np
|
||||||
|
from sklearn.ensemble import RandomForestClassifier
|
||||||
|
|
||||||
|
# Opt-in to future pandas behavior to silence FutureWarning on fillna
|
||||||
|
pd.set_option('future.no_silent_downcasting', True)
|
||||||
|
|
||||||
|
from .config import MultiPairLiveConfig, PathConfig
|
||||||
|
from .data_feed import TradingPair
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class DivergenceSignal:
    """
    Signal for a divergent pair.

    Attributes:
        pair: Trading pair
        z_score: Current Z-Score of the spread
        probability: ML model probability of profitable reversion
        divergence_score: Combined score (|z_score| * probability)
        direction: 'long' or 'short' (relative to base asset)
        base_price: Current price of base asset
        quote_price: Current price of quote asset
        atr: Average True Range in price units
        atr_pct: ATR as percentage of price
        base_funding: Current funding rate of the base asset (decimal);
            0.0 when unavailable
    """
    pair: TradingPair
    z_score: float
    probability: float
    divergence_score: float
    direction: str
    base_price: float
    quote_price: float
    atr: float
    atr_pct: float
    base_funding: float = 0.0
|
||||||
|
|
||||||
|
|
||||||
|
class LiveMultiPairStrategy:
    """
    Live trading implementation of multi-pair divergence strategy.

    Scores all pairs using the universal ML model and selects
    the best opportunity for mean-reversion trading.
    """

    def __init__(
        self,
        config: MultiPairLiveConfig,
        path_config: PathConfig
    ):
        self.config = config
        self.paths = path_config
        self.model: RandomForestClassifier | None = None
        self.feature_cols: list[str] | None = None
        self._load_model()

    def _load_model(self) -> None:
        """
        Load the pre-trained universal model from backtesting.

        Raises:
            ValueError: If the model file does not exist, or exists but
                cannot be deserialized.
        """
        if not self.paths.model_path.exists():
            raise ValueError(
                f"Multi-pair model not found at {self.paths.model_path}. "
                "Run the backtest first to train the model."
            )
        try:
            with open(self.paths.model_path, 'rb') as f:
                saved = pickle.load(f)
            self.model = saved['model']
            self.feature_cols = saved['feature_cols']
            logger.info("Loaded model from %s", self.paths.model_path)
        except Exception as e:
            logger.error("Could not load model: %s", e)
            # BUG FIX: this branch previously raised "model not found" even
            # though the file exists; report the real failure and chain the
            # original exception for debuggability.
            raise ValueError(
                f"Multi-pair model at {self.paths.model_path} exists but "
                "could not be loaded. Re-run the backtest to regenerate it."
            ) from e

    def score_pairs(
        self,
        pair_features: dict[str, pd.DataFrame],
        pairs: list[TradingPair]
    ) -> list[DivergenceSignal]:
        """
        Score all pairs and return ranked signals.

        Args:
            pair_features: Feature DataFrames by pair_id
            pairs: List of TradingPair objects

        Returns:
            List of DivergenceSignal sorted by score (descending)
        """
        if self.model is None:
            logger.warning("Model not loaded")
            return []

        signals = []
        pair_map = {p.pair_id: p for p in pairs}

        for pair_id, features in pair_features.items():
            if pair_id not in pair_map:
                continue

            pair = pair_map[pair_id]

            # Get latest features
            if len(features) == 0:
                continue

            latest = features.iloc[-1]
            z_score = latest['z_score']

            # Skip if Z-score below threshold
            if abs(z_score) < self.config.z_entry_threshold:
                continue

            # Prepare features for prediction.
            # Handle missing feature columns gracefully.
            available_cols = [c for c in self.feature_cols if c in latest.index]
            missing_cols = [c for c in self.feature_cols if c not in latest.index]

            if missing_cols:
                logger.debug("Missing feature columns: %s", missing_cols)

            feature_row = latest[available_cols].fillna(0)
            feature_row = feature_row.replace([np.inf, -np.inf], 0)

            # Create full feature vector with zeros for missing columns so
            # the model always sees the column layout it was trained on.
            X_dict = {c: 0 for c in self.feature_cols}
            for col in available_cols:
                X_dict[col] = feature_row[col]

            X = pd.DataFrame([X_dict])

            # Predict probability of profitable reversion
            prob = self.model.predict_proba(X)[0, 1]

            # Skip if probability below threshold
            if prob < self.config.prob_threshold:
                continue

            # Apply funding rate filter.
            # BUG FIX: `or 0` does not catch NaN (NaN is truthy), which would
            # silently pass both funding comparisons below; normalize it.
            base_funding = latest.get('base_funding', 0) or 0
            if pd.isna(base_funding):
                base_funding = 0.0
            funding_thresh = self.config.funding_threshold

            if z_score > 0:  # Short signal
                if base_funding < -funding_thresh:
                    logger.debug(
                        "Skipping %s short: funding too negative (%.4f)",
                        pair.name, base_funding
                    )
                    continue
            else:  # Long signal
                if base_funding > funding_thresh:
                    logger.debug(
                        "Skipping %s long: funding too positive (%.4f)",
                        pair.name, base_funding
                    )
                    continue

            # Calculate divergence score
            divergence_score = abs(z_score) * prob

            # Determine direction: short the spread when it is stretched
            # upward, long when stretched downward
            direction = 'short' if z_score > 0 else 'long'

            signal = DivergenceSignal(
                pair=pair,
                z_score=z_score,
                probability=prob,
                divergence_score=divergence_score,
                direction=direction,
                base_price=latest['base_close'],
                quote_price=latest['quote_close'],
                atr=latest.get('atr_base', 0),
                atr_pct=latest.get('atr_pct_base', 0.02),
                base_funding=base_funding
            )
            signals.append(signal)

        # Sort by divergence score (highest first)
        signals.sort(key=lambda s: s.divergence_score, reverse=True)

        if signals:
            logger.info(
                "Scored %d pairs, top: %s (score=%.3f, z=%.2f, p=%.2f, dir=%s)",
                len(signals),
                signals[0].pair.name,
                signals[0].divergence_score,
                signals[0].z_score,
                signals[0].probability,
                signals[0].direction
            )
        else:
            logger.info("No pairs meet entry criteria")

        return signals

    def select_best_pair(
        self,
        signals: list[DivergenceSignal]
    ) -> DivergenceSignal | None:
        """
        Select the best pair from scored signals.

        Args:
            signals: List of DivergenceSignal (pre-sorted by score)

        Returns:
            Best signal or None if no valid candidates
        """
        if not signals:
            return None
        return signals[0]

    def generate_signal(
        self,
        pair_features: dict[str, pd.DataFrame],
        pairs: list[TradingPair]
    ) -> dict:
        """
        Generate trading signal from latest features.

        Args:
            pair_features: Feature DataFrames by pair_id
            pairs: List of TradingPair objects

        Returns:
            Signal dictionary with action, pair, direction, etc.
        """
        # Score all pairs
        signals = self.score_pairs(pair_features, pairs)

        # Select best
        best = self.select_best_pair(signals)

        if best is None:
            return {
                'action': 'hold',
                'reason': 'no_valid_signals'
            }

        return {
            'action': 'entry',
            'pair': best.pair,
            'pair_id': best.pair.pair_id,
            'direction': best.direction,
            'z_score': best.z_score,
            'probability': best.probability,
            'divergence_score': best.divergence_score,
            'base_price': best.base_price,
            'quote_price': best.quote_price,
            'atr': best.atr,
            'atr_pct': best.atr_pct,
            'base_funding': best.base_funding,
            'reason': f'{best.pair.name} z={best.z_score:.2f} p={best.probability:.2f}'
        }

    def check_exit_signal(
        self,
        pair_features: dict[str, pd.DataFrame],
        current_pair_id: str
    ) -> dict:
        """
        Check if current position should be exited.

        Exit conditions:
        1. Z-Score reverted to mean (|Z| < threshold)

        Args:
            pair_features: Feature DataFrames by pair_id
            current_pair_id: Current position's pair ID

        Returns:
            Signal dictionary with action and reason
        """
        if current_pair_id not in pair_features:
            return {
                'action': 'exit',
                'reason': 'pair_data_missing'
            }

        features = pair_features[current_pair_id]
        if len(features) == 0:
            return {
                'action': 'exit',
                'reason': 'no_data'
            }

        latest = features.iloc[-1]
        z_score = latest['z_score']

        # Check mean reversion
        if abs(z_score) < self.config.z_exit_threshold:
            return {
                'action': 'exit',
                'reason': f'mean_reversion (z={z_score:.2f})'
            }

        return {
            'action': 'hold',
            'z_score': z_score,
            'reason': f'holding (z={z_score:.2f})'
        }

    def calculate_sl_tp(
        self,
        entry_price: float,
        direction: str,
        atr: float,
        atr_pct: float
    ) -> tuple[float, float]:
        """
        Calculate ATR-based dynamic stop-loss and take-profit prices.

        Args:
            entry_price: Entry price
            direction: 'long' or 'short'
            atr: ATR in price units
            atr_pct: ATR as percentage of price

        Returns:
            Tuple of (stop_loss_price, take_profit_price)
        """
        if atr > 0 and atr_pct > 0:
            sl_distance = atr * self.config.sl_atr_multiplier
            tp_distance = atr * self.config.tp_atr_multiplier

            sl_pct = sl_distance / entry_price
            tp_pct = tp_distance / entry_price
        else:
            # No usable volatility estimate: fall back to static percentages
            sl_pct = self.config.base_sl_pct
            tp_pct = self.config.base_tp_pct

        # Apply bounds so extreme ATR readings cannot produce unreasonable stops
        sl_pct = max(self.config.min_sl_pct, min(sl_pct, self.config.max_sl_pct))
        tp_pct = max(self.config.min_tp_pct, min(tp_pct, self.config.max_tp_pct))

        if direction == 'long':
            stop_loss = entry_price * (1 - sl_pct)
            take_profit = entry_price * (1 + tp_pct)
        else:
            stop_loss = entry_price * (1 + sl_pct)
            take_profit = entry_price * (1 - tp_pct)

        return stop_loss, take_profit

    def calculate_position_size(
        self,
        divergence_score: float,
        available_usdt: float
    ) -> float:
        """
        Calculate position size based on divergence score.

        Args:
            divergence_score: Combined score (|z| * prob)
            available_usdt: Available USDT balance

        Returns:
            Position size in USDT (0.0 if below the minimum size)
        """
        if self.config.max_position_usdt <= 0:
            base_size = available_usdt
        else:
            base_size = min(available_usdt, self.config.max_position_usdt)

        # Scale by divergence (1.0 at 0.5 score, up to 2.0 at 1.0+ score)
        base_threshold = 0.5
        if divergence_score <= base_threshold:
            scale = 1.0
        else:
            scale = 1.0 + (divergence_score - base_threshold) / base_threshold
            scale = min(scale, 2.0)

        size = base_size * scale

        if size < self.config.min_position_usdt:
            return 0.0

        # Keep a 5% buffer of the available balance for fees/slippage
        return min(size, available_usdt * 0.95)
|
||||||
338
live_trading/okx_client.py
Normal file
338
live_trading/okx_client.py
Normal file
@@ -0,0 +1,338 @@
|
|||||||
|
"""
|
||||||
|
OKX Exchange Client for Live Trading.
|
||||||
|
|
||||||
|
Handles connection to OKX API, order execution, and account management.
|
||||||
|
Supports demo/sandbox mode for paper trading.
|
||||||
|
"""
|
||||||
|
import logging
|
||||||
|
from typing import Optional
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
|
||||||
|
import ccxt
|
||||||
|
|
||||||
|
from .config import OKXConfig, TradingConfig
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class OKXClient:
    """
    OKX Exchange client wrapper using CCXT.

    Supports both live and demo (sandbox) trading modes.
    Demo mode uses OKX's official sandbox environment.
    """

    def __init__(self, okx_config: OKXConfig, trading_config: TradingConfig):
        self.okx_config = okx_config
        self.trading_config = trading_config
        # CCXT exchange handle; created in _setup_exchange()
        self.exchange: Optional[ccxt.okx] = None
        self._setup_exchange()

    def _setup_exchange(self) -> None:
        """Initialize the CCXT OKX exchange instance and apply trading settings."""
        self.okx_config.validate()

        config = {
            'apiKey': self.okx_config.api_key,
            'secret': self.okx_config.secret,
            'password': self.okx_config.password,
            'sandbox': self.okx_config.demo_mode,
            'options': {
                'defaultType': 'swap',  # Perpetual futures
            },
            'timeout': 30000,
            'enableRateLimit': True,
        }

        self.exchange = ccxt.okx(config)

        mode_str = "DEMO/SANDBOX" if self.okx_config.demo_mode else "LIVE"
        # Lazy %-style logging args for consistency with the rest of the project
        logger.info("OKX Exchange initialized in %s mode", mode_str)

        # Configure trading settings
        self._configure_trading_settings()

    def _configure_trading_settings(self) -> None:
        """Configure position mode, margin mode, and leverage on the exchange."""
        symbol = self.trading_config.eth_symbol
        leverage = self.trading_config.leverage
        margin_mode = self.trading_config.margin_mode

        try:
            # Set position mode to one-way (net) first
            self.exchange.set_position_mode(False)  # False = one-way mode
            logger.info("Position mode set to One-Way (Net)")
        except Exception as e:
            # Position mode might already be set
            logger.debug("Position mode setting: %s", e)

        try:
            # Set margin mode with leverage parameter (required by OKX)
            self.exchange.set_margin_mode(
                margin_mode,
                symbol,
                params={'lever': leverage}
            )
            logger.info(
                "Margin mode set to %s with %sx leverage for %s",
                margin_mode, leverage, symbol
            )
        except Exception as e:
            logger.warning("Could not set margin mode: %s", e)
            # Try setting leverage separately
            try:
                self.exchange.set_leverage(leverage, symbol)
                logger.info("Leverage set to %sx for %s", leverage, symbol)
            except Exception as e2:
                logger.warning("Could not set leverage: %s", e2)

    def fetch_ohlcv(
        self,
        symbol: str,
        timeframe: str = "1h",
        limit: int = 500
    ) -> list:
        """
        Fetch OHLCV candle data.

        Args:
            symbol: Trading pair symbol (e.g., "ETH/USDT:USDT")
            timeframe: Candle timeframe (e.g., "1h")
            limit: Number of candles to fetch

        Returns:
            List of OHLCV data
        """
        return self.exchange.fetch_ohlcv(symbol, timeframe, limit=limit)

    def get_balance(self) -> dict:
        """
        Get account balance.

        Returns:
            Balance dictionary with 'total' and 'free' USDT amounts
        """
        balance = self.exchange.fetch_balance()
        return {
            'total': balance.get('USDT', {}).get('total', 0),
            'free': balance.get('USDT', {}).get('free', 0),
        }

    def get_positions(self) -> list:
        """
        Get open positions.

        Returns:
            List of open position dictionaries (zero-size entries filtered out)
        """
        positions = self.exchange.fetch_positions()
        return [p for p in positions if float(p.get('contracts', 0)) != 0]

    def get_position(self, symbol: str) -> Optional[dict]:
        """
        Get position for a specific symbol.

        Args:
            symbol: Trading pair symbol

        Returns:
            Position dictionary or None if no position
        """
        positions = self.get_positions()
        for pos in positions:
            if pos.get('symbol') == symbol:
                return pos
        return None

    def place_market_order(
        self,
        symbol: str,
        side: str,
        amount: float,
        reduce_only: bool = False
    ) -> dict:
        """
        Place a market order.

        Args:
            symbol: Trading pair symbol
            side: "buy" or "sell"
            amount: Order amount in base currency
            reduce_only: If True, only reduce existing position

        Returns:
            Order result dictionary
        """
        params = {
            'tdMode': self.trading_config.margin_mode,
        }
        if reduce_only:
            params['reduceOnly'] = True

        order = self.exchange.create_market_order(
            symbol, side, amount, params=params
        )
        logger.info(
            "Market %s order placed: %s %s @ market price, order_id=%s",
            side.upper(), amount, symbol, order['id']
        )
        return order

    def place_limit_order(
        self,
        symbol: str,
        side: str,
        amount: float,
        price: float,
        reduce_only: bool = False
    ) -> dict:
        """
        Place a limit order.

        Args:
            symbol: Trading pair symbol
            side: "buy" or "sell"
            amount: Order amount in base currency
            price: Limit price
            reduce_only: If True, only reduce existing position

        Returns:
            Order result dictionary
        """
        params = {
            'tdMode': self.trading_config.margin_mode,
        }
        if reduce_only:
            params['reduceOnly'] = True

        order = self.exchange.create_limit_order(
            symbol, side, amount, price, params=params
        )
        logger.info(
            "Limit %s order placed: %s %s @ %s, order_id=%s",
            side.upper(), amount, symbol, price, order['id']
        )
        return order

    def set_stop_loss_take_profit(
        self,
        symbol: str,
        side: str,
        amount: float,
        stop_loss_price: float,
        take_profit_price: float
    ) -> tuple:
        """
        Set stop-loss and take-profit orders.

        Args:
            symbol: Trading pair symbol
            side: Position side ("long" or "short")
            amount: Position size
            stop_loss_price: Stop-loss trigger price
            take_profit_price: Take-profit trigger price

        Returns:
            Tuple of (sl_order, tp_order); either element is None when
            the corresponding order could not be placed.
        """
        # For long position: SL sells, TP sells
        # For short position: SL buys, TP buys
        close_side = "sell" if side == "long" else "buy"

        # Stop-loss order.
        # (Removed a dead `sl_params` dict that was built here but never
        # passed to the exchange -- the real params are inline below.)
        try:
            sl_order = self.exchange.create_order(
                symbol, 'market', close_side, amount,
                params={
                    'tdMode': self.trading_config.margin_mode,
                    'reduceOnly': True,
                    'slTriggerPx': str(stop_loss_price),
                    'slOrdPx': '-1',  # Market price
                }
            )
            logger.info("Stop-loss set at %s", stop_loss_price)
        except Exception as e:
            logger.warning("Could not set stop-loss: %s", e)
            sl_order = None

        # Take-profit order
        try:
            tp_order = self.exchange.create_order(
                symbol, 'market', close_side, amount,
                params={
                    'tdMode': self.trading_config.margin_mode,
                    'reduceOnly': True,
                    'tpTriggerPx': str(take_profit_price),
                    'tpOrdPx': '-1',  # Market price
                }
            )
            logger.info("Take-profit set at %s", take_profit_price)
        except Exception as e:
            logger.warning("Could not set take-profit: %s", e)
            tp_order = None

        return sl_order, tp_order

    def close_position(self, symbol: str) -> Optional[dict]:
        """
        Close an open position.

        Args:
            symbol: Trading pair symbol

        Returns:
            Order result or None if no position
        """
        position = self.get_position(symbol)
        if not position:
            logger.info("No open position for %s", symbol)
            return None

        contracts = abs(float(position.get('contracts', 0)))
        if contracts == 0:
            return None

        side = position.get('side', 'long')
        close_side = "sell" if side == "long" else "buy"

        order = self.place_market_order(
            symbol, close_side, contracts, reduce_only=True
        )
        logger.info("Position closed for %s", symbol)
        return order

    def get_ticker(self, symbol: str) -> dict:
        """
        Get current ticker/price for a symbol.

        Args:
            symbol: Trading pair symbol

        Returns:
            Ticker dictionary with 'last', 'bid', 'ask' prices
        """
        return self.exchange.fetch_ticker(symbol)

    def get_funding_rate(self, symbol: str) -> float:
        """
        Get current funding rate for a perpetual symbol.

        Args:
            symbol: Trading pair symbol

        Returns:
            Current funding rate as decimal (0.0 on fetch failure)
        """
        try:
            funding = self.exchange.fetch_funding_rate(symbol)
            return float(funding.get('fundingRate', 0))
        except Exception as e:
            logger.warning("Could not fetch funding rate: %s", e)
            return 0.0
|
||||||
369
live_trading/position_manager.py
Normal file
369
live_trading/position_manager.py
Normal file
@@ -0,0 +1,369 @@
|
|||||||
|
"""
|
||||||
|
Position Manager for Live Trading.
|
||||||
|
|
||||||
|
Tracks open positions, manages risk, and handles SL/TP logic.
|
||||||
|
"""
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
from dataclasses import dataclass, field, asdict
|
||||||
|
|
||||||
|
from .okx_client import OKXClient
|
||||||
|
from .config import TradingConfig, PathConfig
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class Position:
    """An open trading position with SL/TP levels and PnL tracking."""
    trade_id: str
    symbol: str
    side: str  # "long" or "short"
    entry_price: float
    entry_time: str  # ISO format
    size: float  # Amount in base currency (e.g., ETH)
    size_usdt: float  # Notional value in USDT
    stop_loss_price: float
    take_profit_price: float
    current_price: float = 0.0
    unrealized_pnl: float = 0.0
    unrealized_pnl_pct: float = 0.0
    order_id: str = ""  # Entry order ID from exchange

    def update_pnl(self, current_price: float) -> None:
        """Recompute unrealized PnL (absolute and percent) at *current_price*."""
        self.current_price = current_price

        # A sign factor folds the long/short branches into one formula:
        # longs profit when price rises, shorts when it falls.
        sign = 1.0 if self.side == "long" else -1.0
        self.unrealized_pnl = sign * (current_price - self.entry_price) * self.size
        self.unrealized_pnl_pct = sign * (current_price / self.entry_price - 1) * 100

    def should_stop_loss(self, current_price: float) -> bool:
        """Return True when the price has crossed the stop-loss level."""
        return (
            current_price <= self.stop_loss_price
            if self.side == "long"
            else current_price >= self.stop_loss_price
        )

    def should_take_profit(self, current_price: float) -> bool:
        """Return True when the price has crossed the take-profit level."""
        return (
            current_price >= self.take_profit_price
            if self.side == "long"
            else current_price <= self.take_profit_price
        )

    def to_dict(self) -> dict:
        """Serialize the position to a plain dict (JSON-friendly)."""
        return asdict(self)

    @classmethod
    def from_dict(cls, data: dict) -> 'Position':
        """Rebuild a Position from a dict produced by :meth:`to_dict`."""
        return cls(**data)
|
||||||
|
|
||||||
|
|
||||||
|
class PositionManager:
    """
    Manages trading positions with persistence.

    Tracks open positions, enforces risk limits, and handles
    position lifecycle (open, update, close).
    """

    def __init__(
        self,
        okx_client: OKXClient,
        trading_config: TradingConfig,
        path_config: PathConfig
    ):
        """Wire up exchange client, risk config, and persisted state.

        Args:
            okx_client: Exchange client used to close positions / fetch tickers.
            trading_config: Risk limits (e.g. max_concurrent_positions).
            path_config: File locations for position and trade-log persistence.
        """
        self.client = okx_client
        self.config = trading_config
        self.paths = path_config
        # trade_id -> open Position; persisted across restarts via JSON file.
        self.positions: dict[str, Position] = {}
        # Closed-trade records accumulated during this process lifetime.
        self.trade_log: list[dict] = []
        self._load_positions()

    def _load_positions(self) -> None:
        """Load positions from the JSON state file, if present."""
        if self.paths.positions_file.exists():
            try:
                with open(self.paths.positions_file, 'r') as f:
                    data = json.load(f)
                for trade_id, pos_data in data.items():
                    self.positions[trade_id] = Position.from_dict(pos_data)
                logger.info(f"Loaded {len(self.positions)} positions from file")
            except Exception as e:
                # Corrupt/unreadable state is non-fatal: start fresh and warn.
                logger.warning(f"Could not load positions: {e}")

    def save_positions(self) -> None:
        """Save all open positions to the JSON state file."""
        try:
            data = {
                trade_id: pos.to_dict()
                for trade_id, pos in self.positions.items()
            }
            with open(self.paths.positions_file, 'w') as f:
                json.dump(data, f, indent=2)
            logger.debug(f"Saved {len(self.positions)} positions")
        except Exception as e:
            # Persistence failure must not interrupt trading; log and continue.
            logger.error(f"Could not save positions: {e}")

    def can_open_position(self) -> bool:
        """Check if we can open a new position under the concurrency limit."""
        return len(self.positions) < self.config.max_concurrent_positions

    def get_position_for_symbol(self, symbol: str) -> Optional[Position]:
        """Get the open position for a specific symbol, or None."""
        for pos in self.positions.values():
            if pos.symbol == symbol:
                return pos
        return None

    def open_position(
        self,
        symbol: str,
        side: str,
        entry_price: float,
        size: float,
        stop_loss_price: float,
        take_profit_price: float,
        order_id: str = ""
    ) -> Optional[Position]:
        """
        Open a new position.

        Args:
            symbol: Trading pair symbol
            side: "long" or "short"
            entry_price: Entry price
            size: Position size in base currency
            stop_loss_price: Stop-loss price
            take_profit_price: Take-profit price
            order_id: Entry order ID from exchange

        Returns:
            Position object, or None if the risk limit is reached or a
            position for the symbol already exists.
        """
        if not self.can_open_position():
            logger.warning("Cannot open position: max concurrent positions reached")
            return None

        # Enforce at most one open position per symbol.
        existing = self.get_position_for_symbol(symbol)
        if existing:
            logger.warning(f"Already have position for {symbol}")
            return None

        # Trade IDs are unique per symbol + second of entry.
        now = datetime.now(timezone.utc)
        trade_id = f"{symbol}_{now.strftime('%Y%m%d_%H%M%S')}"

        position = Position(
            trade_id=trade_id,
            symbol=symbol,
            side=side,
            entry_price=entry_price,
            entry_time=now.isoformat(),
            size=size,
            size_usdt=entry_price * size,
            stop_loss_price=stop_loss_price,
            take_profit_price=take_profit_price,
            current_price=entry_price,
            order_id=order_id,
        )

        self.positions[trade_id] = position
        self.save_positions()

        logger.info(
            f"Opened {side.upper()} position: {size} {symbol} @ {entry_price}, "
            f"SL={stop_loss_price}, TP={take_profit_price}"
        )

        return position

    def close_position(
        self,
        trade_id: str,
        exit_price: float,
        reason: str = "manual",
        exit_order_id: str = ""
    ) -> Optional[dict]:
        """
        Close a position and record the trade.

        Args:
            trade_id: Position trade ID
            exit_price: Exit price
            reason: Reason for closing (e.g., "stop_loss", "take_profit", "signal")
            exit_order_id: Exit order ID from exchange

        Returns:
            Trade record dictionary, or None if the trade ID is unknown.
        """
        if trade_id not in self.positions:
            logger.warning(f"Position {trade_id} not found")
            return None

        position = self.positions[trade_id]
        # Final mark-to-market at the exit price so the record reflects it.
        position.update_pnl(exit_price)

        entry_time = datetime.fromisoformat(position.entry_time)
        exit_time = datetime.now(timezone.utc)
        hold_duration = (exit_time - entry_time).total_seconds() / 3600  # hours

        trade_record = {
            'trade_id': trade_id,
            'symbol': position.symbol,
            'side': position.side,
            'entry_price': position.entry_price,
            'exit_price': exit_price,
            'size': position.size,
            'size_usdt': position.size_usdt,
            'pnl_usd': position.unrealized_pnl,
            'pnl_pct': position.unrealized_pnl_pct,
            'entry_time': position.entry_time,
            'exit_time': exit_time.isoformat(),
            'hold_duration_hours': hold_duration,
            'reason': reason,
            'order_id_entry': position.order_id,
            'order_id_exit': exit_order_id,
        }

        self.trade_log.append(trade_record)
        del self.positions[trade_id]
        self.save_positions()
        self._append_trade_log(trade_record)

        logger.info(
            f"Closed {position.side.upper()} position: {position.size} {position.symbol} "
            f"@ {exit_price}, PnL=${position.unrealized_pnl:.2f} ({position.unrealized_pnl_pct:.2f}%), "
            f"reason={reason}"
        )

        return trade_record

    def _append_trade_log(self, trade_record: dict) -> None:
        """Append trade record to the CSV log file, writing a header if new."""
        import csv

        file_exists = self.paths.trade_log_file.exists()

        with open(self.paths.trade_log_file, 'a', newline='') as f:
            writer = csv.DictWriter(f, fieldnames=trade_record.keys())
            if not file_exists:
                writer.writeheader()
            writer.writerow(trade_record)

    def _close_on_exchange(self, symbol: str) -> str:
        """Close a position on the exchange; return the exit order ID ('' on failure).

        Failures are logged but swallowed so local bookkeeping can still
        record the close; sync_with_exchange reconciles any drift later.
        """
        try:
            exit_order = self.client.close_position(symbol)
            return exit_order.get('id', '') if exit_order else ''
        except Exception as e:
            logger.error(f"Failed to close position on exchange: {e}")
            return ""

    def update_positions(self, current_prices: dict[str, float]) -> list[dict]:
        """
        Update all positions with current prices and check SL/TP.

        Args:
            current_prices: Dictionary of symbol -> current price

        Returns:
            List of closed trade records
        """
        closed_trades = []

        # Iterate over a snapshot: close_position mutates self.positions.
        for trade_id in list(self.positions.keys()):
            position = self.positions[trade_id]

            if position.symbol not in current_prices:
                continue

            current_price = current_prices[position.symbol]
            position.update_pnl(current_price)

            # Check stop-loss
            if position.should_stop_loss(current_price):
                logger.warning(
                    f"Stop-loss triggered for {trade_id} at {current_price}"
                )
                exit_order_id = self._close_on_exchange(position.symbol)
                record = self.close_position(trade_id, current_price, "stop_loss", exit_order_id)
                if record:
                    closed_trades.append(record)
                continue

            # Check take-profit
            if position.should_take_profit(current_price):
                logger.info(
                    f"Take-profit triggered for {trade_id} at {current_price}"
                )
                exit_order_id = self._close_on_exchange(position.symbol)
                record = self.close_position(trade_id, current_price, "take_profit", exit_order_id)
                if record:
                    closed_trades.append(record)

        # Persist updated marks even when nothing closed this pass.
        self.save_positions()
        return closed_trades

    def sync_with_exchange(self) -> None:
        """
        Sync local positions with exchange positions.

        Reconciles any discrepancies between local tracking
        and actual exchange positions.
        """
        try:
            exchange_positions = self.client.get_positions()
            exchange_symbols = {p['symbol'] for p in exchange_positions}

            # Drop local positions that no longer exist on the exchange.
            for trade_id in list(self.positions.keys()):
                pos = self.positions[trade_id]
                if pos.symbol not in exchange_symbols:
                    logger.warning(
                        f"Position {trade_id} not found on exchange, removing"
                    )
                    # Close at the last market price if a ticker is available,
                    # otherwise fall back to the last locally-seen price.
                    try:
                        ticker = self.client.get_ticker(pos.symbol)
                        exit_price = ticker['last']
                    except Exception:
                        exit_price = pos.current_price

                    self.close_position(trade_id, exit_price, "sync_removed")

            logger.info(f"Position sync complete: {len(self.positions)} local positions")

        except Exception as e:
            logger.error(f"Position sync failed: {e}")

    def get_portfolio_summary(self) -> dict:
        """
        Get portfolio summary.

        Returns:
            Dictionary with portfolio statistics
        """
        total_exposure = sum(p.size_usdt for p in self.positions.values())
        total_unrealized_pnl = sum(p.unrealized_pnl for p in self.positions.values())

        return {
            'open_positions': len(self.positions),
            'total_exposure_usdt': total_exposure,
            'total_unrealized_pnl': total_unrealized_pnl,
            'positions': [p.to_dict() for p in self.positions.values()],
        }
|
||||||
@@ -15,6 +15,10 @@ dependencies = [
|
|||||||
"plotly>=5.24.0",
|
"plotly>=5.24.0",
|
||||||
"requests>=2.32.5",
|
"requests>=2.32.5",
|
||||||
"python-dotenv>=1.2.1",
|
"python-dotenv>=1.2.1",
|
||||||
|
# API dependencies
|
||||||
|
"fastapi>=0.115.0",
|
||||||
|
"uvicorn[standard]>=0.34.0",
|
||||||
|
"sqlalchemy>=2.0.0",
|
||||||
]
|
]
|
||||||
|
|
||||||
[project.optional-dependencies]
|
[project.optional-dependencies]
|
||||||
|
|||||||
@@ -1,384 +1,342 @@
|
|||||||
|
"""
|
||||||
|
Regime Detection Research Script with Walk-Forward Training.
|
||||||
|
|
||||||
|
Tests multiple holding horizons to find optimal parameters
|
||||||
|
without look-ahead bias.
|
||||||
|
"""
|
||||||
import sys
|
import sys
|
||||||
import os
|
import os
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
# Add project root to path
|
|
||||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
import pandas as pd
|
import pandas as pd
|
||||||
import numpy as np
|
import numpy as np
|
||||||
import ta
|
import ta
|
||||||
from sklearn.ensemble import RandomForestClassifier
|
from sklearn.ensemble import RandomForestClassifier
|
||||||
from sklearn.model_selection import train_test_split
|
from sklearn.metrics import classification_report, f1_score
|
||||||
from sklearn.metrics import classification_report, confusion_matrix
|
|
||||||
import plotly.graph_objects as go
|
|
||||||
from plotly.subplots import make_subplots
|
|
||||||
|
|
||||||
from engine.data_manager import DataManager
|
from engine.data_manager import DataManager
|
||||||
from engine.market import MarketType
|
from engine.market import MarketType
|
||||||
|
from engine.logging_config import get_logger
|
||||||
|
|
||||||
def prepare_data(symbol_a="BTC-USDT", symbol_b="ETH-USDT", timeframe="1h", limit=None, start_date=None, end_date=None):
|
logger = get_logger(__name__)
|
||||||
"""
|
|
||||||
Load and align data for two assets to create a pair.
|
# Configuration
|
||||||
"""
|
TRAIN_RATIO = 0.7 # 70% train, 30% test
|
||||||
|
PROFIT_THRESHOLD = 0.005 # 0.5% profit target
|
||||||
|
Z_WINDOW = 24
|
||||||
|
FEE_RATE = 0.001 # 0.1% round-trip fee
|
||||||
|
|
||||||
|
|
||||||
|
def load_data():
    """Load and align BTC/ETH data."""
    dm = DataManager()

    df_btc = dm.load_data("okx", "BTC-USDT", "1h", MarketType.SPOT)
    df_eth = dm.load_data("okx", "ETH-USDT", "1h", MarketType.SPOT)

    # Filter to Oct-Dec 2025
    start = pd.Timestamp("2025-10-01", tz="UTC")
    end = pd.Timestamp("2025-12-31", tz="UTC")

    window_btc = (df_btc.index >= start) & (df_btc.index <= end)
    window_eth = (df_eth.index >= start) & (df_eth.index <= end)
    df_btc = df_btc[window_btc]
    df_eth = df_eth[window_eth]

    # Keep only timestamps present in both series so bars line up 1:1.
    common = df_btc.index.intersection(df_eth.index)
    df_btc = df_btc.loc[common]
    df_eth = df_eth.loc[common]

    logger.info(f"Loaded {len(common)} aligned hourly bars")
    return df_btc, df_eth
|
||||||
|
|
||||||
|
|
||||||
|
def load_cryptoquant_data():
    """Load CryptoQuant on-chain data if available."""
    try:
        cq_path = "data/cq_training_data.csv"
        cq_df = pd.read_csv(cq_path, index_col='timestamp', parse_dates=True)
        # Make the index timezone-aware so it joins cleanly with market data.
        if cq_df.index.tz is None:
            cq_df.index = cq_df.index.tz_localize('UTC')
        logger.info(f"Loaded CryptoQuant data: {len(cq_df)} rows")
        return cq_df
    except Exception as e:
        # On-chain data is optional: warn and let the caller proceed without it.
        logger.warning(f"CryptoQuant data not available: {e}")
        return None
|
||||||
if df.index.min() > mask_start:
|
|
||||||
print(f"Local data starts {df.index.min()}, need {mask_start}. Downloading...")
|
|
||||||
df = dm.download_data("okx", symbol, timeframe, start_date=start_date, end_date=end_date, market_type=MarketType.SPOT)
|
|
||||||
return df
|
|
||||||
|
|
||||||
df_a = get_df(symbol_a)
|
|
||||||
df_b = get_df(symbol_b)
|
|
||||||
|
|
||||||
# Filter by date if provided (to match CQ data range)
|
|
||||||
if start_date:
|
|
||||||
df_a = df_a[df_a.index >= pd.Timestamp(start_date, tz='UTC')]
|
|
||||||
df_b = df_b[df_b.index >= pd.Timestamp(start_date, tz='UTC')]
|
|
||||||
|
|
||||||
if end_date:
|
|
||||||
df_a = df_a[df_a.index <= pd.Timestamp(end_date, tz='UTC')]
|
|
||||||
df_b = df_b[df_b.index <= pd.Timestamp(end_date, tz='UTC')]
|
|
||||||
|
|
||||||
# Align DataFrames
|
|
||||||
print("Aligning data...")
|
|
||||||
common_index = df_a.index.intersection(df_b.index)
|
|
||||||
df_a = df_a.loc[common_index].copy()
|
|
||||||
df_b = df_b.loc[common_index].copy()
|
|
||||||
|
|
||||||
if limit:
|
|
||||||
df_a = df_a.tail(limit)
|
|
||||||
df_b = df_b.tail(limit)
|
|
||||||
|
|
||||||
return df_a, df_b
|
|
||||||
|
|
||||||
def load_cryptoquant_data(file_path: str) -> pd.DataFrame | None:
|
|
||||||
"""
|
|
||||||
Load CryptoQuant data and prepare it for merging.
|
|
||||||
"""
|
|
||||||
if not os.path.exists(file_path):
|
|
||||||
print(f"Warning: CQ data file {file_path} not found.")
|
|
||||||
return None
|
return None
|
||||||
|
|
||||||
print(f"Loading CryptoQuant data from {file_path}...")
|
|
||||||
df = pd.read_csv(file_path, index_col='timestamp', parse_dates=True)
|
|
||||||
|
|
||||||
# CQ data is usually daily (UTC 00:00).
|
def calculate_features(df_btc, df_eth, cq_df=None):
|
||||||
# Ensure index is timezone aware to match market data
|
"""Calculate all features for the model."""
|
||||||
if df.index.tz is None:
|
spread = df_eth['close'] / df_btc['close']
|
||||||
df.index = df.index.tz_localize('UTC')
|
|
||||||
|
|
||||||
return df
|
# Z-Score
|
||||||
|
rolling_mean = spread.rolling(window=Z_WINDOW).mean()
|
||||||
def calculate_features(df_a, df_b, cq_df=None, window=24):
|
rolling_std = spread.rolling(window=Z_WINDOW).std()
|
||||||
"""
|
|
||||||
Calculate spread, z-score, and advanced regime features including CQ data.
|
|
||||||
"""
|
|
||||||
# 1. Price Ratio (Spread)
|
|
||||||
spread = df_b['close'] / df_a['close']
|
|
||||||
|
|
||||||
# 2. Rolling Statistics for Z-Score
|
|
||||||
rolling_mean = spread.rolling(window=window).mean()
|
|
||||||
rolling_std = spread.rolling(window=window).std()
|
|
||||||
z_score = (spread - rolling_mean) / rolling_std
|
z_score = (spread - rolling_mean) / rolling_std
|
||||||
|
|
||||||
# 3. Spread Momentum / Technicals
|
# Technicals
|
||||||
spread_rsi = ta.momentum.RSIIndicator(spread, window=14).rsi()
|
spread_rsi = ta.momentum.RSIIndicator(spread, window=14).rsi()
|
||||||
spread_roc = spread.pct_change(periods=5) * 100
|
spread_roc = spread.pct_change(periods=5) * 100
|
||||||
|
spread_change_1h = spread.pct_change(periods=1)
|
||||||
|
|
||||||
# 4. Volume Dynamics
|
# Volume
|
||||||
vol_ratio = df_b['volume'] / df_a['volume']
|
vol_ratio = df_eth['volume'] / df_btc['volume']
|
||||||
vol_ratio_ma = vol_ratio.rolling(window=12).mean()
|
vol_ratio_ma = vol_ratio.rolling(window=12).mean()
|
||||||
|
|
||||||
# 5. Volatility Regime
|
# Volatility
|
||||||
ret_a = df_a['close'].pct_change()
|
ret_btc = df_btc['close'].pct_change()
|
||||||
ret_b = df_b['close'].pct_change()
|
ret_eth = df_eth['close'].pct_change()
|
||||||
vol_a = ret_a.rolling(window=window).std()
|
vol_btc = ret_btc.rolling(window=Z_WINDOW).std()
|
||||||
vol_b = ret_b.rolling(window=window).std()
|
vol_eth = ret_eth.rolling(window=Z_WINDOW).std()
|
||||||
vol_spread_ratio = vol_b / vol_a
|
vol_spread_ratio = vol_eth / vol_btc
|
||||||
|
|
||||||
# Create feature DataFrame
|
|
||||||
features = pd.DataFrame(index=spread.index)
|
features = pd.DataFrame(index=spread.index)
|
||||||
features['spread'] = spread
|
features['spread'] = spread
|
||||||
features['z_score'] = z_score
|
features['z_score'] = z_score
|
||||||
features['spread_rsi'] = spread_rsi
|
features['spread_rsi'] = spread_rsi
|
||||||
features['spread_roc'] = spread_roc
|
features['spread_roc'] = spread_roc
|
||||||
|
features['spread_change_1h'] = spread_change_1h
|
||||||
features['vol_ratio'] = vol_ratio
|
features['vol_ratio'] = vol_ratio
|
||||||
features['vol_ratio_rel'] = vol_ratio / vol_ratio_ma
|
features['vol_ratio_rel'] = vol_ratio / vol_ratio_ma
|
||||||
features['vol_diff_ratio'] = vol_spread_ratio
|
features['vol_diff_ratio'] = vol_spread_ratio
|
||||||
|
|
||||||
# 6. Merge CryptoQuant Data
|
# Add CQ features if available
|
||||||
if cq_df is not None:
|
if cq_df is not None:
|
||||||
print("Merging CryptoQuant features...")
|
|
||||||
# Forward fill daily data to hourly timestamps
|
|
||||||
# reindex features to match cq_df range or join
|
|
||||||
|
|
||||||
# Resample CQ to hourly (ffill)
|
|
||||||
# But easier: join features with cq_df using asof or reindex
|
|
||||||
cq_aligned = cq_df.reindex(features.index, method='ffill')
|
cq_aligned = cq_df.reindex(features.index, method='ffill')
|
||||||
|
|
||||||
# Add derived CQ features
|
|
||||||
# Funding Diff: If ETH funding > BTC funding => ETH overheated
|
|
||||||
if 'btc_funding' in cq_aligned.columns and 'eth_funding' in cq_aligned.columns:
|
if 'btc_funding' in cq_aligned.columns and 'eth_funding' in cq_aligned.columns:
|
||||||
cq_aligned['funding_diff'] = cq_aligned['eth_funding'] - cq_aligned['btc_funding']
|
cq_aligned['funding_diff'] = cq_aligned['eth_funding'] - cq_aligned['btc_funding']
|
||||||
|
|
||||||
# Inflow Ratio: If ETH inflow >> BTC inflow => ETH dump incoming?
|
|
||||||
if 'btc_inflow' in cq_aligned.columns and 'eth_inflow' in cq_aligned.columns:
|
if 'btc_inflow' in cq_aligned.columns and 'eth_inflow' in cq_aligned.columns:
|
||||||
# Add small epsilon to avoid div by zero
|
|
||||||
cq_aligned['inflow_ratio'] = cq_aligned['eth_inflow'] / (cq_aligned['btc_inflow'] + 1)
|
cq_aligned['inflow_ratio'] = cq_aligned['eth_inflow'] / (cq_aligned['btc_inflow'] + 1)
|
||||||
|
|
||||||
features = features.join(cq_aligned)
|
features = features.join(cq_aligned)
|
||||||
|
|
||||||
# --- Refined Target Definition (Anytime Profit) ---
|
|
||||||
horizon = 6
|
|
||||||
threshold = 0.005 # 0.5% profit target
|
|
||||||
z_threshold = 1.0
|
|
||||||
|
|
||||||
# For Short Spread (Z > 1): Did it drop below target?
|
|
||||||
# We look for the MINIMUM spread in the next 'horizon' periods
|
|
||||||
future_min = features['spread'].rolling(window=horizon).min().shift(-horizon)
|
|
||||||
target_short = features['spread'] * (1 - threshold)
|
|
||||||
success_short = (features['z_score'] > z_threshold) & (future_min < target_short)
|
|
||||||
|
|
||||||
# For Long Spread (Z < -1): Did it rise above target?
|
|
||||||
# We look for the MAXIMUM spread in the next 'horizon' periods
|
|
||||||
future_max = features['spread'].rolling(window=horizon).max().shift(-horizon)
|
|
||||||
target_long = features['spread'] * (1 + threshold)
|
|
||||||
success_long = (features['z_score'] < -z_threshold) & (future_max > target_long)
|
|
||||||
|
|
||||||
conditions = [success_short, success_long]
|
|
||||||
|
|
||||||
features['target'] = np.select(conditions, [1, 1], default=0)
|
|
||||||
|
|
||||||
return features.dropna()
|
return features.dropna()
|
||||||
|
|
||||||
def train_regime_model(features):
|
|
||||||
"""
|
|
||||||
Train a Random Forest to predict mean reversion success.
|
|
||||||
"""
|
|
||||||
# Define excluded columns (targets, raw prices, intermediates)
|
|
||||||
exclude_cols = ['spread', 'horizon_ret', 'target', 'rolling_mean', 'rolling_std']
|
|
||||||
|
|
||||||
# Auto-select all other numeric columns as features
|
def calculate_targets(features, horizon, profit_threshold=None):
    """Calculate target labels for a given horizon.

    A row is labelled 1 when the z-score signalled an entry AND the spread
    reached the profit target at any point within the next `horizon` bars.

    Args:
        features: DataFrame with 'spread' and 'z_score' columns.
        horizon: Number of future bars the trade may be held.
        profit_threshold: Fractional profit target; defaults to the
            module-level PROFIT_THRESHOLD when not supplied.

    Returns:
        Tuple of (targets array, valid-row mask, future min spread,
        future max spread); rows lacking complete future data are invalid.
    """
    if profit_threshold is None:
        profit_threshold = PROFIT_THRESHOLD

    spread = features['spread']
    z_score = features['z_score']

    # For Short (Z > 1): Did spread drop below target?
    future_min = spread.rolling(window=horizon).min().shift(-horizon)
    target_short = spread * (1 - profit_threshold)
    success_short = (z_score > 1.0) & (future_min < target_short)

    # For Long (Z < -1): Did spread rise above target?
    future_max = spread.rolling(window=horizon).max().shift(-horizon)
    target_long = spread * (1 + profit_threshold)
    success_long = (z_score < -1.0) & (future_max > target_long)

    targets = np.select([success_short, success_long], [1, 1], default=0)

    # Create valid mask (rows with complete future data)
    valid_mask = future_min.notna() & future_max.notna()

    return targets, valid_mask, future_min, future_max
||||||
|
|
||||||
|
|
||||||
|
def calculate_mae(features, predictions, test_idx, horizon):
    """Calculate Maximum Adverse Excursion (MAE) for predicted trades.

    For each bar the model flagged as tradeable, measures the worst
    percentage move against the position over the holding window.

    Args:
        features: Full feature DataFrame with 'spread' and 'z_score' columns.
        predictions: Binary predictions aligned with test_idx.
        test_idx: Index labels of the test rows.
        horizon: Holding window length in bars.

    Returns:
        Mean MAE across predicted trades, in percent (0.0 if no trades).
    """
    test_features = features.loc[test_idx]
    spread = test_features['spread']
    z_score = test_features['z_score']

    mae_values = []

    # NOTE: original loop used enumerate with an unused index; dropped.
    for idx, pred in zip(test_idx, predictions):
        if pred != 1:
            continue

        entry_spread = spread.loc[idx]
        z = z_score.loc[idx]

        # Spread path over the holding window, starting at entry.
        future_idx = features.index.get_loc(idx)
        future_end = min(future_idx + horizon, len(features))
        future_spreads = features['spread'].iloc[future_idx:future_end]

        if len(future_spreads) < 2:
            continue

        if z > 1.0:  # Short trade: adverse move is the spread rising
            max_adverse = (future_spreads.max() - entry_spread) / entry_spread
        else:  # Long trade: adverse move is the spread falling
            max_adverse = (entry_spread - future_spreads.min()) / entry_spread

        mae_values.append(max_adverse * 100)  # As percentage

    return np.mean(mae_values) if mae_values else 0.0
|
||||||
|
|
||||||
|
|
||||||
|
def calculate_net_profit(features, predictions, test_idx, horizon):
    """Calculate estimated net profit including fees.

    Simulates entering on each predicted signal and exiting at the end of
    the holding window, charging FEE_RATE per trade.

    Args:
        features: Full feature DataFrame with 'spread' and 'z_score' columns.
        predictions: Binary predictions aligned with test_idx.
        test_idx: Index labels of the test rows.
        horizon: Holding window length in bars.

    Returns:
        Tuple of (total net PnL as a fraction, number of trades taken).
    """
    test_features = features.loc[test_idx]
    spread = test_features['spread']
    z_score = test_features['z_score']

    total_pnl = 0.0
    n_trades = 0

    # NOTE: original loop used enumerate with an unused index; dropped.
    for idx, pred in zip(test_idx, predictions):
        if pred != 1:
            continue

        entry_spread = spread.loc[idx]
        z = z_score.loc[idx]

        # Spread path over the holding window, starting at entry.
        future_idx = features.index.get_loc(idx)
        future_end = min(future_idx + horizon, len(features))
        future_spreads = features['spread'].iloc[future_idx:future_end]

        if len(future_spreads) < 2:
            continue

        # Exit at horizon; direction inferred from the entry z-score.
        exit_spread = future_spreads.iloc[-1]
        if z > 1.0:  # Short trade - profit if spread drops
            pnl = (entry_spread - exit_spread) / entry_spread
        else:  # Long trade - profit if spread rises
            pnl = (exit_spread - entry_spread) / entry_spread

        # Subtract fees
        total_pnl += pnl - FEE_RATE
        n_trades += 1

    return total_pnl, n_trades
|
||||||
|
|
||||||
|
|
||||||
|
def test_horizon(features, horizon):
    """Test a single horizon with walk-forward training.

    Trains on the chronologically earlier TRAIN_RATIO fraction of the data
    and evaluates on the remainder, so no future information leaks into
    the fitted model.

    Returns:
        Metrics dict for this horizon, or None when there is not enough
        training (>=50 rows) or evaluable test (>=10 rows) data.
    """
    # Label every bar for this horizon; valid_mask marks rows whose
    # full future window exists.
    targets, valid_mask, _, _ = calculate_targets(features, horizon)

    # Chronological walk-forward split.
    n_samples = len(features)
    train_size = int(n_samples * TRAIN_RATIO)

    train_features = features.iloc[:train_size]
    test_features = features.iloc[train_size:]

    train_targets = targets[:train_size]
    test_targets = targets[train_size:]

    train_valid = valid_mask.iloc[:train_size]
    test_valid = valid_mask.iloc[train_size:]

    # Everything except the raw spread is a model input.
    exclude = ['spread']
    cols = [c for c in features.columns if c not in exclude]

    X_train = train_features[cols].fillna(0).replace([np.inf, -np.inf], 0)
    X_train_valid = X_train[train_valid]
    y_train_valid = train_targets[train_valid]

    if len(X_train_valid) < 50:
        return None  # Not enough training data

    model = RandomForestClassifier(
        n_estimators=300, max_depth=5, min_samples_leaf=30,
        class_weight={0: 1, 1: 3}, random_state=42
    )
    model.fit(X_train_valid, y_train_valid)

    # Predict on the held-out tail.
    X_test = test_features[cols].fillna(0).replace([np.inf, -np.inf], 0)
    predictions = model.predict(X_test)

    # Score only rows whose future window is complete.
    test_valid_mask = test_valid.values
    y_test_valid = test_targets[test_valid_mask]
    pred_valid = predictions[test_valid_mask]

    if len(y_test_valid) < 10:
        return None

    f1 = f1_score(y_test_valid, pred_valid, zero_division=0)

    # MAE and net profit are computed over ALL test predictions,
    # not just the rows with valid targets.
    test_idx = test_features.index
    avg_mae = calculate_mae(features, predictions, test_idx, horizon)
    net_pnl, n_trades = calculate_net_profit(features, predictions, test_idx, horizon)

    return {
        'horizon': horizon,
        'f1_score': f1,
        'avg_mae': avg_mae,
        'net_pnl': net_pnl,
        'n_trades': n_trades,
        'train_samples': len(X_train_valid),
        'test_samples': len(X_test)
    }
|
||||||
|
|
||||||
# Create Subplots
|
|
||||||
fig = make_subplots(
|
|
||||||
rows=3, cols=1,
|
|
||||||
shared_xaxes=True,
|
|
||||||
vertical_spacing=0.05,
|
|
||||||
row_heights=[0.5, 0.25, 0.25],
|
|
||||||
subplot_titles=('Spread & Signals', 'Exchange Inflows', 'Z-Score & Probability')
|
|
||||||
)
|
|
||||||
|
|
||||||
# Top: Spread
|
def test_horizons(features, horizons):
|
||||||
fig.add_trace(
|
"""Test multiple horizons and return comparison."""
|
||||||
go.Scatter(x=test_data.index, y=test_data['spread'], mode='lines', name='Spread', line=dict(color='gray')),
|
results = []
|
||||||
row=1, col=1
|
|
||||||
)
|
|
||||||
|
|
||||||
# Signals
|
print("\n" + "=" * 80)
|
||||||
# Separate Long and Short signals for clarity
|
print("WALK-FORWARD HORIZON OPTIMIZATION")
|
||||||
# Logic: If Z-Score was High (>1), we were betting on a SHORT Spread (Reversion Down)
|
print(f"Train Ratio: {TRAIN_RATIO*100:.0f}% | Profit Target: {PROFIT_THRESHOLD*100:.1f}% | Fee Rate: {FEE_RATE*100:.2f}%")
|
||||||
# If Z-Score was Low (< -1), we were betting on a LONG Spread (Reversion Up)
|
print("=" * 80)
|
||||||
|
|
||||||
# Correct Short Signals (Green Triangle Down)
|
for h in horizons:
|
||||||
tp_short = test_data[(test_data['prediction'] == 1) & (test_data['actual'] == 1) & (test_data['z_score'] > 0)]
|
result = test_horizon(features, h)
|
||||||
fig.add_trace(
|
if result:
|
||||||
go.Scatter(x=tp_short.index, y=tp_short['spread'], mode='markers', name='Win: Short Spread',
|
results.append(result)
|
||||||
marker=dict(symbol='triangle-down', size=12, color='green')),
|
print(f"Horizon {h:3d}h: F1={result['f1_score']:.3f}, "
|
||||||
row=1, col=1
|
f"MAE={result['avg_mae']:.2f}%, "
|
||||||
)
|
f"Net PnL={result['net_pnl']*100:.2f}%, "
|
||||||
|
f"Trades={result['n_trades']}")
|
||||||
|
|
||||||
# Correct Long Signals (Green Triangle Up)
|
return results
|
||||||
tp_long = test_data[(test_data['prediction'] == 1) & (test_data['actual'] == 1) & (test_data['z_score'] < 0)]
|
|
||||||
fig.add_trace(
|
|
||||||
go.Scatter(x=tp_long.index, y=tp_long['spread'], mode='markers', name='Win: Long Spread',
|
|
||||||
marker=dict(symbol='triangle-up', size=12, color='green')),
|
|
||||||
row=1, col=1
|
|
||||||
)
|
|
||||||
|
|
||||||
# False Short Signals (Red Triangle Down)
|
|
||||||
fp_short = test_data[(test_data['prediction'] == 1) & (test_data['actual'] == 0) & (test_data['z_score'] > 0)]
|
|
||||||
fig.add_trace(
|
|
||||||
go.Scatter(x=fp_short.index, y=fp_short['spread'], mode='markers', name='Loss: Short Spread',
|
|
||||||
marker=dict(symbol='triangle-down', size=10, color='red')),
|
|
||||||
row=1, col=1
|
|
||||||
)
|
|
||||||
|
|
||||||
# False Long Signals (Red Triangle Up)
|
|
||||||
fp_long = test_data[(test_data['prediction'] == 1) & (test_data['actual'] == 0) & (test_data['z_score'] < 0)]
|
|
||||||
fig.add_trace(
|
|
||||||
go.Scatter(x=fp_long.index, y=fp_long['spread'], mode='markers', name='Loss: Long Spread',
|
|
||||||
marker=dict(symbol='triangle-up', size=10, color='red')),
|
|
||||||
row=1, col=1
|
|
||||||
)
|
|
||||||
|
|
||||||
# Middle: Inflows (BTC vs ETH)
|
|
||||||
if 'btc_inflow' in test_data.columns:
|
|
||||||
fig.add_trace(
|
|
||||||
go.Bar(x=test_data.index, y=test_data['btc_inflow'], name='BTC Inflow', marker_color='orange', opacity=0.6),
|
|
||||||
row=2, col=1
|
|
||||||
)
|
|
||||||
if 'eth_inflow' in test_data.columns:
|
|
||||||
fig.add_trace(
|
|
||||||
go.Bar(x=test_data.index, y=test_data['eth_inflow'], name='ETH Inflow', marker_color='purple', opacity=0.6),
|
|
||||||
row=2, col=1
|
|
||||||
)
|
|
||||||
|
|
||||||
# Bottom: Z-Score
|
|
||||||
fig.add_trace(
|
|
||||||
go.Scatter(x=test_data.index, y=test_data['z_score'], mode='lines', name='Z-Score', line=dict(color='blue'), opacity=0.5),
|
|
||||||
row=3, col=1
|
|
||||||
)
|
|
||||||
fig.add_hline(y=2, line_dash="dash", line_color="red", row=3, col=1)
|
|
||||||
fig.add_hline(y=-2, line_dash="dash", line_color="green", row=3, col=1)
|
|
||||||
|
|
||||||
# Probability (Secondary Y for Row 3)
|
|
||||||
fig.add_trace(
|
|
||||||
go.Scatter(x=test_data.index, y=test_data['prob'], mode='lines', name='Prob', line=dict(color='cyan', width=1.5), yaxis='y4'),
|
|
||||||
row=3, col=1
|
|
||||||
)
|
|
||||||
|
|
||||||
fig.update_layout(
|
|
||||||
title='Regime Detection Analysis (with CryptoQuant)',
|
|
||||||
autosize=True,
|
|
||||||
height=None,
|
|
||||||
hovermode='x unified',
|
|
||||||
yaxis4=dict(title='Probability', overlaying='y3', side='right', range=[0, 1], showgrid=False),
|
|
||||||
template="plotly_dark",
|
|
||||||
margin=dict(l=10, r=10, t=40, b=10),
|
|
||||||
barmode='group'
|
|
||||||
)
|
|
||||||
|
|
||||||
# Update all x-axes to ensure spikes are visible everywhere
|
|
||||||
fig.update_xaxes(
|
|
||||||
showspikes=True,
|
|
||||||
spikemode='across',
|
|
||||||
spikesnap='cursor',
|
|
||||||
showline=False,
|
|
||||||
showgrid=True,
|
|
||||||
spikedash='dot',
|
|
||||||
spikecolor='white', # Make it bright to see
|
|
||||||
spikethickness=1,
|
|
||||||
)
|
|
||||||
|
|
||||||
fig.update_layout(
|
|
||||||
title='Regime Detection Analysis (with CryptoQuant)',
|
|
||||||
autosize=True,
|
|
||||||
height=None,
|
|
||||||
hovermode='x unified', # Keep unified hover for data reading
|
|
||||||
yaxis4=dict(title='Probability', overlaying='y3', side='right', range=[0, 1], showgrid=False),
|
|
||||||
template="plotly_dark",
|
|
||||||
margin=dict(l=10, r=10, t=40, b=10),
|
|
||||||
barmode='group'
|
|
||||||
)
|
|
||||||
|
|
||||||
output_path = "research/regime_results.html"
|
|
||||||
fig.write_html(
|
|
||||||
output_path,
|
|
||||||
config={'responsive': True, 'scrollZoom': True},
|
|
||||||
include_plotlyjs='cdn',
|
|
||||||
full_html=True,
|
|
||||||
default_height='100vh',
|
|
||||||
default_width='100%'
|
|
||||||
)
|
|
||||||
print(f"Interactive plot saved to {output_path}")
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
# 1. Load CQ Data first to determine valid date range
|
"""Main research function."""
|
||||||
cq_path = "data/cq_training_data.csv"
|
# Load data
|
||||||
cq_df = load_cryptoquant_data(cq_path)
|
df_btc, df_eth = load_data()
|
||||||
|
cq_df = load_cryptoquant_data()
|
||||||
|
|
||||||
start_date = None
|
# Calculate features
|
||||||
end_date = None
|
features = calculate_features(df_btc, df_eth, cq_df)
|
||||||
|
logger.info(f"Calculated {len(features)} feature rows with {len(features.columns)} columns")
|
||||||
|
|
||||||
if cq_df is not None and not cq_df.empty:
|
# Test horizons from 6h to 150h
|
||||||
start_date = cq_df.index.min().strftime('%Y-%m-%d')
|
horizons = list(range(6, 151, 6)) # 6, 12, 18, ..., 150
|
||||||
end_date = cq_df.index.max().strftime('%Y-%m-%d')
|
|
||||||
print(f"CryptoQuant Data Range: {start_date} to {end_date}")
|
|
||||||
|
|
||||||
# 2. Get Market Data (Aligned to CQ range)
|
results = test_horizons(features, horizons)
|
||||||
df_btc, df_eth = prepare_data(
|
|
||||||
"BTC-USDT", "ETH-USDT",
|
|
||||||
timeframe="1h",
|
|
||||||
start_date=start_date,
|
|
||||||
end_date=end_date
|
|
||||||
)
|
|
||||||
|
|
||||||
# 3. Calculate Features
|
if not results:
|
||||||
print("Calculating advanced regime features...")
|
print("No valid results!")
|
||||||
data = calculate_features(df_btc, df_eth, cq_df=cq_df, window=24)
|
|
||||||
|
|
||||||
if data.empty:
|
|
||||||
print("Error: No overlapping data found between Price and CryptoQuant data.")
|
|
||||||
return
|
return
|
||||||
|
|
||||||
# 4. Train & Evaluate
|
# Find best by different metrics
|
||||||
model, X_test, y_test, y_pred, y_prob = train_regime_model(data)
|
results_df = pd.DataFrame(results)
|
||||||
|
|
||||||
|
print("\n" + "=" * 80)
|
||||||
|
print("BEST HORIZONS BY METRIC")
|
||||||
|
print("=" * 80)
|
||||||
|
|
||||||
|
best_f1 = results_df.loc[results_df['f1_score'].idxmax()]
|
||||||
|
print(f"Best F1 Score: {best_f1['horizon']:.0f}h (F1={best_f1['f1_score']:.3f})")
|
||||||
|
|
||||||
|
best_pnl = results_df.loc[results_df['net_pnl'].idxmax()]
|
||||||
|
print(f"Best Net PnL: {best_pnl['horizon']:.0f}h (PnL={best_pnl['net_pnl']*100:.2f}%)")
|
||||||
|
|
||||||
|
lowest_mae = results_df.loc[results_df['avg_mae'].idxmin()]
|
||||||
|
print(f"Lowest MAE: {lowest_mae['horizon']:.0f}h (MAE={lowest_mae['avg_mae']:.2f}%)")
|
||||||
|
|
||||||
|
# Save results
|
||||||
|
output_path = "research/horizon_optimization_results.csv"
|
||||||
|
results_df.to_csv(output_path, index=False)
|
||||||
|
print(f"\nResults saved to {output_path}")
|
||||||
|
|
||||||
|
return results_df
|
||||||
|
|
||||||
# 5. Plot
|
|
||||||
plot_interactive_results(data, y_test, y_pred, y_prob)
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
main()
|
main()
|
||||||
|
|||||||
47
scripts/download_multi_pair_data.py
Normal file
47
scripts/download_multi_pair_data.py
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Download historical data for Multi-Pair Divergence Strategy.
|
||||||
|
|
||||||
|
Downloads 1h OHLCV data for top 10 cryptocurrencies from OKX.
|
||||||
|
"""
|
||||||
|
import sys
|
||||||
|
sys.path.insert(0, '.')
|
||||||
|
|
||||||
|
from engine.data_manager import DataManager
|
||||||
|
from engine.market import MarketType
|
||||||
|
from engine.logging_config import setup_logging, get_logger
|
||||||
|
from strategies.multi_pair import MultiPairConfig
|
||||||
|
|
||||||
|
logger = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Download perpetual 1h OHLCV data for every asset in MultiPairConfig.

    Iterates over the configured asset universe and downloads candles via
    DataManager. Failures for individual symbols are logged and skipped so
    that one bad symbol does not abort the whole batch.
    """
    setup_logging()

    config = MultiPairConfig()
    dm = DataManager()

    logger.info("Downloading data for %d assets...", len(config.assets))

    for symbol in config.assets:
        logger.info("Downloading %s perpetual 1h data...", symbol)
        try:
            df = dm.download_data(
                exchange_id=config.exchange_id,
                symbol=symbol,
                timeframe=config.timeframe,
                market_type=MarketType.PERPETUAL
            )
            if df is not None:
                logger.info("Downloaded %d candles for %s", len(df), symbol)
            else:
                # download_data signals "no data" with None rather than raising
                logger.warning("No data downloaded for %s", symbol)
        except Exception as e:
            # Best-effort batch download: log the failure and continue
            logger.error("Failed to download %s: %s", symbol, e)

    logger.info("Download complete!")


if __name__ == "__main__":
    main()
|
||||||
156
scripts/run_multi_pair_backtest.py
Normal file
156
scripts/run_multi_pair_backtest.py
Normal file
@@ -0,0 +1,156 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Run Multi-Pair Divergence Strategy backtest and compare with baseline.
|
||||||
|
|
||||||
|
Compares the multi-pair strategy against the single-pair BTC/ETH regime strategy.
|
||||||
|
"""
|
||||||
|
import sys
|
||||||
|
sys.path.insert(0, '.')
|
||||||
|
|
||||||
|
from engine.backtester import Backtester
|
||||||
|
from engine.data_manager import DataManager
|
||||||
|
from engine.logging_config import setup_logging, get_logger
|
||||||
|
from engine.reporting import Reporter
|
||||||
|
from strategies.multi_pair import MultiPairDivergenceStrategy, MultiPairConfig
|
||||||
|
from strategies.regime_strategy import RegimeReversionStrategy
|
||||||
|
from engine.market import MarketType
|
||||||
|
|
||||||
|
logger = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def run_baseline():
    """Run the baseline BTC/ETH regime reversion strategy backtest.

    Backtests RegimeReversionStrategy on OKX ETH-USDT 1h data with
    10,000 initial cash and logs summary statistics.

    Returns:
        Backtest result object (exposes .portfolio) for later comparison.
    """
    logger.info("=" * 60)
    logger.info("BASELINE: BTC/ETH Regime Reversion Strategy")
    logger.info("=" * 60)

    dm = DataManager()
    bt = Backtester(dm)

    strategy = RegimeReversionStrategy()

    result = bt.run_strategy(
        strategy,
        'okx',
        'ETH-USDT',
        timeframe='1h',
        init_cash=10000
    )

    logger.info("Baseline Results:")
    logger.info("  Total Return: %.2f%%", result.portfolio.total_return() * 100)
    logger.info("  Total Trades: %d", result.portfolio.trades.count())
    logger.info("  Win Rate: %.1f%%", result.portfolio.trades.win_rate() * 100)

    return result
|
||||||
|
|
||||||
|
|
||||||
|
def run_multi_pair(assets: list[str] | None = None):
    """Run the multi-pair divergence strategy backtest.

    Args:
        assets: Optional explicit asset universe. Falls back to the
            MultiPairConfig defaults when None (or empty).

    Returns:
        Backtest result object (exposes .portfolio) for comparison
        against the baseline.
    """
    logger.info("=" * 60)
    logger.info("MULTI-PAIR: Divergence Selection Strategy")
    logger.info("=" * 60)

    dm = DataManager()
    bt = Backtester(dm)

    # Use provided assets or default
    if assets:
        config = MultiPairConfig(assets=assets)
    else:
        config = MultiPairConfig()

    logger.info("Configured %d assets, %d pairs", len(config.assets), config.get_pair_count())

    strategy = MultiPairDivergenceStrategy(config=config)

    result = bt.run_strategy(
        strategy,
        'okx',
        'ETH-USDT',  # Reference asset (not used for trading, just index alignment)
        timeframe='1h',
        init_cash=10000
    )

    logger.info("Multi-Pair Results:")
    logger.info("  Total Return: %.2f%%", result.portfolio.total_return() * 100)
    logger.info("  Total Trades: %d", result.portfolio.trades.count())
    logger.info("  Win Rate: %.1f%%", result.portfolio.trades.win_rate() * 100)

    return result
|
||||||
|
|
||||||
|
|
||||||
|
def compare_results(baseline, multi_pair):
    """Compare baseline vs multi-pair backtest results and log a summary.

    Args:
        baseline: Backtest result of the single-pair regime strategy.
        multi_pair: Backtest result of the multi-pair divergence strategy.

    Returns:
        dict with both returns (in percent), the absolute improvement,
        and both trade counts.
    """
    logger.info("=" * 60)
    logger.info("COMPARISON")
    logger.info("=" * 60)

    baseline_return = baseline.portfolio.total_return() * 100
    multi_return = multi_pair.portfolio.total_return() * 100

    improvement = multi_return - baseline_return

    # BUGFIX: an "x-times" multiplier is only meaningful for a positive
    # baseline; the old `!= 0` guard produced a misleading negative factor
    # whenever the baseline lost money. Report 0 in that case instead.
    ratio = multi_return / baseline_return if baseline_return > 0 else 0

    logger.info("Baseline Return: %.2f%%", baseline_return)
    logger.info("Multi-Pair Return: %.2f%%", multi_return)
    logger.info("Improvement: %.2f%% (%.1fx)",
                improvement,
                ratio)

    baseline_trades = baseline.portfolio.trades.count()
    multi_trades = multi_pair.portfolio.trades.count()

    logger.info("Baseline Trades: %d", baseline_trades)
    logger.info("Multi-Pair Trades: %d", multi_trades)

    return {
        'baseline_return': baseline_return,
        'multi_pair_return': multi_return,
        'improvement': improvement,
        'baseline_trades': baseline_trades,
        'multi_pair_trades': multi_trades
    }
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Run baseline and multi-pair backtests and compare them.

    Checks which configured assets have locally downloaded data, requires
    at least two, then runs both strategies, logs a comparison, and saves
    HTML/CSV reports for the multi-pair run.
    """
    setup_logging()

    # Check available assets
    dm = DataManager()
    available = []

    for symbol in MultiPairConfig().assets:
        try:
            dm.load_data('okx', symbol, '1h', market_type=MarketType.PERPETUAL)
            available.append(symbol)
        except FileNotFoundError:
            # Asset simply has not been downloaded yet; skip it silently
            pass

    if len(available) < 2:
        logger.error(
            "Need at least 2 assets to run multi-pair strategy. "
            "Run: uv run python scripts/download_multi_pair_data.py"
        )
        return

    logger.info("Found data for %d assets: %s", len(available), available)

    # Run baseline
    baseline_result = run_baseline()

    # Run multi-pair
    multi_result = run_multi_pair(available)

    # Compare (comparison dict is currently only logged, not persisted)
    comparison = compare_results(baseline_result, multi_result)

    # Save reports
    reporter = Reporter()
    reporter.save_reports(multi_result, "multi_pair_divergence")

    logger.info("Reports saved to backtest_logs/")


if __name__ == "__main__":
    main()
|
||||||
@@ -36,6 +36,8 @@ def _build_registry() -> dict[str, StrategyConfig]:
|
|||||||
# Import here to avoid circular imports
|
# Import here to avoid circular imports
|
||||||
from strategies.examples import MaCrossStrategy, RsiStrategy
|
from strategies.examples import MaCrossStrategy, RsiStrategy
|
||||||
from strategies.supertrend import MetaSupertrendStrategy
|
from strategies.supertrend import MetaSupertrendStrategy
|
||||||
|
from strategies.regime_strategy import RegimeReversionStrategy
|
||||||
|
from strategies.multi_pair import MultiPairDivergenceStrategy, MultiPairConfig
|
||||||
|
|
||||||
return {
|
return {
|
||||||
"rsi": StrategyConfig(
|
"rsi": StrategyConfig(
|
||||||
@@ -76,6 +78,40 @@ def _build_registry() -> dict[str, StrategyConfig]:
|
|||||||
'period3': 12, 'multiplier3': 1.0
|
'period3': 12, 'multiplier3': 1.0
|
||||||
}
|
}
|
||||||
),
|
),
|
||||||
|
"regime": StrategyConfig(
|
||||||
|
strategy_class=RegimeReversionStrategy,
|
||||||
|
default_params={
|
||||||
|
# Optimal from walk-forward research (research/horizon_optimization_results.csv)
|
||||||
|
'horizon': 102, # 4.25 days - best Net PnL
|
||||||
|
'z_window': 24, # 24h rolling Z-score window
|
||||||
|
'z_entry_threshold': 1.0, # Enter when |Z| > 1.0
|
||||||
|
'profit_target': 0.005, # 0.5% target for ML labels
|
||||||
|
'stop_loss': 0.06, # 6% stop loss
|
||||||
|
'take_profit': 0.05, # 5% take profit
|
||||||
|
'train_ratio': 0.7, # 70% train / 30% test
|
||||||
|
'trend_window': 0, # Disabled SMA filter
|
||||||
|
'use_funding_filter': True, # Enabled Funding filter
|
||||||
|
'funding_threshold': 0.005 # 0.005% threshold (Proven profitable)
|
||||||
|
},
|
||||||
|
grid_params={
|
||||||
|
'horizon': [84, 96, 102, 108, 120],
|
||||||
|
'z_entry_threshold': [0.8, 1.0, 1.2],
|
||||||
|
'stop_loss': [0.04, 0.06, 0.08],
|
||||||
|
'funding_threshold': [0.005, 0.01, 0.02]
|
||||||
|
}
|
||||||
|
),
|
||||||
|
"multi_pair": StrategyConfig(
|
||||||
|
strategy_class=MultiPairDivergenceStrategy,
|
||||||
|
default_params={
|
||||||
|
# Multi-pair divergence strategy uses config object
|
||||||
|
# Parameters passed here will override MultiPairConfig defaults
|
||||||
|
},
|
||||||
|
grid_params={
|
||||||
|
'z_entry_threshold': [0.8, 1.0, 1.2],
|
||||||
|
'prob_threshold': [0.4, 0.5, 0.6],
|
||||||
|
'correlation_threshold': [0.75, 0.85, 0.95]
|
||||||
|
}
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
24
strategies/multi_pair/__init__.py
Normal file
24
strategies/multi_pair/__init__.py
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
"""
|
||||||
|
Multi-Pair Divergence Selection Strategy.
|
||||||
|
|
||||||
|
Extends regime detection to multiple cryptocurrency pairs and dynamically
|
||||||
|
selects the most divergent pair for trading.
|
||||||
|
"""
|
||||||
|
from .config import MultiPairConfig
|
||||||
|
from .pair_scanner import PairScanner, TradingPair
|
||||||
|
from .correlation import CorrelationFilter
|
||||||
|
from .feature_engine import MultiPairFeatureEngine
|
||||||
|
from .divergence_scorer import DivergenceScorer
|
||||||
|
from .strategy import MultiPairDivergenceStrategy
|
||||||
|
from .funding import FundingRateFetcher
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
"MultiPairConfig",
|
||||||
|
"PairScanner",
|
||||||
|
"TradingPair",
|
||||||
|
"CorrelationFilter",
|
||||||
|
"MultiPairFeatureEngine",
|
||||||
|
"DivergenceScorer",
|
||||||
|
"MultiPairDivergenceStrategy",
|
||||||
|
"FundingRateFetcher",
|
||||||
|
]
|
||||||
88
strategies/multi_pair/config.py
Normal file
88
strategies/multi_pair/config.py
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
"""
|
||||||
|
Configuration for Multi-Pair Divergence Strategy.
|
||||||
|
"""
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class MultiPairConfig:
    """
    Configuration parameters for multi-pair divergence strategy.

    Attributes:
        assets: List of asset symbols to analyze (top 10 by market cap)
        z_window: Rolling window for Z-Score calculation (hours)
        z_entry_threshold: Minimum |Z-Score| to consider for entry
        prob_threshold: Minimum ML probability to consider for entry
        correlation_threshold: Max correlation to allow between pairs
        correlation_window: Rolling window for correlation (hours)
        atr_period: ATR lookback period for dynamic stops
        sl_atr_multiplier: Stop-loss as multiple of ATR
        tp_atr_multiplier: Take-profit as multiple of ATR
        train_ratio: Walk-forward train/test split ratio
        horizon: Look-ahead horizon for target calculation (hours)
        profit_target: Minimum profit threshold for target labels
        funding_threshold: Funding rate threshold for filtering
    """
    # --- Asset universe ---
    assets: list[str] = field(default_factory=lambda: [
        "BTC-USDT", "ETH-USDT", "SOL-USDT", "XRP-USDT", "BNB-USDT",
        "DOGE-USDT", "ADA-USDT", "AVAX-USDT", "LINK-USDT", "DOT-USDT"
    ])

    # --- Z-Score entry thresholds ---
    z_window: int = 24
    z_entry_threshold: float = 1.0

    # --- ML model thresholds / walk-forward split ---
    prob_threshold: float = 0.5
    train_ratio: float = 0.7
    horizon: int = 102
    profit_target: float = 0.005

    # --- Correlation filtering ---
    correlation_threshold: float = 0.85
    correlation_window: int = 168  # 7 days in hours

    # --- Risk management: ATR-based stops ---
    # SL/TP are expressed as multiples of ATR. With a typical crypto
    # hourly ATR of ~0.6%, 10x ATR ~ 6% SL and 8x ATR ~ 5% TP, matching
    # the previous fixed percentages.
    atr_period: int = 14              # ATR lookback (hours on the 1h timeframe)
    sl_atr_multiplier: float = 10.0   # Stop-loss = entry +/- (ATR * multiplier)
    tp_atr_multiplier: float = 8.0    # Take-profit = entry +/- (ATR * multiplier)

    # Fixed-percentage fallbacks, used when ATR cannot be computed
    base_sl_pct: float = 0.06
    base_tp_pct: float = 0.05

    # Clamp ATR-derived stops into a sane range
    min_sl_pct: float = 0.02   # Minimum 2% stop-loss
    max_sl_pct: float = 0.10   # Maximum 10% stop-loss
    min_tp_pct: float = 0.02   # Minimum 2% take-profit
    max_tp_pct: float = 0.15   # Maximum 15% take-profit

    volatility_window: int = 24

    # --- Funding-rate filter ---
    # OKX funding is typically 0.0001 (0.01%) per 8h; anything beyond
    # 0.0005 (0.05%) indicates a crowded trade and is filtered out.
    funding_threshold: float = 0.0005

    # --- Trade management ---
    # Empirically, min_hold_bars=0 and z_exit_threshold=0 perform best:
    # the mean-reversion exit at Z=0 is the primary profit driver.
    min_hold_bars: int = 0           # Disabled - let mean reversion drive exits
    switch_threshold: float = 999.0  # Disabled - never switch mid-trade
    cooldown_bars: int = 0           # Disabled - enter as soon as a signal appears
    z_exit_threshold: float = 0.0    # Exit at Z=0 (mean reversion complete)

    # --- Exchange settings ---
    exchange_id: str = "okx"
    timeframe: str = "1h"

    def get_pair_count(self) -> int:
        """Return the number of unique unordered pairs in the asset list."""
        count = len(self.assets)
        return count * (count - 1) // 2
|
||||||
173
strategies/multi_pair/correlation.py
Normal file
173
strategies/multi_pair/correlation.py
Normal file
@@ -0,0 +1,173 @@
|
|||||||
|
"""
|
||||||
|
Correlation Filter for Multi-Pair Divergence Strategy.
|
||||||
|
|
||||||
|
Calculates rolling correlation matrix and filters pairs
|
||||||
|
to avoid highly correlated positions.
|
||||||
|
"""
|
||||||
|
import pandas as pd
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
from engine.logging_config import get_logger
|
||||||
|
from .config import MultiPairConfig
|
||||||
|
from .pair_scanner import TradingPair
|
||||||
|
|
||||||
|
logger = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class CorrelationFilter:
    """
    Calculates and filters based on asset correlations.

    Uses rolling correlation of returns to identify assets
    moving together, avoiding redundant positions.
    """

    def __init__(self, config: MultiPairConfig):
        self.config = config
        # Cache of the most recent correlation matrix and the bar index
        # at which it was computed (refreshed at most every 24 bars).
        self._correlation_matrix: pd.DataFrame | None = None
        self._last_update_idx: int = -1

    def calculate_correlation_matrix(
        self,
        price_data: dict[str, pd.Series],
        current_idx: int | None = None
    ) -> pd.DataFrame:
        """
        Calculate rolling correlation matrix between all assets.

        Args:
            price_data: Dictionary mapping asset symbols to price series
            current_idx: Current bar index (for caching)

        Returns:
            Correlation matrix DataFrame
        """
        # Use cached matrix if it was computed within the last 24 bars
        if (
            current_idx is not None
            and self._correlation_matrix is not None
            and current_idx - self._last_update_idx < 24  # Update every 24 bars
        ):
            return self._correlation_matrix

        # Correlate returns rather than raw prices: trending price series
        # are spuriously correlated even when their returns are not.
        returns = {}
        for symbol, prices in price_data.items():
            returns[symbol] = prices.pct_change()

        returns_df = pd.DataFrame(returns)

        # Rolling correlation
        window = self.config.correlation_window

        # Get latest correlation (last row of rolling correlation)
        if len(returns_df) >= window:
            rolling_corr = returns_df.rolling(window=window).corr()
            # Extract last timestamp correlation matrix
            last_idx = returns_df.index[-1]
            corr_matrix = rolling_corr.loc[last_idx]
        else:
            # Fallback to full-period correlation if not enough data
            corr_matrix = returns_df.corr()

        self._correlation_matrix = corr_matrix
        if current_idx is not None:
            self._last_update_idx = current_idx

        return corr_matrix

    def filter_pairs(
        self,
        pairs: list[TradingPair],
        current_position_asset: str | None,
        price_data: dict[str, pd.Series],
        current_idx: int | None = None
    ) -> list[TradingPair]:
        """
        Filter pairs based on correlation with current position.

        If we have an open position in an asset, exclude pairs where
        either asset is highly correlated with the held asset.

        Args:
            pairs: List of candidate pairs
            current_position_asset: Currently held asset (or None)
            price_data: Dictionary of price series by symbol
            current_idx: Current bar index for caching

        Returns:
            Filtered list of pairs
        """
        # No open position: nothing to be correlated against.
        if current_position_asset is None:
            return pairs

        corr_matrix = self.calculate_correlation_matrix(price_data, current_idx)
        threshold = self.config.correlation_threshold

        filtered = []
        for pair in pairs:
            # Check correlation of base and quote with held asset
            base_corr = self._get_correlation(
                corr_matrix, pair.base_asset, current_position_asset
            )
            quote_corr = self._get_correlation(
                corr_matrix, pair.quote_asset, current_position_asset
            )

            # Filter if either asset highly correlated with position
            if abs(base_corr) > threshold or abs(quote_corr) > threshold:
                logger.debug(
                    "Filtered %s: base_corr=%.2f, quote_corr=%.2f (held: %s)",
                    pair.name, base_corr, quote_corr, current_position_asset
                )
                continue

            filtered.append(pair)

        if len(filtered) < len(pairs):
            logger.info(
                "Correlation filter: %d/%d pairs remaining (held: %s)",
                len(filtered), len(pairs), current_position_asset
            )

        return filtered

    def _get_correlation(
        self,
        corr_matrix: pd.DataFrame,
        asset1: str,
        asset2: str
    ) -> float:
        """
        Get correlation between two assets from matrix.

        Args:
            corr_matrix: Correlation matrix
            asset1: First asset symbol
            asset2: Second asset symbol

        Returns:
            Correlation coefficient (-1 to 1), or 0.0 if missing or NaN
        """
        if asset1 == asset2:
            return 1.0

        try:
            value = corr_matrix.loc[asset1, asset2]
        except KeyError:
            return 0.0

        # BUGFIX: rolling correlations are NaN during the warm-up window
        # (and for constant-price series). abs(nan) > threshold evaluates
        # False, so a NaN previously let a pair with *unknown* correlation
        # slip through the filter unnoticed. Treat unknown as 0.0, and
        # coerce to a plain float to honor the annotated return type.
        if pd.isna(value):
            return 0.0
        return float(value)

    def get_correlation_report(
        self,
        price_data: dict[str, pd.Series]
    ) -> pd.DataFrame:
        """
        Generate a readable correlation report.

        Args:
            price_data: Dictionary of price series

        Returns:
            Correlation matrix as DataFrame
        """
        return self.calculate_correlation_matrix(price_data)
|
||||||
311
strategies/multi_pair/divergence_scorer.py
Normal file
311
strategies/multi_pair/divergence_scorer.py
Normal file
@@ -0,0 +1,311 @@
|
|||||||
|
"""
|
||||||
|
Divergence Scorer for Multi-Pair Strategy.
|
||||||
|
|
||||||
|
Ranks pairs by divergence score and selects the best candidate.
|
||||||
|
"""
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
import pandas as pd
|
||||||
|
import numpy as np
|
||||||
|
from sklearn.ensemble import RandomForestClassifier
|
||||||
|
import pickle
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
from engine.logging_config import get_logger
|
||||||
|
from .config import MultiPairConfig
|
||||||
|
from .pair_scanner import TradingPair
|
||||||
|
|
||||||
|
logger = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class DivergenceSignal:
    """
    Signal for a divergent pair.

    Attributes:
        pair: Trading pair
        z_score: Current Z-Score of the spread
        probability: ML model probability of profitable reversion
        divergence_score: Combined score (|z_score| * probability)
        direction: 'long' or 'short' (relative to base asset)
        base_price: Current price of base asset
        quote_price: Current price of quote asset
        atr: Average True Range in price units
        atr_pct: ATR as percentage of price
        timestamp: Bar timestamp at which the signal was generated
    """
    pair: TradingPair
    z_score: float
    probability: float
    divergence_score: float
    direction: str
    base_price: float
    quote_price: float
    atr: float
    atr_pct: float
    timestamp: pd.Timestamp
|
||||||
|
|
||||||
|
|
||||||
|
class DivergenceScorer:
    """
    Scores and ranks pairs by divergence potential.

    Uses ML model predictions combined with Z-Score magnitude
    to identify the most promising mean-reversion opportunity.
    """

    def __init__(self, config: MultiPairConfig, model_path: str = "data/multi_pair_model.pkl"):
        """Create the scorer and attempt to load a cached model from disk.

        Args:
            config: Strategy configuration (thresholds, horizon, targets).
            model_path: Location of the pickled model + feature-column list.
        """
        self.config = config
        self.model_path = Path(model_path)
        # Both stay None until train_model() runs or a cached model is found.
        self.model: RandomForestClassifier | None = None
        self.feature_cols: list[str] | None = None
        self._load_model()

    def _load_model(self) -> None:
        """Load pre-trained model if available.

        NOTE(review): pickle.load executes arbitrary code from the file —
        only safe because model_path is a locally produced cache; do not
        point it at untrusted input.
        """
        if self.model_path.exists():
            try:
                with open(self.model_path, 'rb') as f:
                    saved = pickle.load(f)
                # The pickle holds both the estimator and the exact column
                # order it was fitted with (see save_model()).
                self.model = saved['model']
                self.feature_cols = saved['feature_cols']
                logger.info("Loaded model from %s", self.model_path)
            except Exception as e:
                # Best-effort: a corrupt cache just means we start untrained.
                logger.warning("Could not load model: %s", e)

    def save_model(self) -> None:
        """Save trained model (estimator + feature column order) to disk."""
        if self.model is None:
            return

        self.model_path.parent.mkdir(parents=True, exist_ok=True)
        with open(self.model_path, 'wb') as f:
            pickle.dump({
                'model': self.model,
                'feature_cols': self.feature_cols,
            }, f)
        logger.info("Saved model to %s", self.model_path)

    def train_model(
        self,
        combined_features: pd.DataFrame,
        pair_features: dict[str, pd.DataFrame]
    ) -> None:
        """
        Train universal model on all pairs.

        Labels each bar 1 when a divergence beyond the Z threshold was
        followed by the spread reaching the profit target within
        ``config.horizon`` bars, else 0.

        Args:
            combined_features: Combined feature DataFrame from all pairs
            pair_features: Individual pair feature DataFrames (for target calculation)
        """
        logger.info("Training universal model on %d samples...", len(combined_features))

        z_thresh = self.config.z_entry_threshold
        horizon = self.config.horizon
        profit_target = self.config.profit_target

        # Calculate targets for each pair
        all_targets = []
        all_features = []

        for pair_id, features in pair_features.items():
            # Need enough bars for the lookahead window plus warm-up.
            if len(features) < horizon + 50:
                continue

            spread = features['spread']
            z_score = features['z_score']

            # Future price movements: rolling(h).min() at index t covers
            # (t-h+1 .. t); shifting by -h re-anchors it to (t+1 .. t+h),
            # i.e. the min/max of the NEXT `horizon` bars.
            future_min = spread.rolling(window=horizon).min().shift(-horizon)
            future_max = spread.rolling(window=horizon).max().shift(-horizon)

            # Target levels: spread must move `profit_target` toward the mean.
            target_short = spread * (1 - profit_target)
            target_long = spread * (1 + profit_target)

            # Positive label = divergence existed AND reversion hit the target.
            success_short = (z_score > z_thresh) & (future_min < target_short)
            success_long = (z_score < -z_thresh) & (future_max > target_long)

            # Either direction succeeding counts as class 1.
            targets = np.select([success_short, success_long], [1, 1], default=0)

            # Valid mask (exclude rows without complete future data)
            valid_mask = future_min.notna() & future_max.notna()

            # Collect valid samples. `targets` is a positional numpy array,
            # so the mask must be applied via .values, not label alignment.
            valid_features = features[valid_mask]
            valid_targets = targets[valid_mask.values]

            if len(valid_features) > 0:
                all_features.append(valid_features)
                all_targets.extend(valid_targets)

        if not all_features:
            logger.warning("No valid training samples")
            return

        # Combine all training data
        X_df = pd.concat(all_features, ignore_index=True)
        y = np.array(all_targets)

        # Feature columns = everything except metadata/price reference columns.
        exclude_cols = [
            'pair_id', 'base_asset', 'quote_asset',
            'spread', 'base_close', 'quote_close', 'base_volume'
        ]
        self.feature_cols = [c for c in X_df.columns if c not in exclude_cols]

        # Prepare features: neutralize NaN/inf so the forest never sees them.
        X = X_df[self.feature_cols].fillna(0)
        X = X.replace([np.inf, -np.inf], 0)

        # Train model. class_weight upweights the (rare) positive class 3:1;
        # shallow depth + large leaves keep the forest from memorizing noise.
        self.model = RandomForestClassifier(
            n_estimators=300,
            max_depth=5,
            min_samples_leaf=30,
            class_weight={0: 1, 1: 3},
            random_state=42
        )
        self.model.fit(X, y)

        logger.info(
            "Model trained on %d samples, %d features, %.1f%% positive class",
            len(X), len(self.feature_cols), y.mean() * 100
        )
        self.save_model()

    def score_pairs(
        self,
        pair_features: dict[str, pd.DataFrame],
        pairs: list[TradingPair],
        timestamp: pd.Timestamp | None = None
    ) -> list[DivergenceSignal]:
        """
        Score all pairs and return ranked signals.

        Args:
            pair_features: Feature DataFrames by pair_id
            pairs: List of TradingPair objects
            timestamp: Current timestamp for feature extraction

        Returns:
            List of DivergenceSignal sorted by score (descending)
        """
        if self.model is None:
            logger.warning("Model not trained, returning empty signals")
            return []

        signals = []
        pair_map = {p.pair_id: p for p in pairs}

        for pair_id, features in pair_features.items():
            if pair_id not in pair_map:
                continue

            pair = pair_map[pair_id]

            # Get latest features: most recent bar at or before `timestamp`
            # (prevents lookahead when replaying history), else the last bar.
            if timestamp is not None:
                valid = features[features.index <= timestamp]
                if len(valid) == 0:
                    continue
                latest = valid.iloc[-1]
                ts = valid.index[-1]
            else:
                latest = features.iloc[-1]
                ts = features.index[-1]

            z_score = latest['z_score']

            # Skip if Z-score below threshold
            if abs(z_score) < self.config.z_entry_threshold:
                continue

            # Prepare features for prediction — same NaN/inf neutralization
            # as in train_model, in the fitted column order.
            feature_row = latest[self.feature_cols].fillna(0).infer_objects(copy=False)
            feature_row = feature_row.replace([np.inf, -np.inf], 0)
            X = pd.DataFrame([feature_row.values], columns=self.feature_cols)

            # Predict probability of the positive (profitable-reversion) class.
            prob = self.model.predict_proba(X)[0, 1]

            # Skip if probability below threshold
            if prob < self.config.prob_threshold:
                continue

            # Apply funding rate filter
            # Block trades where funding opposes our direction.
            # `or 0` maps a falsy (None/0) lookup to 0; NaN funding passes the
            # comparisons as "no filter" since NaN comparisons are False.
            base_funding = latest.get('base_funding', 0) or 0
            funding_thresh = self.config.funding_threshold

            if z_score > 0:  # Short signal
                # High negative funding = shorts are paying -> skip
                if base_funding < -funding_thresh:
                    logger.debug(
                        "Skipping %s short: funding too negative (%.4f)",
                        pair.name, base_funding
                    )
                    continue
            else:  # Long signal
                # High positive funding = longs are paying -> skip
                if base_funding > funding_thresh:
                    logger.debug(
                        "Skipping %s long: funding too positive (%.4f)",
                        pair.name, base_funding
                    )
                    continue

            # Calculate divergence score: bigger dislocation AND higher
            # model confidence both push a pair up the ranking.
            divergence_score = abs(z_score) * prob

            # Determine direction
            # Z > 0: Spread high (base expensive vs quote) -> Short base
            # Z < 0: Spread low (base cheap vs quote) -> Long base
            direction = 'short' if z_score > 0 else 'long'

            signal = DivergenceSignal(
                pair=pair,
                z_score=z_score,
                probability=prob,
                divergence_score=divergence_score,
                direction=direction,
                base_price=latest['base_close'],
                quote_price=latest['quote_close'],
                atr=latest.get('atr_base', 0),
                atr_pct=latest.get('atr_pct_base', 0.02),
                timestamp=ts
            )
            signals.append(signal)

        # Sort by divergence score (highest first)
        signals.sort(key=lambda s: s.divergence_score, reverse=True)

        if signals:
            logger.debug(
                "Scored %d pairs, top: %s (score=%.3f, z=%.2f, p=%.2f)",
                len(signals),
                signals[0].pair.name,
                signals[0].divergence_score,
                signals[0].z_score,
                signals[0].probability
            )

        return signals

    def select_best_pair(
        self,
        signals: list[DivergenceSignal]
    ) -> DivergenceSignal | None:
        """
        Select the best pair from scored signals.

        Args:
            signals: List of DivergenceSignal (pre-sorted by score)

        Returns:
            Best signal or None if no valid candidates
        """
        if not signals:
            return None
        return signals[0]
|
||||||
433
strategies/multi_pair/feature_engine.py
Normal file
433
strategies/multi_pair/feature_engine.py
Normal file
@@ -0,0 +1,433 @@
|
|||||||
|
"""
|
||||||
|
Feature Engineering for Multi-Pair Divergence Strategy.
|
||||||
|
|
||||||
|
Calculates features for all pairs in the universe, including
|
||||||
|
spread technicals, volatility, and on-chain data.
|
||||||
|
"""
|
||||||
|
import pandas as pd
|
||||||
|
import numpy as np
|
||||||
|
import ta
|
||||||
|
|
||||||
|
from engine.logging_config import get_logger
|
||||||
|
from engine.data_manager import DataManager
|
||||||
|
from engine.market import MarketType
|
||||||
|
from .config import MultiPairConfig
|
||||||
|
from .pair_scanner import TradingPair
|
||||||
|
from .funding import FundingRateFetcher
|
||||||
|
|
||||||
|
logger = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class MultiPairFeatureEngine:
    """
    Calculates features for multiple trading pairs.

    Generates consistent feature sets across all pairs for
    the universal ML model.
    """

    def __init__(self, config: MultiPairConfig):
        """Set up data access and funding-rate fetching for the universe."""
        self.config = config
        self.dm = DataManager()
        self.funding_fetcher = FundingRateFetcher()
        # Cached funding DataFrame, populated by load_funding_data() and
        # consumed by _add_on_chain_features().
        self._funding_data: pd.DataFrame | None = None

    def load_all_assets(
        self,
        start_date: str | None = None,
        end_date: str | None = None
    ) -> dict[str, pd.DataFrame]:
        """
        Load OHLCV data for all assets in the universe.

        Args:
            start_date: Start date filter (YYYY-MM-DD)
            end_date: End date filter (YYYY-MM-DD)

        Returns:
            Dictionary mapping symbol to OHLCV DataFrame
        """
        data = {}
        market_type = MarketType.PERPETUAL

        for symbol in self.config.assets:
            try:
                df = self.dm.load_data(
                    self.config.exchange_id,
                    symbol,
                    self.config.timeframe,
                    market_type
                )

                # Apply date filters (index assumed tz-aware UTC, matching
                # the UTC timestamps constructed here).
                if start_date:
                    df = df[df.index >= pd.Timestamp(start_date, tz="UTC")]
                if end_date:
                    df = df[df.index <= pd.Timestamp(end_date, tz="UTC")]

                if len(df) >= 200:  # Minimum data requirement
                    data[symbol] = df
                    logger.debug("Loaded %s: %d bars", symbol, len(df))
                else:
                    logger.warning(
                        "Skipping %s: insufficient data (%d bars)",
                        symbol, len(df)
                    )
            except FileNotFoundError:
                logger.warning("Data not found for %s", symbol)
            except Exception as e:
                # One broken asset must not abort loading the whole universe.
                logger.error("Error loading %s: %s", symbol, e)

        logger.info("Loaded %d/%d assets", len(data), len(self.config.assets))
        return data

    def load_funding_data(
        self,
        start_date: str | None = None,
        end_date: str | None = None,
        use_cache: bool = True
    ) -> pd.DataFrame:
        """
        Load funding rate data for all assets.

        Args:
            start_date: Start date filter
            end_date: End date filter
            use_cache: Whether to use cached data

        Returns:
            DataFrame with funding rates for all assets
        """
        self._funding_data = self.funding_fetcher.get_funding_data(
            self.config.assets,
            start_date=start_date,
            end_date=end_date,
            use_cache=use_cache
        )

        if self._funding_data is not None and not self._funding_data.empty:
            logger.info(
                "Loaded funding data: %d rows, %d assets",
                len(self._funding_data),
                len(self._funding_data.columns)
            )
        else:
            logger.warning("No funding data available")

        return self._funding_data

    def calculate_pair_features(
        self,
        pair: TradingPair,
        asset_data: dict[str, pd.DataFrame],
        on_chain_data: pd.DataFrame | None = None
    ) -> pd.DataFrame | None:
        """
        Calculate features for a single pair.

        Args:
            pair: Trading pair
            asset_data: Dictionary of OHLCV DataFrames by symbol
            on_chain_data: Optional on-chain data (funding, inflows)

        Returns:
            DataFrame with features, or None if insufficient data
        """
        base = pair.base_asset
        quote = pair.quote_asset

        if base not in asset_data or quote not in asset_data:
            return None

        df_base = asset_data[base]
        df_quote = asset_data[quote]

        # Align indices: features only make sense on bars both legs share.
        common_idx = df_base.index.intersection(df_quote.index)
        if len(common_idx) < 200:
            logger.debug("Pair %s: insufficient aligned data", pair.name)
            return None

        df_a = df_base.loc[common_idx]
        df_b = df_quote.loc[common_idx]

        # Calculate spread (base / quote)
        spread = df_a['close'] / df_b['close']

        # Z-Score of the spread over the configured rolling window.
        z_window = self.config.z_window
        rolling_mean = spread.rolling(window=z_window).mean()
        rolling_std = spread.rolling(window=z_window).std()
        # NOTE(review): zero rolling_std yields inf here; downstream
        # consumers replace inf with 0 before model use.
        z_score = (spread - rolling_mean) / rolling_std

        # Spread Technicals
        spread_rsi = ta.momentum.RSIIndicator(spread, window=14).rsi()
        spread_roc = spread.pct_change(periods=5) * 100
        spread_change_1h = spread.pct_change(periods=1)

        # Volume Analysis (1e-10 epsilon guards against divide-by-zero)
        vol_ratio = df_a['volume'] / (df_b['volume'] + 1e-10)
        vol_ratio_ma = vol_ratio.rolling(window=12).mean()
        vol_ratio_rel = vol_ratio / (vol_ratio_ma + 1e-10)

        # Volatility of each leg's returns
        ret_a = df_a['close'].pct_change()
        ret_b = df_b['close'].pct_change()
        vol_a = ret_a.rolling(window=z_window).std()
        vol_b = ret_b.rolling(window=z_window).std()
        vol_spread_ratio = vol_a / (vol_b + 1e-10)

        # Realized Volatility (for dynamic SL/TP)
        realized_vol_a = ret_a.rolling(window=self.config.volatility_window).std()
        realized_vol_b = ret_b.rolling(window=self.config.volatility_window).std()

        # ATR (Average True Range) for dynamic stops
        # ATR = average of max(high-low, |high-prev_close|, |low-prev_close|)
        high_a, low_a, close_a = df_a['high'], df_a['low'], df_a['close']
        high_b, low_b, close_b = df_b['high'], df_b['low'], df_b['close']

        # True Range for base asset
        tr_a = pd.concat([
            high_a - low_a,
            (high_a - close_a.shift(1)).abs(),
            (low_a - close_a.shift(1)).abs()
        ], axis=1).max(axis=1)
        atr_a = tr_a.rolling(window=self.config.atr_period).mean()

        # True Range for quote asset
        tr_b = pd.concat([
            high_b - low_b,
            (high_b - close_b.shift(1)).abs(),
            (low_b - close_b.shift(1)).abs()
        ], axis=1).max(axis=1)
        atr_b = tr_b.rolling(window=self.config.atr_period).mean()

        # ATR as percentage of price (normalized)
        atr_pct_a = atr_a / close_a
        atr_pct_b = atr_b / close_b

        # Build feature DataFrame
        features = pd.DataFrame(index=common_idx)
        features['pair_id'] = pair.pair_id
        features['base_asset'] = base
        features['quote_asset'] = quote

        # Price data (for reference, not features — excluded by the scorer)
        features['spread'] = spread
        features['base_close'] = df_a['close']
        features['quote_close'] = df_b['close']
        features['base_volume'] = df_a['volume']

        # Core Features
        features['z_score'] = z_score
        features['spread_rsi'] = spread_rsi
        features['spread_roc'] = spread_roc
        features['spread_change_1h'] = spread_change_1h
        features['vol_ratio'] = vol_ratio
        features['vol_ratio_rel'] = vol_ratio_rel
        features['vol_diff_ratio'] = vol_spread_ratio

        # Volatility for SL/TP
        features['realized_vol_base'] = realized_vol_a
        features['realized_vol_quote'] = realized_vol_b
        features['realized_vol_avg'] = (realized_vol_a + realized_vol_b) / 2

        # ATR for dynamic stops (in price units and as percentage)
        features['atr_base'] = atr_a
        features['atr_quote'] = atr_b
        features['atr_pct_base'] = atr_pct_a
        features['atr_pct_quote'] = atr_pct_b
        features['atr_pct_avg'] = (atr_pct_a + atr_pct_b) / 2

        # Pair encoding (for universal model)
        # Using base and quote indices for hierarchical encoding; -1 marks
        # an asset not in the configured universe.
        assets = self.config.assets
        features['base_idx'] = assets.index(base) if base in assets else -1
        features['quote_idx'] = assets.index(quote) if quote in assets else -1

        # Add funding and on-chain features
        # Funding data is always added from self._funding_data (OKX, all 10 assets)
        # On-chain data is optional (CryptoQuant, BTC/ETH only)
        features = self._add_on_chain_features(
            features, on_chain_data, base, quote
        )

        # Drop rows with NaN in core features only (not funding/on-chain)
        core_cols = [
            'z_score', 'spread_rsi', 'spread_roc', 'spread_change_1h',
            'vol_ratio', 'vol_ratio_rel', 'vol_diff_ratio',
            'realized_vol_base', 'realized_vol_quote', 'realized_vol_avg',
            'atr_base', 'atr_pct_base'  # ATR is core for SL/TP
        ]
        features = features.dropna(subset=core_cols)

        # Fill missing funding/on-chain features with 0 (neutral)
        optional_cols = [
            'base_funding', 'quote_funding', 'funding_diff', 'funding_avg',
            'base_inflow', 'quote_inflow', 'inflow_ratio'
        ]
        for col in optional_cols:
            if col in features.columns:
                features[col] = features[col].fillna(0)

        return features

    def calculate_all_pair_features(
        self,
        pairs: list[TradingPair],
        asset_data: dict[str, pd.DataFrame],
        on_chain_data: pd.DataFrame | None = None
    ) -> dict[str, pd.DataFrame]:
        """
        Calculate features for all pairs.

        Args:
            pairs: List of trading pairs
            asset_data: Dictionary of OHLCV DataFrames
            on_chain_data: Optional on-chain data

        Returns:
            Dictionary mapping pair_id to feature DataFrame
        """
        all_features = {}

        for pair in pairs:
            features = self.calculate_pair_features(
                pair, asset_data, on_chain_data
            )
            # Pairs with insufficient data are silently dropped from the map.
            if features is not None and len(features) > 0:
                all_features[pair.pair_id] = features

        logger.info(
            "Calculated features for %d/%d pairs",
            len(all_features), len(pairs)
        )

        return all_features

    def get_combined_features(
        self,
        pair_features: dict[str, pd.DataFrame],
        timestamp: pd.Timestamp | None = None
    ) -> pd.DataFrame:
        """
        Combine all pair features into a single DataFrame.

        Useful for batch model prediction across all pairs.

        Args:
            pair_features: Dictionary of feature DataFrames by pair_id
            timestamp: Optional specific timestamp to filter to

        Returns:
            Combined DataFrame with all pairs as rows
        """
        if not pair_features:
            return pd.DataFrame()

        if timestamp is not None:
            # Get latest row from each pair at or before timestamp
            # (one snapshot row per pair — avoids lookahead in replay).
            rows = []
            for pair_id, features in pair_features.items():
                valid = features[features.index <= timestamp]
                if len(valid) > 0:
                    row = valid.iloc[-1:].copy()
                    rows.append(row)

            if rows:
                return pd.concat(rows, ignore_index=False)
            return pd.DataFrame()

        # Combine all features (for training)
        return pd.concat(pair_features.values(), ignore_index=False)

    def _add_on_chain_features(
        self,
        features: pd.DataFrame,
        on_chain_data: pd.DataFrame | None,
        base_asset: str,
        quote_asset: str
    ) -> pd.DataFrame:
        """
        Add on-chain and funding rate features for the pair.

        Uses funding data from OKX (all 10 assets) and on-chain data
        from CryptoQuant (BTC/ETH only for inflows).
        """
        # 'BTC-USDT' -> 'btc', matching the lowercase column prefixes used
        # by FundingRateFetcher.fetch_all_assets.
        base_short = base_asset.replace('-USDT', '').lower()
        quote_short = quote_asset.replace('-USDT', '').lower()

        # Add funding rates from cached funding data
        if self._funding_data is not None and not self._funding_data.empty:
            # Funding settles every 8h; forward-fill onto the feature index.
            funding_aligned = self._funding_data.reindex(
                features.index, method='ffill'
            )

            base_funding_col = f'{base_short}_funding'
            quote_funding_col = f'{quote_short}_funding'

            if base_funding_col in funding_aligned.columns:
                features['base_funding'] = funding_aligned[base_funding_col]
            if quote_funding_col in funding_aligned.columns:
                features['quote_funding'] = funding_aligned[quote_funding_col]

            # Funding difference (positive = base has higher funding)
            if 'base_funding' in features.columns and 'quote_funding' in features.columns:
                features['funding_diff'] = (
                    features['base_funding'] - features['quote_funding']
                )

                # Funding sentiment: average of both assets
                features['funding_avg'] = (
                    features['base_funding'] + features['quote_funding']
                ) / 2

        # Add on-chain features from CryptoQuant (BTC/ETH only)
        if on_chain_data is not None and not on_chain_data.empty:
            cq_aligned = on_chain_data.reindex(features.index, method='ffill')

            # Inflows (only available for BTC/ETH)
            base_inflow_col = f'{base_short}_inflow'
            quote_inflow_col = f'{quote_short}_inflow'

            if base_inflow_col in cq_aligned.columns:
                features['base_inflow'] = cq_aligned[base_inflow_col]
            if quote_inflow_col in cq_aligned.columns:
                features['quote_inflow'] = cq_aligned[quote_inflow_col]

            if 'base_inflow' in features.columns and 'quote_inflow' in features.columns:
                # +1 in the denominator avoids division by zero inflow.
                features['inflow_ratio'] = (
                    features['base_inflow'] /
                    (features['quote_inflow'] + 1)
                )

        return features

    def get_feature_columns(self) -> list[str]:
        """
        Get list of feature columns for ML model.

        Excludes metadata and target-related columns.

        Returns:
            List of feature column names
        """
        # Core features (always present)
        core_features = [
            'z_score', 'spread_rsi', 'spread_roc', 'spread_change_1h',
            'vol_ratio', 'vol_ratio_rel', 'vol_diff_ratio',
            'realized_vol_base', 'realized_vol_quote', 'realized_vol_avg',
            'base_idx', 'quote_idx'
        ]

        # Funding features (now available for all 10 assets via OKX)
        funding_features = [
            'base_funding', 'quote_funding', 'funding_diff', 'funding_avg'
        ]

        # On-chain features (BTC/ETH only via CryptoQuant)
        onchain_features = [
            'base_inflow', 'quote_inflow', 'inflow_ratio'
        ]

        return core_features + funding_features + onchain_features
|
||||||
272
strategies/multi_pair/funding.py
Normal file
272
strategies/multi_pair/funding.py
Normal file
@@ -0,0 +1,272 @@
|
|||||||
|
"""
|
||||||
|
Funding Rate Fetcher for Multi-Pair Strategy.
|
||||||
|
|
||||||
|
Fetches historical funding rates from OKX for all assets.
|
||||||
|
CryptoQuant only supports BTC/ETH, so we use OKX for the full universe.
|
||||||
|
"""
|
||||||
|
import time
|
||||||
|
from pathlib import Path
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
|
||||||
|
import ccxt
|
||||||
|
import pandas as pd
|
||||||
|
|
||||||
|
from engine.logging_config import get_logger
|
||||||
|
|
||||||
|
logger = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class FundingRateFetcher:
|
||||||
|
"""
|
||||||
|
Fetches and caches funding rate data from OKX.
|
||||||
|
|
||||||
|
OKX funding rates are settled every 8 hours (00:00, 08:00, 16:00 UTC).
|
||||||
|
This fetcher retrieves historical funding rate data and aligns it
|
||||||
|
to hourly candles for use in the multi-pair strategy.
|
||||||
|
"""
|
||||||
|
|
||||||
|
    def __init__(self, cache_dir: str = "data/funding"):
        """Create the fetcher and ensure the cache directory exists.

        Args:
            cache_dir: Directory where funding-rate CSV caches are stored;
                created (with parents) if missing.
        """
        self.cache_dir = Path(cache_dir)
        self.cache_dir.mkdir(parents=True, exist_ok=True)
        # ccxt client is created lazily on first fetch (see _init_exchange).
        self.exchange: ccxt.okx | None = None
|
||||||
|
|
||||||
|
def _init_exchange(self) -> None:
|
||||||
|
"""Initialize OKX exchange connection."""
|
||||||
|
if self.exchange is None:
|
||||||
|
self.exchange = ccxt.okx({
|
||||||
|
'enableRateLimit': True,
|
||||||
|
'options': {'defaultType': 'swap'}
|
||||||
|
})
|
||||||
|
self.exchange.load_markets()
|
||||||
|
|
||||||
|
    def fetch_funding_history(
        self,
        symbol: str,
        start_date: str | None = None,
        end_date: str | None = None,
        limit: int = 100
    ) -> pd.DataFrame:
        """
        Fetch historical funding rates for a symbol.

        Paginates through OKX's funding-rate history endpoint. Failures are
        best-effort: a batch error stops pagination with whatever was
        collected so far, and a total failure returns an empty DataFrame.

        Args:
            symbol: Asset symbol (e.g., 'BTC-USDT')
            start_date: Start date (YYYY-MM-DD); defaults to one year ago
            end_date: End date (YYYY-MM-DD); defaults to now
            limit: Max records per request

        Returns:
            DataFrame indexed by UTC timestamp with a 'funding_rate' column;
            empty DataFrame when nothing could be fetched.
        """
        self._init_exchange()

        # Convert symbol format: 'BTC-USDT' -> OKX perpetual-swap id 'BTC/USDT:USDT'
        base = symbol.replace('-USDT', '')
        okx_symbol = f"{base}/USDT:USDT"

        try:
            # OKX funding rate history endpoint
            # Uses fetch_funding_rate_history if available
            all_funding = []

            # Parse dates (ccxt works in epoch milliseconds)
            if start_date:
                since = self.exchange.parse8601(f"{start_date}T00:00:00Z")
            else:
                # Default to 1 year ago
                since = self.exchange.milliseconds() - 365 * 24 * 60 * 60 * 1000

            if end_date:
                until = self.exchange.parse8601(f"{end_date}T23:59:59Z")
            else:
                until = self.exchange.milliseconds()

            # Fetch in batches, advancing past the last returned timestamp
            current_since = since
            while current_since < until:
                try:
                    funding = self.exchange.fetch_funding_rate_history(
                        okx_symbol,
                        since=current_since,
                        limit=limit
                    )

                    if not funding:
                        break

                    all_funding.extend(funding)

                    # Move to next batch; a non-advancing cursor means the
                    # exchange is repeating data, so bail to avoid looping.
                    last_ts = funding[-1]['timestamp']
                    if last_ts <= current_since:
                        break
                    current_since = last_ts + 1

                    time.sleep(0.1)  # Rate limit

                except Exception as e:
                    # Keep whatever batches we already have.
                    logger.warning(
                        "Error fetching funding batch for %s: %s",
                        symbol, str(e)[:50]
                    )
                    break

            if not all_funding:
                return pd.DataFrame()

            # Convert to DataFrame with a tz-aware UTC index
            df = pd.DataFrame(all_funding)
            df['timestamp'] = pd.to_datetime(df['timestamp'], unit='ms', utc=True)
            df.set_index('timestamp', inplace=True)
            df = df[['fundingRate']].rename(columns={'fundingRate': 'funding_rate'})
            df.sort_index(inplace=True)

            # Remove duplicates (overlapping batches can repeat timestamps)
            df = df[~df.index.duplicated(keep='first')]

            logger.info("Fetched %d funding records for %s", len(df), symbol)
            return df

        except Exception as e:
            logger.error("Failed to fetch funding for %s: %s", symbol, e)
            return pd.DataFrame()
|
||||||
|
|
||||||
|
def fetch_all_assets(
|
||||||
|
self,
|
||||||
|
assets: list[str],
|
||||||
|
start_date: str | None = None,
|
||||||
|
end_date: str | None = None
|
||||||
|
) -> pd.DataFrame:
|
||||||
|
"""
|
||||||
|
Fetch funding rates for all assets and combine.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
assets: List of asset symbols (e.g., ['BTC-USDT', 'ETH-USDT'])
|
||||||
|
start_date: Start date
|
||||||
|
end_date: End date
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Combined DataFrame with columns like 'btc_funding', 'eth_funding', etc.
|
||||||
|
"""
|
||||||
|
combined = pd.DataFrame()
|
||||||
|
|
||||||
|
for symbol in assets:
|
||||||
|
df = self.fetch_funding_history(symbol, start_date, end_date)
|
||||||
|
|
||||||
|
if df.empty:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Rename column
|
||||||
|
asset_name = symbol.replace('-USDT', '').lower()
|
||||||
|
col_name = f"{asset_name}_funding"
|
||||||
|
df = df.rename(columns={'funding_rate': col_name})
|
||||||
|
|
||||||
|
if combined.empty:
|
||||||
|
combined = df
|
||||||
|
else:
|
||||||
|
combined = combined.join(df, how='outer')
|
||||||
|
|
||||||
|
time.sleep(0.2) # Be nice to API
|
||||||
|
|
||||||
|
# Forward fill to hourly (funding is every 8h)
|
||||||
|
if not combined.empty:
|
||||||
|
combined = combined.sort_index()
|
||||||
|
combined = combined.ffill()
|
||||||
|
|
||||||
|
return combined
|
||||||
|
|
||||||
|
def save_to_cache(self, df: pd.DataFrame, filename: str = "funding_rates.csv") -> None:
|
||||||
|
"""Save funding data to cache file."""
|
||||||
|
path = self.cache_dir / filename
|
||||||
|
df.to_csv(path)
|
||||||
|
logger.info("Saved funding rates to %s", path)
|
||||||
|
|
||||||
|
def load_from_cache(self, filename: str = "funding_rates.csv") -> pd.DataFrame | None:
|
||||||
|
"""Load funding data from cache if available."""
|
||||||
|
path = self.cache_dir / filename
|
||||||
|
if path.exists():
|
||||||
|
df = pd.read_csv(path, index_col='timestamp', parse_dates=True)
|
||||||
|
logger.info("Loaded funding rates from cache: %d rows", len(df))
|
||||||
|
return df
|
||||||
|
return None
|
||||||
|
|
||||||
|
def get_funding_data(
|
||||||
|
self,
|
||||||
|
assets: list[str],
|
||||||
|
start_date: str | None = None,
|
||||||
|
end_date: str | None = None,
|
||||||
|
use_cache: bool = True,
|
||||||
|
force_refresh: bool = False
|
||||||
|
) -> pd.DataFrame:
|
||||||
|
"""
|
||||||
|
Get funding data, using cache if available.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
assets: List of asset symbols
|
||||||
|
start_date: Start date
|
||||||
|
end_date: End date
|
||||||
|
use_cache: Whether to use cached data
|
||||||
|
force_refresh: Force refresh even if cache exists
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
DataFrame with funding rates for all assets
|
||||||
|
"""
|
||||||
|
cache_file = "funding_rates.csv"
|
||||||
|
|
||||||
|
# Try cache first
|
||||||
|
if use_cache and not force_refresh:
|
||||||
|
cached = self.load_from_cache(cache_file)
|
||||||
|
if cached is not None:
|
||||||
|
# Check if cache covers requested range
|
||||||
|
if start_date and end_date:
|
||||||
|
start_ts = pd.Timestamp(start_date, tz='UTC')
|
||||||
|
end_ts = pd.Timestamp(end_date, tz='UTC')
|
||||||
|
|
||||||
|
if cached.index.min() <= start_ts and cached.index.max() >= end_ts:
|
||||||
|
# Filter to requested range
|
||||||
|
return cached[(cached.index >= start_ts) & (cached.index <= end_ts)]
|
||||||
|
|
||||||
|
# Fetch fresh data
|
||||||
|
logger.info("Fetching fresh funding rate data...")
|
||||||
|
df = self.fetch_all_assets(assets, start_date, end_date)
|
||||||
|
|
||||||
|
if not df.empty and use_cache:
|
||||||
|
self.save_to_cache(df, cache_file)
|
||||||
|
|
||||||
|
return df
|
||||||
|
|
||||||
|
|
||||||
|
def download_funding_data():
    """
    Download the last 365 days of funding data for all multi-pair assets.

    Returns:
        DataFrame of funding rates (empty if the download failed).
    """
    from strategies.multi_pair.config import MultiPairConfig

    config = MultiPairConfig()
    fetcher = FundingRateFetcher()

    # Fetch last year of data. Capture "now" once so start and end dates
    # are derived from the same instant (the original called
    # datetime.now() twice, which can skew across a midnight boundary).
    now = datetime.now(timezone.utc)
    end_date = now.strftime("%Y-%m-%d")
    start_date = (now - pd.Timedelta(days=365)).strftime("%Y-%m-%d")

    logger.info("Downloading funding rates for %d assets...", len(config.assets))
    logger.info("Date range: %s to %s", start_date, end_date)

    df = fetcher.get_funding_data(
        config.assets,
        start_date=start_date,
        end_date=end_date,
        force_refresh=True
    )

    if not df.empty:
        logger.info("Downloaded %d funding rate records", len(df))
        logger.info("Columns: %s", list(df.columns))
    else:
        logger.warning("No funding data downloaded")

    return df
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Script entry point: configure logging, then refresh the funding cache.
    from engine.logging_config import setup_logging
    setup_logging()
    download_funding_data()
|
||||||
168
strategies/multi_pair/pair_scanner.py
Normal file
168
strategies/multi_pair/pair_scanner.py
Normal file
@@ -0,0 +1,168 @@
|
|||||||
|
"""
|
||||||
|
Pair Scanner for Multi-Pair Divergence Strategy.
|
||||||
|
|
||||||
|
Generates all possible pairs from asset universe and checks tradeability.
|
||||||
|
"""
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from itertools import combinations
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
import ccxt
|
||||||
|
|
||||||
|
from engine.logging_config import get_logger
|
||||||
|
from .config import MultiPairConfig
|
||||||
|
|
||||||
|
logger = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class TradingPair:
    """
    A tradeable pair used for spread analysis.

    Identity (equality and hashing) is defined purely by ``pair_id`` so
    pairs can safely be used as dict keys and set members.

    Attributes:
        base_asset: First asset in the pair (numerator)
        quote_asset: Second asset in the pair (denominator)
        pair_id: Unique identifier for the pair
        is_direct: Whether pair can be traded directly on exchange
        exchange_symbol: Symbol for direct trading (if available)
    """
    base_asset: str
    quote_asset: str
    pair_id: str
    is_direct: bool = False
    exchange_symbol: Optional[str] = None

    @property
    def name(self) -> str:
        """Human-readable pair name, e.g. 'BTC-USDT/ETH-USDT'."""
        return f"{self.base_asset}/{self.quote_asset}"

    def __hash__(self):
        # Hash on pair_id alone, consistent with __eq__ below.
        return hash(self.pair_id)

    def __eq__(self, other):
        return isinstance(other, TradingPair) and self.pair_id == other.pair_id
|
||||||
|
|
||||||
|
|
||||||
|
class PairScanner:
    """
    Scans and generates tradeable pairs from asset universe.

    Checks OKX for directly tradeable cross-pairs and generates
    synthetic pairs via USDT for others.
    """

    def __init__(self, config: MultiPairConfig):
        # Exchange connection is created lazily (see _init_exchange) so the
        # scanner can be used offline with check_exchange=False.
        self.config = config
        self.exchange: Optional[ccxt.Exchange] = None
        self._available_markets: set[str] = set()

    def _init_exchange(self) -> None:
        """Initialize exchange connection for market lookup (idempotent)."""
        if self.exchange is None:
            # Resolve the ccxt exchange class by its id (e.g. 'okx').
            exchange_class = getattr(ccxt, self.config.exchange_id)
            self.exchange = exchange_class({'enableRateLimit': True})
            self.exchange.load_markets()
            self._available_markets = set(self.exchange.symbols)
            logger.info(
                "Loaded %d markets from %s",
                len(self._available_markets),
                self.config.exchange_id
            )

    def generate_pairs(self, check_exchange: bool = True) -> list[TradingPair]:
        """
        Generate all unique pairs from asset universe.

        Args:
            check_exchange: Whether to check OKX for direct trading

        Returns:
            List of TradingPair objects
        """
        if check_exchange:
            self._init_exchange()

        pairs = []
        assets = self.config.assets

        # Every unordered 2-combination of the universe is a candidate pair.
        for base, quote in combinations(assets, 2):
            pair_id = f"{base}__{quote}"

            # Check if directly tradeable as cross-pair on OKX
            is_direct = False
            exchange_symbol = None

            if check_exchange:
                # Check perpetual cross-pair (e.g., ETH/BTC:BTC)
                # OKX perpetuals are typically quoted in USDT
                # Cross-pairs like ETH/BTC are less common
                # NOTE(review): the probe below uses a ':USDT' settle suffix,
                # not ':BTC' as the example above suggests — confirm which
                # symbol form the exchange actually lists.
                cross_symbol = f"{base.replace('-USDT', '')}/{quote.replace('-USDT', '')}:USDT"
                if cross_symbol in self._available_markets:
                    is_direct = True
                    exchange_symbol = cross_symbol

            pair = TradingPair(
                base_asset=base,
                quote_asset=quote,
                pair_id=pair_id,
                is_direct=is_direct,
                exchange_symbol=exchange_symbol
            )
            pairs.append(pair)

        # Log summary
        direct_count = sum(1 for p in pairs if p.is_direct)
        logger.info(
            "Generated %d pairs: %d direct, %d synthetic",
            len(pairs), direct_count, len(pairs) - direct_count
        )

        return pairs

    def get_required_symbols(self, pairs: list[TradingPair]) -> list[str]:
        """
        Get list of symbols needed to calculate all pair spreads.

        For synthetic pairs, we need both USDT pairs.
        For direct pairs, we still load USDT pairs for simplicity.

        Args:
            pairs: List of trading pairs

        Returns:
            List of unique symbols to load (e.g., ['BTC-USDT', 'ETH-USDT']).
            NOTE(review): the order is set-derived and therefore
            unspecified — callers should not rely on it.
        """
        symbols = set()
        for pair in pairs:
            symbols.add(pair.base_asset)
            symbols.add(pair.quote_asset)
        return list(symbols)

    def filter_by_assets(
        self,
        pairs: list[TradingPair],
        exclude_assets: list[str]
    ) -> list[TradingPair]:
        """
        Filter pairs that contain any of the excluded assets.

        Args:
            pairs: List of trading pairs
            exclude_assets: Assets to exclude

        Returns:
            Filtered list of pairs
        """
        if not exclude_assets:
            return pairs

        exclude_set = set(exclude_assets)
        return [
            p for p in pairs
            if p.base_asset not in exclude_set
            and p.quote_asset not in exclude_set
        ]
|
||||||
525
strategies/multi_pair/strategy.py
Normal file
525
strategies/multi_pair/strategy.py
Normal file
@@ -0,0 +1,525 @@
|
|||||||
|
"""
|
||||||
|
Multi-Pair Divergence Selection Strategy.
|
||||||
|
|
||||||
|
Main strategy class that orchestrates pair scanning, feature calculation,
|
||||||
|
model training, and signal generation for backtesting.
|
||||||
|
"""
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
import pandas as pd
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
from strategies.base import BaseStrategy
|
||||||
|
from engine.market import MarketType
|
||||||
|
from engine.logging_config import get_logger
|
||||||
|
from .config import MultiPairConfig
|
||||||
|
from .pair_scanner import PairScanner, TradingPair
|
||||||
|
from .correlation import CorrelationFilter
|
||||||
|
from .feature_engine import MultiPairFeatureEngine
|
||||||
|
from .divergence_scorer import DivergenceScorer, DivergenceSignal
|
||||||
|
|
||||||
|
logger = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class PositionState:
    """
    Tracks current position state.

    A default-constructed instance means "flat"; ``pair is None`` is the
    flat-position sentinel used throughout the strategy.
    """
    pair: TradingPair | None = None  # Pair currently held; None when flat
    direction: str | None = None  # 'long' or 'short'
    entry_price: float = 0.0  # Fill price at entry
    entry_idx: int = -1  # Bar index of entry (for hold-period checks)
    stop_loss: float = 0.0  # Absolute stop-loss price
    take_profit: float = 0.0  # Absolute take-profit price
    atr: float = 0.0  # ATR at entry for reference
    last_exit_idx: int = -100  # For cooldown tracking
|
||||||
|
|
||||||
|
|
||||||
|
class MultiPairDivergenceStrategy(BaseStrategy):
    """
    Multi-Pair Divergence Selection Strategy.

    Scans multiple cryptocurrency pairs for spread divergence,
    selects the most divergent pair using ML-enhanced scoring,
    and trades mean-reversion opportunities.

    Key Features:
    - Universal ML model across all pairs
    - Correlation-based pair filtering
    - Dynamic SL/TP based on volatility
    - Walk-forward training
    """

    def __init__(
        self,
        config: MultiPairConfig | None = None,
        model_path: str = "data/multi_pair_model.pkl"
    ):
        super().__init__()
        self.config = config or MultiPairConfig()

        # Initialize components
        self.pair_scanner = PairScanner(self.config)
        self.correlation_filter = CorrelationFilter(self.config)
        self.feature_engine = MultiPairFeatureEngine(self.config)
        self.divergence_scorer = DivergenceScorer(self.config, model_path)

        # Strategy configuration
        self.default_market_type = MarketType.PERPETUAL
        self.default_leverage = 1

        # Runtime state (populated by run())
        self.pairs: list[TradingPair] = []
        self.asset_data: dict[str, pd.DataFrame] = {}
        self.pair_features: dict[str, pd.DataFrame] = {}
        self.position = PositionState()
        self.train_end_idx: int = 0

    def run(self, close: pd.Series, **kwargs) -> tuple:
        """
        Execute the multi-pair divergence strategy.

        This method is called by the backtester with the primary asset's
        close prices. For multi-pair, we load all assets internally.

        Args:
            close: Primary close prices (used for index alignment)
            **kwargs: Additional data (high, low, volume)

        Returns:
            Tuple of (long_entries, long_exits, short_entries, short_exits, size)
        """
        logger.info("Starting Multi-Pair Divergence Strategy")

        # 1. Load all asset data
        start_date = close.index.min().strftime("%Y-%m-%d")
        end_date = close.index.max().strftime("%Y-%m-%d")

        self.asset_data = self.feature_engine.load_all_assets(
            start_date=start_date,
            end_date=end_date
        )

        # 1b. Load funding rate data for all assets
        self.feature_engine.load_funding_data(
            start_date=start_date,
            end_date=end_date,
            use_cache=True
        )

        if len(self.asset_data) < 2:
            logger.error("Insufficient assets loaded, need at least 2")
            return self._empty_signals(close)

        # 2. Generate pairs (offline: no exchange round-trip)
        self.pairs = self.pair_scanner.generate_pairs(check_exchange=False)

        # Filter to pairs with available data
        available_assets = set(self.asset_data.keys())
        self.pairs = [
            p for p in self.pairs
            if p.base_asset in available_assets
            and p.quote_asset in available_assets
        ]

        logger.info("Trading %d pairs from %d assets", len(self.pairs), len(self.asset_data))

        # 3. Calculate features for all pairs
        self.pair_features = self.feature_engine.calculate_all_pair_features(
            self.pairs, self.asset_data
        )

        if not self.pair_features:
            logger.error("No pair features calculated")
            return self._empty_signals(close)

        # 4. Align to common index
        common_index = self._get_common_index()
        if len(common_index) < 200:
            logger.error("Insufficient common data across pairs")
            return self._empty_signals(close)

        # 5. Walk-forward split
        n_samples = len(common_index)
        train_size = int(n_samples * self.config.train_ratio)
        self.train_end_idx = train_size

        # NOTE(review): common_index[train_size] assumes train_ratio < 1;
        # a ratio of 1.0 would raise IndexError here — confirm config bounds.
        train_end_date = common_index[train_size - 1]
        test_start_date = common_index[train_size]

        logger.info(
            "Walk-Forward Split: Train=%d bars (until %s), Test=%d bars (from %s)",
            train_size, train_end_date.strftime('%Y-%m-%d'),
            n_samples - train_size, test_start_date.strftime('%Y-%m-%d')
        )

        # 6. Train model on training period (skipped if a model was preloaded)
        if self.divergence_scorer.model is None:
            train_features = {
                pid: feat[feat.index <= train_end_date]
                for pid, feat in self.pair_features.items()
            }
            combined = self.feature_engine.get_combined_features(train_features)
            self.divergence_scorer.train_model(combined, train_features)

        # 7. Generate signals for test period
        return self._generate_signals(common_index, train_size, close)

    def _generate_signals(
        self,
        index: pd.DatetimeIndex,
        train_size: int,
        reference_close: pd.Series
    ) -> tuple:
        """
        Generate entry/exit signals for the test period.

        Iterates through each bar in the test period, scoring pairs
        and generating signals based on divergence scores.
        """
        # Initialize signal arrays aligned to reference close
        long_entries = pd.Series(False, index=reference_close.index)
        long_exits = pd.Series(False, index=reference_close.index)
        short_entries = pd.Series(False, index=reference_close.index)
        short_exits = pd.Series(False, index=reference_close.index)
        size = pd.Series(1.0, index=reference_close.index)

        # Track position state (reset to flat at the start of the test period)
        self.position = PositionState()

        # Price data for correlation calculation
        price_data = {
            symbol: df['close'] for symbol, df in self.asset_data.items()
        }

        # Iterate through test period
        test_indices = index[train_size:]

        trade_count = 0

        for i, timestamp in enumerate(test_indices):
            current_idx = train_size + i

            # Check exit conditions first
            if self.position.pair is not None:
                # Enforce minimum hold period
                bars_held = current_idx - self.position.entry_idx
                if bars_held < self.config.min_hold_bars:
                    # Only allow SL/TP exits during min hold period
                    should_exit, exit_reason = self._check_sl_tp_only(timestamp)
                else:
                    should_exit, exit_reason = self._check_exit(timestamp)

                if should_exit:
                    # Map exit signal to reference index
                    if timestamp in reference_close.index:
                        if self.position.direction == 'long':
                            long_exits.loc[timestamp] = True
                        else:
                            short_exits.loc[timestamp] = True

                    logger.debug(
                        "Exit %s %s at %s: %s (held %d bars)",
                        self.position.direction,
                        self.position.pair.name,
                        timestamp.strftime('%Y-%m-%d %H:%M'),
                        exit_reason,
                        bars_held
                    )
                    self.position = PositionState(last_exit_idx=current_idx)

            # Score pairs (with correlation filter if position exists)
            held_asset = None
            if self.position.pair is not None:
                held_asset = self.position.pair.base_asset

            # Filter pairs by correlation
            candidate_pairs = self.correlation_filter.filter_pairs(
                self.pairs,
                held_asset,
                price_data,
                current_idx
            )

            # Get candidate features
            candidate_features = {
                pid: feat for pid, feat in self.pair_features.items()
                if any(p.pair_id == pid for p in candidate_pairs)
            }

            # Score pairs
            signals = self.divergence_scorer.score_pairs(
                candidate_features,
                candidate_pairs,
                timestamp
            )

            # Get best signal
            best = self.divergence_scorer.select_best_pair(signals)

            if best is None:
                continue

            # Check if we should switch positions or enter new
            should_enter = False

            # Check cooldown
            bars_since_exit = current_idx - self.position.last_exit_idx
            in_cooldown = bars_since_exit < self.config.cooldown_bars

            if self.position.pair is None and not in_cooldown:
                # No position and not in cooldown, can enter
                should_enter = True
            elif self.position.pair is not None:
                # Check if we should switch (requires min hold + significant improvement)
                bars_held = current_idx - self.position.entry_idx
                current_score = self._get_current_score(timestamp)

                if (bars_held >= self.config.min_hold_bars and
                        best.divergence_score > current_score * self.config.switch_threshold):
                    # New opportunity is significantly better
                    if timestamp in reference_close.index:
                        if self.position.direction == 'long':
                            long_exits.loc[timestamp] = True
                        else:
                            short_exits.loc[timestamp] = True
                    self.position = PositionState(last_exit_idx=current_idx)
                    should_enter = True

            if should_enter:
                # Calculate ATR-based dynamic SL/TP
                sl_price, tp_price = self._calculate_sl_tp(
                    best.base_price,
                    best.direction,
                    best.atr,
                    best.atr_pct
                )

                # Set position
                self.position = PositionState(
                    pair=best.pair,
                    direction=best.direction,
                    entry_price=best.base_price,
                    entry_idx=current_idx,
                    stop_loss=sl_price,
                    take_profit=tp_price,
                    atr=best.atr
                )

                # Calculate position size based on divergence
                pos_size = self._calculate_size(best.divergence_score)

                # Generate entry signal
                if timestamp in reference_close.index:
                    if best.direction == 'long':
                        long_entries.loc[timestamp] = True
                    else:
                        short_entries.loc[timestamp] = True
                    size.loc[timestamp] = pos_size

                trade_count += 1
                logger.debug(
                    "Entry %s %s at %s: z=%.2f, prob=%.2f, score=%.3f",
                    best.direction,
                    best.pair.name,
                    timestamp.strftime('%Y-%m-%d %H:%M'),
                    best.z_score,
                    best.probability,
                    best.divergence_score
                )

        logger.info("Generated %d trades in test period", trade_count)

        return long_entries, long_exits, short_entries, short_exits, size

    def _check_exit(self, timestamp: pd.Timestamp) -> tuple[bool, str]:
        """
        Check if current position should be exited.

        Exit conditions:
        1. Z-Score reverted to mean (|Z| < threshold)
        2. Stop-loss hit
        3. Take-profit hit

        Returns:
            Tuple of (should_exit, reason)
        """
        if self.position.pair is None:
            return False, ""

        pair_id = self.position.pair.pair_id
        if pair_id not in self.pair_features:
            return True, "pair_data_missing"

        features = self.pair_features[pair_id]
        # Only look at feature rows at or before `timestamp` (no look-ahead).
        valid = features[features.index <= timestamp]

        if len(valid) == 0:
            return True, "no_data"

        latest = valid.iloc[-1]
        z_score = latest['z_score']
        current_price = latest['base_close']

        # Check mean reversion (primary exit)
        if abs(z_score) < self.config.z_exit_threshold:
            return True, f"mean_reversion (z={z_score:.2f})"

        # Check SL/TP
        return self._check_sl_tp(current_price)

    def _check_sl_tp_only(self, timestamp: pd.Timestamp) -> tuple[bool, str]:
        """
        Check only stop-loss and take-profit conditions.
        Used during minimum hold period.
        """
        if self.position.pair is None:
            return False, ""

        pair_id = self.position.pair.pair_id
        if pair_id not in self.pair_features:
            return True, "pair_data_missing"

        features = self.pair_features[pair_id]
        valid = features[features.index <= timestamp]

        if len(valid) == 0:
            return True, "no_data"

        latest = valid.iloc[-1]
        current_price = latest['base_close']

        return self._check_sl_tp(current_price)

    def _check_sl_tp(self, current_price: float) -> tuple[bool, str]:
        """Check stop-loss and take-profit levels against the current price."""
        if self.position.direction == 'long':
            if current_price <= self.position.stop_loss:
                return True, f"stop_loss ({current_price:.2f} <= {self.position.stop_loss:.2f})"
            if current_price >= self.position.take_profit:
                return True, f"take_profit ({current_price:.2f} >= {self.position.take_profit:.2f})"
        else:  # short
            if current_price >= self.position.stop_loss:
                return True, f"stop_loss ({current_price:.2f} >= {self.position.stop_loss:.2f})"
            if current_price <= self.position.take_profit:
                return True, f"take_profit ({current_price:.2f} <= {self.position.take_profit:.2f})"

        return False, ""

    def _get_current_score(self, timestamp: pd.Timestamp) -> float:
        """Get current position's divergence score for comparison."""
        if self.position.pair is None:
            return 0.0

        pair_id = self.position.pair.pair_id
        if pair_id not in self.pair_features:
            return 0.0

        features = self.pair_features[pair_id]
        valid = features[features.index <= timestamp]

        if len(valid) == 0:
            return 0.0

        latest = valid.iloc[-1]
        z_score = abs(latest['z_score'])

        # Re-score with model
        if self.divergence_scorer.model is not None:
            # Sanitize the feature row before prediction (NaN/inf -> 0).
            feature_row = latest[self.divergence_scorer.feature_cols].fillna(0)
            feature_row = feature_row.replace([np.inf, -np.inf], 0)
            X = pd.DataFrame(
                [feature_row.values],
                columns=self.divergence_scorer.feature_cols
            )
            prob = self.divergence_scorer.model.predict_proba(X)[0, 1]
            return z_score * prob

        # No model: assume a neutral 0.5 probability.
        return z_score * 0.5

    def _calculate_sl_tp(
        self,
        entry_price: float,
        direction: str,
        atr: float,
        atr_pct: float
    ) -> tuple[float, float]:
        """
        Calculate ATR-based dynamic stop-loss and take-profit prices.

        Uses ATR (Average True Range) to set stops that adapt to
        each asset's volatility. More volatile assets get wider stops.

        Args:
            entry_price: Entry price
            direction: 'long' or 'short'
            atr: ATR in price units
            atr_pct: ATR as percentage of price

        Returns:
            Tuple of (stop_loss_price, take_profit_price)
        """
        # Calculate SL/TP as ATR multiples
        if atr > 0 and atr_pct > 0:
            # ATR-based calculation
            sl_distance = atr * self.config.sl_atr_multiplier
            tp_distance = atr * self.config.tp_atr_multiplier

            # Convert to percentage for bounds checking
            sl_pct = sl_distance / entry_price
            tp_pct = tp_distance / entry_price
        else:
            # Fallback to fixed percentages if ATR unavailable
            sl_pct = self.config.base_sl_pct
            tp_pct = self.config.base_tp_pct

        # Apply bounds to prevent extreme stops
        sl_pct = max(self.config.min_sl_pct, min(sl_pct, self.config.max_sl_pct))
        tp_pct = max(self.config.min_tp_pct, min(tp_pct, self.config.max_tp_pct))

        # Calculate actual prices
        if direction == 'long':
            stop_loss = entry_price * (1 - sl_pct)
            take_profit = entry_price * (1 + tp_pct)
        else:  # short
            stop_loss = entry_price * (1 + sl_pct)
            take_profit = entry_price * (1 - tp_pct)

        return stop_loss, take_profit

    def _calculate_size(self, divergence_score: float) -> float:
        """
        Calculate position size based on divergence score.

        Higher divergence = larger position (up to 2x).
        """
        # Base score threshold (Z=1.0, prob=0.5 -> score=0.5)
        base_threshold = 0.5

        # Scale factor
        if divergence_score <= base_threshold:
            return 1.0

        # Linear scaling: 1.0 at threshold, up to 2.0 at 2x threshold
        scale = 1.0 + (divergence_score - base_threshold) / base_threshold
        return min(scale, 2.0)

    def _get_common_index(self) -> pd.DatetimeIndex:
        """Get the intersection of all pair feature indices, sorted ascending."""
        if not self.pair_features:
            return pd.DatetimeIndex([])

        common = None
        for features in self.pair_features.values():
            if common is None:
                common = features.index
            else:
                common = common.intersection(features.index)

        return common.sort_values()

    def _empty_signals(self, close: pd.Series) -> tuple:
        """Return empty signal arrays (no trades) aligned to `close`."""
        empty = self.create_empty_signals(close)
        size = pd.Series(1.0, index=close.index)
        return empty, empty, empty, empty, size
|
||||||
365
strategies/regime_strategy.py
Normal file
365
strategies/regime_strategy.py
Normal file
@@ -0,0 +1,365 @@
|
|||||||
|
import pandas as pd
import numpy as np
import ta
import vectorbt as vbt
from sklearn.ensemble import RandomForestClassifier

from strategies.base import BaseStrategy
from engine.market import MarketType
from engine.data_manager import DataManager
from engine.logging_config import get_logger

logger = get_logger(__name__)


class RegimeReversionStrategy(BaseStrategy):
    """
    ML-Based Regime Detection & Mean Reversion Strategy.

    Logic:
    1. Tracks the BTC/ETH spread (ETH close / BTC close) and its Z-score
       over a rolling ``z_window`` (default 24h).
    2. Uses a Random Forest to predict whether an extreme Z-score will
       revert profitably within ``horizon`` bars.
    3. Features: spread technicals (RSI, ROC) + on-chain flows
       (inflow, funding) when CryptoQuant data is available.
    4. Entry: model probability > 0.5 and |Z| > ``z_entry_threshold``.
    5. Exit: Z-score reversion through 0 (SL/TP handled by the engine).

    Walk-Forward Training:
    - Trains on first `train_ratio` of data (default 70%)
    - Generates signals only for remaining test period (30%)
    - Eliminates look-ahead bias for realistic backtest results
    """

    # Optimal parameters from walk-forward research (2025-10 to 2025-12)
    # Research: research/horizon_optimization_results.csv
    OPTIMAL_HORIZON = 102          # 4.25 days - best Net PnL (+232%)
    OPTIMAL_Z_WINDOW = 24          # 24h rolling window for spread Z-score
    OPTIMAL_TRAIN_RATIO = 0.7      # 70% train / 30% test split
    OPTIMAL_PROFIT_TARGET = 0.005  # 0.5% profit threshold for target definition
    OPTIMAL_Z_ENTRY = 1.0          # Enter when |Z| > 1.0

    def __init__(self,
                 model_path: str = "data/regime_model.pkl",
                 horizon: int = OPTIMAL_HORIZON,
                 z_window: int = OPTIMAL_Z_WINDOW,
                 z_entry_threshold: float = OPTIMAL_Z_ENTRY,
                 profit_target: float = OPTIMAL_PROFIT_TARGET,
                 stop_loss: float = 0.06,   # 6% - accommodates 1.95% avg MAE
                 take_profit: float = 0.05,  # 5% swing target
                 train_ratio: float = OPTIMAL_TRAIN_RATIO,
                 trend_window: int = 0,      # 0 disables the SMA trend filter
                 use_funding_filter: bool = True,   # Enable Funding Rate filter
                 funding_threshold: float = 0.005   # Tightened to 0.005%
                 ):
        super().__init__()
        self.model_path = model_path
        self.horizon = horizon
        self.z_window = z_window
        self.z_entry_threshold = z_entry_threshold
        self.profit_target = profit_target
        self.stop_loss = stop_loss
        self.take_profit = take_profit
        self.train_ratio = train_ratio
        self.trend_window = trend_window
        self.use_funding_filter = use_funding_filter
        self.funding_threshold = funding_threshold

        # Default Strategy Config
        self.default_market_type = MarketType.PERPETUAL
        self.default_leverage = 1

        self.dm = DataManager()
        self.model = None         # Lazily trained in run()
        self.feature_cols = None  # Columns the model was fitted on
        self.train_end_idx = None  # Will store the training cutoff point

    def _empty_result(self, close):
        """Return an all-False 5-tuple matching run()'s success-path arity.

        Bugfix: the original error path returned only 4 values, crashing
        callers that unpack (entries, exits, entries, exits, size).
        """
        empty = self.create_empty_signals(close)
        size = pd.Series(1.0, index=close.index)
        return empty, empty, empty, empty, size

    def run(self, close, **kwargs):
        """
        Execute the strategy logic.

        Assumes this strategy is run on ETH-USDT (the active asset);
        BTC-USDT is fetched internally to calculate the spread.

        Returns:
            (long_entries, long_exits, short_entries, short_exits, size):
            boolean Series aligned to ``close`` plus a per-bar position size.
        """
        # 1. Context: BTC data aligned with the incoming ETH 'close' series.
        logger.info("Fetching BTC context data...")
        try:
            # Research was done on 1h candles, so strategy should be run on 1h.
            # Use PERPETUAL data to match the trading instrument (ETH Perp).
            df_btc = self.dm.load_data("okx", "BTC-USDT", "1h", MarketType.PERPETUAL)
            df_btc = df_btc.reindex(close.index, method='ffill')
            btc_close = df_btc['close']
        except Exception as e:
            logger.error(f"Failed to load BTC context: {e}")
            return self._empty_result(close)

        # 2. Build frames for feature engineering. Volume may arrive via
        # kwargs from Backtester.run_strategy; fall back to zeros if missing.
        eth_vol = kwargs.get('volume')
        if eth_vol is None:
            logger.warning("Volume data missing. Feature calculation might fail.")
            eth_vol = pd.Series(0, index=close.index)

        df_a = pd.DataFrame({'close': btc_close, 'volume': df_btc['volume']})
        df_b = pd.DataFrame({'close': close, 'volume': eth_vol})

        # 3. On-chain data (CryptoQuant). Backtests read the saved CSV;
        # a live deployment would query the API for recent data instead.
        cq_df = None
        try:
            cq_path = "data/cq_training_data.csv"
            cq_df = pd.read_csv(cq_path, index_col='timestamp', parse_dates=True)
            if cq_df.index.tz is None:
                cq_df.index = cq_df.index.tz_localize('UTC')
        except Exception:
            logger.warning("CryptoQuant data not found. Running without on-chain features.")

        # 4. Calculate features.
        features = self.prepare_features(df_a, df_b, cq_df)

        # 5. Walk-forward split: train on first train_ratio, test on remainder.
        n_samples = len(features)
        train_size = int(n_samples * self.train_ratio)
        train_features = features.iloc[:train_size]
        test_features = features.iloc[train_size:]

        if train_features.empty or test_features.empty:
            # Guard: degenerate split (too little data, or train_ratio ~ 0/1)
            # would otherwise raise IndexError below.
            logger.error("Walk-forward split produced an empty train or test set.")
            return self._empty_result(close)

        train_end_date = train_features.index[-1]
        test_start_date = test_features.index[0]
        logger.info(
            f"Walk-Forward Split: Train={len(train_features)} bars "
            f"(until {train_end_date.strftime('%Y-%m-%d')}), "
            f"Test={len(test_features)} bars "
            f"(from {test_start_date.strftime('%Y-%m-%d')})"
        )

        # 6. Train model on the training period ONLY (no look-ahead).
        if self.model is None:
            logger.info("Training Regime Model on training period only...")
            self.model, self.feature_cols = self.train_model(train_features)

        # 7. Predict probabilities on the TEST period only.
        X_test = test_features[self.feature_cols].fillna(0)
        X_test = X_test.replace([np.inf, -np.inf], 0)
        probs = self.model.predict_proba(X_test)[:, 1]

        # 8. Entry signals (test period only):
        #    Z > threshold  (spread high, ETH expensive) -> Short ETH
        #    Z < -threshold (spread low,  ETH cheap)     -> Long ETH
        z_thresh = self.z_entry_threshold
        z_vals = test_features['z_score'].values
        short_signal_test = (probs > 0.5) & (z_vals > z_thresh)
        long_signal_test = (probs > 0.5) & (z_vals < -z_thresh)

        # 8b. Macro trend filter:
        #     Long only if BTC > SMA (bull), Short only if BTC < SMA (bear).
        if self.trend_window > 0:
            btc_sma = btc_close.rolling(window=self.trend_window).mean()
            test_btc_close = btc_close.reindex(test_features.index)
            test_btc_sma = btc_sma.reindex(test_features.index)
            is_bull = (test_btc_close > test_btc_sma).values
            is_bear = (test_btc_close < test_btc_sma).values
            long_signal_test = long_signal_test & is_bull
            short_signal_test = short_signal_test & is_bear

        # 8c. Funding-rate filter.
        #     High positive funding: longs pay shorts and it often marks tops,
        #     so it favors shorts (receive yield + reversion) and penalizes
        #     longs -> block longs. Mirror logic for negative funding
        #     (short-squeeze risk) -> block shorts.
        if self.use_funding_filter and 'btc_funding' in test_features.columns:
            funding = test_features['btc_funding'].values
            thresh = self.funding_threshold
            is_overheated = funding > thresh
            is_oversold = funding < -thresh

            long_signal_test = long_signal_test & (~is_overheated)
            short_signal_test = short_signal_test & (~is_oversold)

            n_blocked_long = (is_overheated & (probs > 0.5) & (z_vals < -z_thresh)).sum()
            n_blocked_short = (is_oversold & (probs > 0.5) & (z_vals > z_thresh)).sum()
            if n_blocked_long > 0 or n_blocked_short > 0:
                logger.info(f"Funding Filter: Blocked {n_blocked_long} Longs, {n_blocked_short} Shorts")

        # 9. Probability-based position sizing:
        #    size = 1.0 + (prob - 0.5) * 2, capped to [1.0, 2.0].
        #    Example: prob=0.6 -> 1.2x, prob=0.8 -> 1.6x.
        probs_series = pd.Series(probs, index=test_features.index)
        probs_aligned = probs_series.reindex(close.index, fill_value=0.0)
        dynamic_size = 1.0 + (probs_aligned - 0.5) * 2.0
        size = dynamic_size.clip(lower=1.0, upper=2.0)

        # 10. Map test-period signals onto the full index
        #     (False throughout the training period).
        long_entries = pd.Series(False, index=close.index)
        short_entries = pd.Series(False, index=close.index)
        for i, idx in enumerate(test_features.index):
            if idx in close.index:
                long_entries.loc[idx] = bool(long_signal_test[i])
                short_entries.loc[idx] = bool(short_signal_test[i])

        # 11. Exits: Z-score crossing back through 0 (mean reversion complete).
        #     Exit Long when Z > 0, exit Short when Z < 0.
        z_reindexed = features['z_score'].reindex(close.index, fill_value=0)
        long_exits = z_reindexed > 0
        short_exits = z_reindexed < 0

        # Log signal counts for verification.
        n_long = long_entries.sum()
        n_short = short_entries.sum()
        logger.info(f"Generated {n_long} long signals, {n_short} short signals (test period only)")

        return long_entries, long_exits, short_entries, short_exits, size

    def prepare_features(self, df_btc, df_eth, cq_df=None):
        """Replicate the research feature engineering.

        Args:
            df_btc: BTC frame with 'close' and 'volume' columns.
            df_eth: ETH frame with 'close' and 'volume' columns.
            cq_df: optional CryptoQuant on-chain frame (funding/inflow columns).

        Returns:
            Feature DataFrame indexed by timestamp, NaN rows dropped.
        """
        # Align both legs on their common timestamps.
        common = df_btc.index.intersection(df_eth.index)
        df_a = df_btc.loc[common].copy()
        df_b = df_eth.loc[common].copy()

        # Spread = ETH / BTC price ratio.
        spread = df_b['close'] / df_a['close']

        # Rolling Z-score of the spread.
        rolling_mean = spread.rolling(window=self.z_window).mean()
        rolling_std = spread.rolling(window=self.z_window).std()
        z_score = (spread - rolling_mean) / rolling_std

        # Spread technicals.
        spread_rsi = ta.momentum.RSIIndicator(spread, window=14).rsi()
        spread_roc = spread.pct_change(periods=5) * 100
        spread_change_1h = spread.pct_change(periods=1)

        # Relative volume.
        vol_ratio = df_b['volume'] / df_a['volume']
        vol_ratio_ma = vol_ratio.rolling(window=12).mean()

        # Relative volatility of returns.
        ret_a = df_a['close'].pct_change()
        ret_b = df_b['close'].pct_change()
        vol_a = ret_a.rolling(window=self.z_window).std()
        vol_b = ret_b.rolling(window=self.z_window).std()
        vol_spread_ratio = vol_b / vol_a

        features = pd.DataFrame(index=spread.index)
        features['spread'] = spread
        features['z_score'] = z_score
        features['spread_rsi'] = spread_rsi
        features['spread_roc'] = spread_roc
        features['spread_change_1h'] = spread_change_1h
        features['vol_ratio'] = vol_ratio
        features['vol_ratio_rel'] = vol_ratio / vol_ratio_ma
        features['vol_diff_ratio'] = vol_spread_ratio

        # Merge on-chain features (forward-filled onto the bar index).
        if cq_df is not None:
            cq_aligned = cq_df.reindex(features.index, method='ffill')
            if 'btc_funding' in cq_aligned.columns and 'eth_funding' in cq_aligned.columns:
                cq_aligned['funding_diff'] = cq_aligned['eth_funding'] - cq_aligned['btc_funding']
            if 'btc_inflow' in cq_aligned.columns and 'eth_inflow' in cq_aligned.columns:
                # +1 guards against division by zero on flat inflow.
                cq_aligned['inflow_ratio'] = cq_aligned['eth_inflow'] / (cq_aligned['btc_inflow'] + 1)
            features = features.join(cq_aligned)

        return features.dropna()

    def train_model(self, train_features):
        """
        Train Random Forest on training data only.

        This method receives ONLY the training subset of features,
        ensuring no look-ahead bias. The model learns from historical
        patterns and is then applied to unseen test data.

        Args:
            train_features: DataFrame containing features for training period only

        Returns:
            (model, feature_columns) tuple.
        """
        threshold = self.profit_target
        horizon = self.horizon
        z_thresh = self.z_entry_threshold

        # Define targets using ONLY training data.
        # Short-spread success (Z > thresh): spread dropped below target
        # within horizon. rolling(h).min().shift(-h) == min over NEXT h bars.
        future_min = train_features['spread'].rolling(window=horizon).min().shift(-horizon)
        target_short = train_features['spread'] * (1 - threshold)
        success_short = (train_features['z_score'] > z_thresh) & (future_min < target_short)

        # Long-spread success (Z < -thresh): spread rose above target within horizon.
        future_max = train_features['spread'].rolling(window=horizon).max().shift(-horizon)
        target_long = train_features['spread'] * (1 + threshold)
        success_long = (train_features['z_score'] < -z_thresh) & (future_max > target_long)

        targets = np.select([success_short, success_long], [1, 1], default=0)

        # Class weight 3:1 compensates for the rarity of profitable reversions.
        model = RandomForestClassifier(
            n_estimators=300, max_depth=5, min_samples_leaf=30,
            class_weight={0: 1, 1: 3}, random_state=42
        )

        # 'spread' is the raw (non-stationary) level, not a feature -- exclude it.
        exclude = ['spread']
        cols = [c for c in train_features.columns if c not in exclude]

        # Clean features.
        X_train = train_features[cols].fillna(0)
        X_train = X_train.replace([np.inf, -np.inf], 0)

        # Remove rows with NaN targets and rows whose forward-looking window
        # runs past the end of the training period (shift created NaNs).
        valid_mask = ~np.isnan(targets) & ~np.isinf(targets)
        valid_mask = valid_mask & (future_min.notna().values) & (future_max.notna().values)

        X_train_clean = X_train[valid_mask]
        targets_clean = targets[valid_mask]

        logger.info(f"Training on {len(X_train_clean)} valid samples (removed {len(X_train) - len(X_train_clean)} with incomplete future data)")

        model.fit(X_train_clean, targets_clean)
        return model, cols
|
||||||
@@ -1,295 +0,0 @@
|
|||||||
# PRD: Market Type Selection for Backtesting
|
|
||||||
|
|
||||||
## Introduction/Overview
|
|
||||||
|
|
||||||
Currently, the backtesting system operates with a single, implicit market type assumption. This PRD defines the implementation of **market type selection** (Spot vs. USDT-M Perpetual Futures) to enable realistic simulation of different trading conditions.
|
|
||||||
|
|
||||||
**Problem Statement:**
|
|
||||||
- Strategies cannot be backtested against different market mechanics (leverage, funding, short-selling)
|
|
||||||
- Fee structures are uniform regardless of market type
|
|
||||||
- No support for short-selling strategies
|
|
||||||
- Data fetching doesn't distinguish between spot and futures markets
|
|
||||||
|
|
||||||
**Goal:**
|
|
||||||
Enable users to backtest strategies against specific market types (Spot or USDT-M Perpetual) with realistic trading conditions matching OKX's live environment.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Goals
|
|
||||||
|
|
||||||
1. **Support two market types:** Spot and USDT-M Perpetual Futures
|
|
||||||
2. **Realistic fee simulation:** Match OKX's fee structure per market type
|
|
||||||
3. **Leverage support:** Per-strategy configurable leverage (perpetuals only)
|
|
||||||
4. **Funding rate simulation:** Simplified funding rate model for perpetuals
|
|
||||||
5. **Short-selling support:** Enable strategies to generate short signals
|
|
||||||
6. **Liquidation awareness:** Warn when positions would be liquidated (no full simulation)
|
|
||||||
7. **Separate data storage:** Download and store data per market type
|
|
||||||
8. **Grid search integration:** Allow leverage optimization in parameter grids
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## User Stories
|
|
||||||
|
|
||||||
1. **As a trader**, I want to backtest my strategy on perpetual futures so that I can simulate leveraged trading with funding costs.
|
|
||||||
|
|
||||||
2. **As a trader**, I want to backtest on spot markets so that I can compare performance without leverage or funding overhead.
|
|
||||||
|
|
||||||
3. **As a strategy developer**, I want to define a default market type for my strategy so that it runs with appropriate settings by default.
|
|
||||||
|
|
||||||
4. **As a trader**, I want to test different leverage levels so that I can find the optimal risk/reward balance.
|
|
||||||
|
|
||||||
5. **As a trader**, I want to see warnings when my position would have been liquidated so that I can adjust my risk parameters.
|
|
||||||
|
|
||||||
6. **As a strategy developer**, I want to create strategies that can go short so that I can profit from downward price movements.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Functional Requirements
|
|
||||||
|
|
||||||
### FR1: Market Type Enum and Configuration
|
|
||||||
|
|
||||||
1.1. Create a `MarketType` enum with values: `SPOT`, `PERPETUAL`
|
|
||||||
|
|
||||||
1.2. Each strategy class must have a `default_market_type` class attribute
|
|
||||||
|
|
||||||
1.3. Market type can be overridden via CLI (optional, for testing)
|
|
||||||
|
|
||||||
### FR2: Data Management
|
|
||||||
|
|
||||||
2.1. Modify `DataManager` to support market type in data paths:
|
|
||||||
- Spot: `data/ccxt/{exchange}/spot/{symbol}/{timeframe}.csv`
|
|
||||||
- Perpetual: `data/ccxt/{exchange}/perpetual/{symbol}/{timeframe}.csv`
|
|
||||||
|
|
||||||
2.2. Update `download` command to accept `--market` flag:
|
|
||||||
```bash
|
|
||||||
uv run python main.py download --pair BTC/USDT --market perpetual
|
|
||||||
```
|
|
||||||
|
|
||||||
2.3. Use CCXT's market type parameter when fetching data:
|
|
||||||
- Spot: `exchange.fetch_ohlcv(symbol, timeframe, ...)`
|
|
||||||
- Perpetual: `exchange.fetch_ohlcv(symbol + ':USDT', timeframe, ...)`
|
|
||||||
|
|
||||||
### FR3: Fee Structure
|
|
||||||
|
|
||||||
3.1. Define default fees per market type (matching OKX):
|
|
||||||
|
|
||||||
| Market Type | Maker Fee | Taker Fee | Notes     |
|-------------|-----------|-----------|-----------|
| Spot        | 0.08%     | 0.10%     | No funding |
| Perpetual   | 0.02%     | 0.05%     | + funding  |
|
|
||||||
|
|
||||||
3.2. Allow fee override via CLI (existing `--fees` flag)
|
|
||||||
|
|
||||||
### FR4: Leverage Support (Perpetual Only)
|
|
||||||
|
|
||||||
4.1. Add `default_leverage` class attribute to strategies (default: 1 for spot, configurable for perpetual)
|
|
||||||
|
|
||||||
4.2. Add `--leverage` CLI flag for backtest command
|
|
||||||
|
|
||||||
4.3. Leverage affects:
|
|
||||||
- Position sizing (notional = cash * leverage)
|
|
||||||
- PnL calculation (multiplied by leverage)
|
|
||||||
- Liquidation threshold calculation
|
|
||||||
|
|
||||||
4.4. Support leverage in grid search parameter grids
|
|
||||||
|
|
||||||
### FR5: Funding Rate Simulation (Perpetual Only)
|
|
||||||
|
|
||||||
5.1. Implement simplified funding rate model:
|
|
||||||
- Default rate: 0.01% per 8 hours (configurable)
|
|
||||||
- Applied every 8 hours to open positions
|
|
||||||
- Positive rate: Longs pay shorts
|
|
||||||
- Negative rate: Shorts pay longs
|
|
||||||
|
|
||||||
5.2. Add `--funding-rate` CLI flag to override default
|
|
||||||
|
|
||||||
5.3. Track cumulative funding paid/received in backtest stats
|
|
||||||
|
|
||||||
### FR6: Short-Selling Support
|
|
||||||
|
|
||||||
6.1. Modify `BaseStrategy.run()` signature to return 4 signal arrays:
|
|
||||||
```python
|
|
||||||
def run(self, close, **kwargs) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
|
|
||||||
"""
|
|
||||||
Returns:
|
|
||||||
long_entries: Boolean signals to open long positions
|
|
||||||
long_exits: Boolean signals to close long positions
|
|
||||||
short_entries: Boolean signals to open short positions
|
|
||||||
short_exits: Boolean signals to close short positions
|
|
||||||
"""
|
|
||||||
```
|
|
||||||
|
|
||||||
6.2. Update `Backtester` to use VectorBT's `direction` parameter or dual portfolio simulation
|
|
||||||
|
|
||||||
6.3. For spot market: Ignore short signals (log warning if present)
|
|
||||||
|
|
||||||
6.4. For perpetual market: Process both long and short signals
|
|
||||||
|
|
||||||
### FR7: Liquidation Warning
|
|
||||||
|
|
||||||
7.1. Calculate liquidation price based on:
|
|
||||||
- Entry price
|
|
||||||
- Leverage
|
|
||||||
- Maintenance margin rate (OKX: ~0.4% for BTC)
|
|
||||||
|
|
||||||
7.2. During backtest, check if price crosses liquidation threshold
|
|
||||||
|
|
||||||
7.3. Log warning with details:
|
|
||||||
```
|
|
||||||
WARNING: Position would be liquidated at bar 1234 (price: $45,000, liq_price: $44,820)
|
|
||||||
```
|
|
||||||
|
|
||||||
7.4. Include liquidation event count in backtest summary stats
|
|
||||||
|
|
||||||
### FR8: Backtester Integration
|
|
||||||
|
|
||||||
8.1. Modify `Backtester.run_strategy()` to accept market type from strategy
|
|
||||||
|
|
||||||
8.2. Apply market-specific simulation parameters:
|
|
||||||
- Fees (if not overridden)
|
|
||||||
- Leverage
|
|
||||||
- Funding rate calculation
|
|
||||||
- Short-selling capability
|
|
||||||
|
|
||||||
8.3. Update portfolio simulation to handle leveraged positions
|
|
||||||
|
|
||||||
### FR9: Reporting Updates
|
|
||||||
|
|
||||||
9.1. Add market type to backtest summary output
|
|
||||||
|
|
||||||
9.2. Add new stats for perpetual backtests:
|
|
||||||
- Total funding paid/received
|
|
||||||
- Number of liquidation warnings
|
|
||||||
- Effective leverage used
|
|
||||||
|
|
||||||
9.3. Update CSV exports to include market-specific columns
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Non-Goals (Out of Scope)
|
|
||||||
|
|
||||||
- **Coin-M (Inverse) Perpetuals:** Not included in v1
|
|
||||||
- **Spot Margin Trading:** Not included in v1
|
|
||||||
- **Expiry Futures:** Not included in v1
|
|
||||||
- **Full Liquidation Simulation:** Only warnings, no automatic position closure
|
|
||||||
- **Real Funding Rate Data:** Use simplified model; historical funding API integration is future work
|
|
||||||
- **Cross-Margin Mode:** Assume isolated margin for simplicity
|
|
||||||
- **Partial Liquidation:** Assume full liquidation threshold only
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Design Considerations
|
|
||||||
|
|
||||||
### Data Directory Structure (New)
|
|
||||||
|
|
||||||
```
data/ccxt/
  okx/
    spot/
      BTC-USDT/
        1m.csv
        1d.csv
    perpetual/
      BTC-USDT/
        1m.csv
        1d.csv
```
|
|
||||||
|
|
||||||
### Strategy Class Example
|
|
||||||
|
|
||||||
```python
|
|
||||||
class MetaSupertrendStrategy(BaseStrategy):
|
|
||||||
default_market_type = MarketType.PERPETUAL
|
|
||||||
default_leverage = 5
|
|
||||||
default_sl_stop = 0.02
|
|
||||||
|
|
||||||
def run(self, close, **kwargs):
|
|
||||||
# ... indicator logic ...
|
|
||||||
return long_entries, long_exits, short_entries, short_exits
|
|
||||||
```
|
|
||||||
|
|
||||||
### CLI Usage Examples
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Download perpetual data
|
|
||||||
uv run python main.py download --pair BTC/USDT --market perpetual
|
|
||||||
|
|
||||||
# Backtest with strategy defaults (uses strategy's default_market_type)
|
|
||||||
uv run python main.py backtest --strategy meta_st --pair BTC/USDT
|
|
||||||
|
|
||||||
# Override leverage
|
|
||||||
uv run python main.py backtest --strategy meta_st --pair BTC/USDT --leverage 10
|
|
||||||
|
|
||||||
# Grid search including leverage
|
|
||||||
uv run python main.py backtest --strategy meta_st --pair BTC/USDT --grid
|
|
||||||
# (leverage can be part of param grid in strategy factory)
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Technical Considerations
|
|
||||||
|
|
||||||
1. **VectorBT Compatibility:**
|
|
||||||
- VectorBT's `Portfolio.from_signals()` supports `direction` parameter for long/short
|
|
||||||
- Alternatively, run two portfolios (long-only, short-only) and combine
|
|
||||||
- Leverage can be simulated via `size` parameter or post-processing returns
|
|
||||||
|
|
||||||
2. **CCXT Market Type Handling:**
|
|
||||||
- OKX perpetual symbols use format: `BTC/USDT:USDT`
|
|
||||||
- Need to handle symbol conversion in DataManager
|
|
||||||
|
|
||||||
3. **Funding Rate Timing:**
|
|
||||||
- OKX funding at 00:00, 08:00, 16:00 UTC
|
|
||||||
- Need to identify these timestamps in the data and apply funding
|
|
||||||
|
|
||||||
4. **Backward Compatibility:**
|
|
||||||
- Existing strategies should work with minimal changes
|
|
||||||
- Default to `MarketType.SPOT` if not specified
|
|
||||||
- Existing 2-tuple return from `run()` should be interpreted as long-only
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Success Metrics
|
|
||||||
|
|
||||||
1. **Functional:** All existing backtests produce same results when run with `MarketType.SPOT`
|
|
||||||
2. **Functional:** Perpetual backtests correctly apply funding every 8 hours
|
|
||||||
3. **Functional:** Leverage multiplies both gains and losses correctly
|
|
||||||
4. **Functional:** Short signals are processed for perpetual, ignored for spot
|
|
||||||
5. **Usability:** Users can switch market types with minimal configuration
|
|
||||||
6. **Accuracy:** Fee structures match OKX's published rates
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Open Questions
|
|
||||||
|
|
||||||
1. **Position Sizing with Leverage:**
|
|
||||||
- Should leverage affect `init_cash` interpretation (notional value) or position size directly?
|
|
||||||
- Recommendation: Affect position size; `init_cash` remains the actual margin deposited.
|
|
||||||
|
|
||||||
2. **Multiple Positions:**
|
|
||||||
- Can strategies hold both long and short simultaneously (hedging)?
|
|
||||||
- Recommendation: No for v1; only one direction at a time.
|
|
||||||
|
|
||||||
3. **Funding Rate Sign:**
|
|
||||||
- When funding is positive, longs pay shorts. Should we assume the user is always the "taker" of funding?
|
|
||||||
- Recommendation: Yes, apply funding based on position direction.
|
|
||||||
|
|
||||||
4. **Migration Path:**
|
|
||||||
- Should we migrate existing data to new directory structure?
|
|
||||||
- Recommendation: No auto-migration; users re-download with `--market` flag.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Implementation Priority
|
|
||||||
|
|
||||||
| Priority | Component | Complexity |
|----------|-----------|------------|
| 1 | MarketType enum + strategy defaults | Low |
| 2 | DataManager market type support | Medium |
| 3 | Fee structure per market type | Low |
| 4 | Short-selling signal support | Medium |
| 5 | Leverage simulation | Medium |
| 6 | Funding rate simulation | Medium |
| 7 | Liquidation warnings | Low |
| 8 | Reporting updates | Low |
| 9 | Grid search leverage support | Low |
|
|
||||||
321
tasks/prd-multi-pair-divergence-strategy.md
Normal file
321
tasks/prd-multi-pair-divergence-strategy.md
Normal file
@@ -0,0 +1,321 @@
|
|||||||
|
# PRD: Multi-Pair Divergence Selection Strategy
|
||||||
|
|
||||||
|
## 1. Introduction / Overview
|
||||||
|
|
||||||
|
This document describes the **Multi-Pair Divergence Selection Strategy**, an extension of the existing BTC/ETH regime reversion system. The strategy expands spread analysis to the **top 10 cryptocurrencies by market cap**, calculates divergence scores for all tradeable pairs, and dynamically selects the **most divergent pair** for trading.
|
||||||
|
|
||||||
|
The core hypothesis: by scanning multiple pairs simultaneously, we can identify stronger mean-reversion opportunities than focusing on a single pair, improving net PnL while maintaining the proven ML-based regime detection approach.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2. Goals
|
||||||
|
|
||||||
|
1. **Extend regime detection** to top 10 market cap cryptocurrencies
|
||||||
|
2. **Dynamically select** the most divergent tradeable pair each cycle
|
||||||
|
3. **Integrate volatility** into dynamic SL/TP calculations
|
||||||
|
4. **Filter correlated pairs** to avoid redundant positions
|
||||||
|
5. **Improve net PnL** compared to single-pair BTC/ETH strategy
|
||||||
|
6. **Backtest-first** implementation with walk-forward validation
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3. User Stories
|
||||||
|
|
||||||
|
### US-1: Multi-Pair Analysis
|
||||||
|
> As a trader, I want the system to analyze spread divergence across multiple cryptocurrency pairs so that I can identify the best trading opportunity at any given moment.
|
||||||
|
|
||||||
|
### US-2: Dynamic Pair Selection
|
||||||
|
> As a trader, I want the system to automatically select and trade the pair with the highest divergence score (combination of Z-score magnitude and ML probability) so that I maximize mean-reversion profit potential.
|
||||||
|
|
||||||
|
### US-3: Volatility-Adjusted Risk
|
||||||
|
> As a trader, I want stop-loss and take-profit levels to adapt to each pair's volatility so that I avoid being stopped out prematurely on volatile assets while protecting profits on stable ones.
|
||||||
|
|
||||||
|
### US-4: Correlation Filtering
|
||||||
|
> As a trader, I want the system to avoid selecting pairs that are highly correlated with my current position so that I don't inadvertently double-down on the same market exposure.
|
||||||
|
|
||||||
|
### US-5: Backtest Validation
|
||||||
|
> As a researcher, I want to backtest this multi-pair strategy with walk-forward training so that I can validate improvement over the single-pair baseline without look-ahead bias.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4. Functional Requirements
|
||||||
|
|
||||||
|
### 4.1 Data Management
|
||||||
|
|
||||||
|
| ID | Requirement |
|
||||||
|
|----|-------------|
|
||||||
|
| FR-1.1 | System must support loading OHLCV data for top 10 market cap cryptocurrencies |
|
||||||
|
| FR-1.2 | Target assets: BTC, ETH, SOL, XRP, BNB, DOGE, ADA, AVAX, LINK, DOT (configurable) |
|
||||||
|
| FR-1.3 | System must identify all directly tradeable cross-pairs on OKX perpetuals |
|
||||||
|
| FR-1.4 | System must align timestamps across all pairs for synchronized analysis |
|
||||||
|
| FR-1.5 | System must handle missing data gracefully (skip pair if insufficient history) |
|
||||||
|
|
||||||
|
### 4.2 Pair Generation
|
||||||
|
|
||||||
|
| ID | Requirement |
|
||||||
|
|----|-------------|
|
||||||
|
| FR-2.1 | Generate all unique pairs from asset universe: N*(N-1)/2 pairs (e.g., 45 pairs for 10 assets) |
|
||||||
|
| FR-2.2 | Filter pairs to only those directly tradeable on OKX (no USDT intermediate) |
|
||||||
|
| FR-2.3 | Fallback: If cross-pair not available, calculate synthetic spread via USDT pairs |
|
||||||
|
| FR-2.4 | Store pair metadata: base asset, quote asset, exchange symbol, tradeable flag |
|
||||||
|
|
||||||
|
### 4.3 Feature Engineering (Per Pair)
|
||||||
|
|
||||||
|
| ID | Requirement |
|
||||||
|
|----|-------------|
|
||||||
|
| FR-3.1 | Calculate spread ratio: `asset_a_close / asset_b_close` |
|
||||||
|
| FR-3.2 | Calculate Z-Score with configurable rolling window (default: 24h) |
|
||||||
|
| FR-3.3 | Calculate spread technicals: RSI(14), ROC(5), 1h change |
|
||||||
|
| FR-3.4 | Calculate volume ratio and relative volume |
|
||||||
|
| FR-3.5 | Calculate volatility ratio: `std(returns_a) / std(returns_b)` over Z-window |
|
||||||
|
| FR-3.6 | Calculate realized volatility for each asset (for dynamic SL/TP) |
|
||||||
|
| FR-3.7 | Merge on-chain data (funding rates, inflows) if available per asset |
|
||||||
|
| FR-3.8 | Add pair identifier as categorical feature for universal model |
|
||||||
|
|
||||||
|
### 4.4 Correlation Filtering
|
||||||
|
|
||||||
|
| ID | Requirement |
|
||||||
|
|----|-------------|
|
||||||
|
| FR-4.1 | Calculate rolling correlation matrix between all assets (default: 168h / 7 days) |
|
||||||
|
| FR-4.2 | Define correlation threshold (default: 0.85) |
|
||||||
|
| FR-4.3 | If current position exists, exclude pairs where either asset has correlation > threshold with held asset |
|
||||||
|
| FR-4.4 | Log filtered pairs with reason for exclusion |
|
||||||
|
|
||||||
|
### 4.5 Divergence Scoring & Pair Selection
|
||||||
|
|
||||||
|
| ID | Requirement |
|
||||||
|
|----|-------------|
|
||||||
|
| FR-5.1 | Calculate divergence score: `abs(z_score) * model_probability` |
|
||||||
|
| FR-5.2 | Only consider pairs where `abs(z_score) > z_entry_threshold` (default: 1.0) |
|
||||||
|
| FR-5.3 | Only consider pairs where `model_probability > prob_threshold` (default: 0.5) |
|
||||||
|
| FR-5.4 | Apply correlation filter to eligible pairs |
|
||||||
|
| FR-5.5 | Select pair with highest divergence score |
|
||||||
|
| FR-5.6 | If no pair qualifies, signal "hold" |
|
||||||
|
| FR-5.7 | Log all pair scores for analysis/debugging |
|
||||||
|
|
||||||
|
### 4.6 ML Model (Universal)
|
||||||
|
|
||||||
|
| ID | Requirement |
|
||||||
|
|----|-------------|
|
||||||
|
| FR-6.1 | Train single Random Forest model on all pairs combined |
|
||||||
|
| FR-6.2 | Include `pair_id` as one-hot encoded or label-encoded feature |
|
||||||
|
| FR-6.3 | Target: binary (1 = profitable reversion within horizon, 0 = no reversion) |
|
||||||
|
| FR-6.4 | Walk-forward training: initial window uses a 70% train / 30% test split |
|
||||||
|
| FR-6.5 | Daily retraining schedule (for live, configurable for backtest) |
|
||||||
|
| FR-6.6 | Model hyperparameters: `n_estimators=300, max_depth=5, min_samples_leaf=30, class_weight={0:1, 1:3}` |
|
||||||
|
| FR-6.7 | Save/load model with feature column metadata |
|
||||||
|
|
||||||
|
### 4.7 Signal Generation
|
||||||
|
|
||||||
|
| ID | Requirement |
|
||||||
|
|----|-------------|
|
||||||
|
| FR-7.1 | Direction: If `z_score > threshold` -> Short spread (short asset_a), If `z_score < -threshold` -> Long spread (long asset_a) |
|
||||||
|
| FR-7.2 | Apply funding rate filter per asset (block if extreme funding opposes direction) |
|
||||||
|
| FR-7.3 | Output signal: `{pair, action, side, probability, z_score, divergence_score, reason}` |
|
||||||
|
|
||||||
|
### 4.8 Position Sizing
|
||||||
|
|
||||||
|
| ID | Requirement |
|
||||||
|
|----|-------------|
|
||||||
|
| FR-8.1 | Base size: 100% of available subaccount balance |
|
||||||
|
| FR-8.2 | Scale by divergence: `size_multiplier = 1.0 + (divergence_score - base_threshold) * scaling_factor` |
|
||||||
|
| FR-8.3 | Cap multiplier between 1.0x and 2.0x |
|
||||||
|
| FR-8.4 | Respect exchange minimum order size per asset |
|
||||||
|
|
||||||
|
### 4.9 Dynamic SL/TP (Volatility-Adjusted)
|
||||||
|
|
||||||
|
| ID | Requirement |
|
||||||
|
|----|-------------|
|
||||||
|
| FR-9.1 | Calculate asset realized volatility from hourly returns, scaled to daily: `std(hourly_returns) * sqrt(24)` |
|
||||||
|
| FR-9.2 | Base SL: `entry_price * (1 - base_sl_pct * vol_multiplier)` for longs |
|
||||||
|
| FR-9.3 | Base TP: `entry_price * (1 + base_tp_pct * vol_multiplier)` for longs |
|
||||||
|
| FR-9.4 | `vol_multiplier = asset_volatility / baseline_volatility` (baseline = BTC volatility) |
|
||||||
|
| FR-9.5 | Cap vol_multiplier between 0.5x and 2.0x to prevent extreme values |
|
||||||
|
| FR-9.6 | Invert logic for short positions |
|
||||||
|
|
||||||
|
### 4.10 Exit Conditions
|
||||||
|
|
||||||
|
| ID | Requirement |
|
||||||
|
|----|-------------|
|
||||||
|
| FR-10.1 | Exit when Z-score crosses back through 0 (mean reversion complete) |
|
||||||
|
| FR-10.2 | Exit when dynamic SL or TP hit |
|
||||||
|
| FR-10.3 | No minimum holding period (can switch pairs immediately) |
|
||||||
|
| FR-10.4 | If new pair has higher divergence score, close current and open new |
|
||||||
|
|
||||||
|
### 4.11 Backtest Integration
|
||||||
|
|
||||||
|
| ID | Requirement |
|
||||||
|
|----|-------------|
|
||||||
|
| FR-11.1 | Integrate with existing `engine/backtester.py` framework |
|
||||||
|
| FR-11.2 | Support 1h timeframe (matching live trading) |
|
||||||
|
| FR-11.3 | Walk-forward validation: train on 70%, test on 30% |
|
||||||
|
| FR-11.4 | Output: trades log, equity curve, performance metrics |
|
||||||
|
| FR-11.5 | Compare against single-pair BTC/ETH baseline |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5. Non-Goals (Out of Scope)
|
||||||
|
|
||||||
|
1. **Live trading implementation** - Backtest validation first
|
||||||
|
2. **Multi-position portfolio** - Single pair at a time for v1
|
||||||
|
3. **Cross-exchange arbitrage** - OKX only
|
||||||
|
4. **Alternative ML models** - Stick with Random Forest for consistency
|
||||||
|
5. **Sub-1h timeframes** - 1h candles only for initial version
|
||||||
|
6. **Leveraged positions** - 1x leverage for backtest
|
||||||
|
7. **Portfolio-level VaR/risk budgeting** - Full subaccount allocation
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6. Design Considerations
|
||||||
|
|
||||||
|
### 6.1 Architecture
|
||||||
|
|
||||||
|
```
|
||||||
|
strategies/
|
||||||
|
multi_pair/
|
||||||
|
__init__.py
|
||||||
|
pair_scanner.py # Generates all pairs, filters tradeable
|
||||||
|
feature_engine.py # Calculates features for all pairs
|
||||||
|
correlation.py # Rolling correlation matrix & filtering
|
||||||
|
divergence_scorer.py # Ranks pairs by divergence score
|
||||||
|
strategy.py # Main strategy orchestration
|
||||||
|
```
|
||||||
|
|
||||||
|
### 6.2 Data Flow
|
||||||
|
|
||||||
|
```
|
||||||
|
1. Load OHLCV for all 10 assets
|
||||||
|
2. Generate pair combinations (45 pairs)
|
||||||
|
3. Filter to tradeable pairs (OKX check)
|
||||||
|
4. Calculate features for each pair
|
||||||
|
5. Train/load universal ML model
|
||||||
|
6. Predict probability for all pairs
|
||||||
|
7. Calculate divergence scores
|
||||||
|
8. Apply correlation filter
|
||||||
|
9. Select top pair
|
||||||
|
10. Generate signal with dynamic SL/TP
|
||||||
|
11. Execute in backtest engine
|
||||||
|
```
|
||||||
|
|
||||||
|
### 6.3 Configuration
|
||||||
|
|
||||||
|
```python
|
||||||
|
@dataclass
|
||||||
|
class MultiPairConfig:
|
||||||
|
# Assets
|
||||||
|
assets: list[str] = field(default_factory=lambda: [
|
||||||
|
"BTC", "ETH", "SOL", "XRP", "BNB",
|
||||||
|
"DOGE", "ADA", "AVAX", "LINK", "DOT"
|
||||||
|
])
|
||||||
|
|
||||||
|
# Thresholds
|
||||||
|
z_window: int = 24
|
||||||
|
z_entry_threshold: float = 1.0
|
||||||
|
prob_threshold: float = 0.5
|
||||||
|
correlation_threshold: float = 0.85
|
||||||
|
correlation_window: int = 168 # 7 days in hours
|
||||||
|
|
||||||
|
# Risk
|
||||||
|
base_sl_pct: float = 0.06
|
||||||
|
base_tp_pct: float = 0.05
|
||||||
|
vol_multiplier_min: float = 0.5
|
||||||
|
vol_multiplier_max: float = 2.0
|
||||||
|
|
||||||
|
# Model
|
||||||
|
train_ratio: float = 0.7
|
||||||
|
horizon: int = 102
|
||||||
|
profit_target: float = 0.005
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7. Technical Considerations
|
||||||
|
|
||||||
|
### 7.1 Dependencies
|
||||||
|
|
||||||
|
- Extend `DataManager` to load multiple symbols
|
||||||
|
- Query OKX API for available perpetual cross-pairs
|
||||||
|
- Reuse existing feature engineering from `RegimeReversionStrategy`
|
||||||
|
|
||||||
|
### 7.2 Performance
|
||||||
|
|
||||||
|
- Pre-calculate all pair features in batch (vectorized)
|
||||||
|
- Cache correlation matrix (update every N candles, not every minute)
|
||||||
|
- Model inference is fast (single predict call with all pairs as rows)
|
||||||
|
|
||||||
|
### 7.3 Edge Cases
|
||||||
|
|
||||||
|
- Handle pairs with insufficient history (< 200 bars) - exclude
|
||||||
|
- Handle assets delisted mid-backtest - skip pair
|
||||||
|
- Handle zero-volume periods - use last valid price
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 8. Success Metrics
|
||||||
|
|
||||||
|
| Metric | Baseline (BTC/ETH) | Target |
|
||||||
|
|--------|-------------------|--------|
|
||||||
|
| Net PnL | Current performance | > 10% improvement |
|
||||||
|
| Number of Trades | N | Comparable or higher |
|
||||||
|
| Win Rate | Baseline % | Maintain or improve |
|
||||||
|
| Average Trade Duration | Baseline hours | Flexible |
|
||||||
|
| Max Drawdown | Baseline % | Not significantly worse |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 9. Open Questions
|
||||||
|
|
||||||
|
1. **OKX Cross-Pairs**: Need to verify which cross-pairs are available on OKX perpetuals. May need to fallback to synthetic spreads for most pairs.
|
||||||
|
|
||||||
|
2. **On-Chain Data**: CryptoQuant data currently covers BTC/ETH. Should we:
|
||||||
|
- Run without on-chain features for other assets?
|
||||||
|
- Source alternative on-chain data?
|
||||||
|
- Use funding rates only (available from OKX)?
|
||||||
|
|
||||||
|
3. **Pair ID Encoding**: For the universal model, should pair_id be:
|
||||||
|
- One-hot encoded (adds 45 features)?
|
||||||
|
- Label encoded (single ordinal feature)?
|
||||||
|
- Hierarchical (base_asset + quote_asset as separate features)?
|
||||||
|
|
||||||
|
4. **Synthetic Spreads**: If trading SOL/DOT spread but only USDT pairs available:
|
||||||
|
- Calculate spread synthetically: `SOL-USDT / DOT-USDT`
|
||||||
|
- Execute as two legs: Long SOL-USDT, Short DOT-USDT
|
||||||
|
- This doubles fees and adds execution complexity. Include in v1?
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 10. Implementation Phases
|
||||||
|
|
||||||
|
### Phase 1: Data & Infrastructure (Est. 2-3 days)
|
||||||
|
- Extend DataManager for multi-symbol loading
|
||||||
|
- Build pair scanner with OKX tradeable filter
|
||||||
|
- Implement correlation matrix calculation
|
||||||
|
|
||||||
|
### Phase 2: Feature Engineering (Est. 2 days)
|
||||||
|
- Adapt existing feature calculation for arbitrary pairs
|
||||||
|
- Add pair identifier feature
|
||||||
|
- Batch feature calculation for all pairs
|
||||||
|
|
||||||
|
### Phase 3: Model & Scoring (Est. 2 days)
|
||||||
|
- Train universal model on all pairs
|
||||||
|
- Implement divergence scoring
|
||||||
|
- Add correlation filtering to pair selection
|
||||||
|
|
||||||
|
### Phase 4: Strategy Integration (Est. 2-3 days)
|
||||||
|
- Implement dynamic SL/TP with volatility
|
||||||
|
- Integrate with backtester
|
||||||
|
- Build strategy orchestration class
|
||||||
|
|
||||||
|
### Phase 5: Validation & Comparison (Est. 2 days)
|
||||||
|
- Run walk-forward backtest
|
||||||
|
- Compare against BTC/ETH baseline
|
||||||
|
- Generate performance report
|
||||||
|
|
||||||
|
**Total Estimated Effort: 10-12 days**
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
*Document Version: 1.0*
|
||||||
|
*Created: 2026-01-15*
|
||||||
|
*Author: AI Assistant*
|
||||||
|
*Status: Draft - Awaiting Review*
|
||||||
@@ -1,76 +0,0 @@
|
|||||||
# PRD: VectorBT Migration & CCXT Integration
|
|
||||||
|
|
||||||
## 1. Introduction
|
|
||||||
The goal of this project is to refactor the current backtesting infrastructure to a professional-grade stack using **VectorBT** for high-performance backtesting and **CCXT** for robust historical data acquisition. The system will support rapid prototyping of "many simple strategies," parameter optimization (Grid Search), and stability testing (Walk-Forward Analysis).
|
|
||||||
|
|
||||||
## 2. Goals
|
|
||||||
- **Replace Custom Backtester:** Retire the existing loop-based backtesting logic in favor of vectorized operations using `vectorbt`.
|
|
||||||
- **Automate Data Collection:** Implement a `ccxt` based downloader to fetch and cache OHLCV data from OKX (and other exchanges) automatically.
|
|
||||||
- **Enable Optimization:** Built-in support for Grid Search to find optimal strategy parameters.
|
|
||||||
- **Validation:** Implement Walk-Forward Analysis (WFA) to validate strategy robustness and prevent overfitting.
|
|
||||||
- **Standardized Reporting:** Generate consistent outputs: Console summaries, CSV logs, and VectorBT interactive plots.
|
|
||||||
|
|
||||||
## 3. User Stories
|
|
||||||
- **Data Acquisition:** "As a user, I want to run a command `download_data --pair BTC/USDT --exchange okx` and have the system fetch historical 1-minute candles and save them to `data/ccxt/okx/BTC-USDT/1m.csv`."
|
|
||||||
- **Strategy Dev:** "As a researcher, I want to define a new strategy by simply writing a class/function that defines entry/exit signals, without worrying about the backtesting loop."
|
|
||||||
- **Optimization:** "As a researcher, I want to say 'Optimize RSI period between 10 and 20' and get a heatmap of results."
|
|
||||||
- **Validation:** "As a researcher, I want to verify if my 'best' parameters work on unseen data using Walk-Forward Analysis."
|
|
||||||
- **Analysis:** "As a user, I want to see an equity curve and key metrics (Sharpe, Drawdown) immediately after a test run."
|
|
||||||
|
|
||||||
## 4. Functional Requirements
|
|
||||||
|
|
||||||
### 4.1 Data Module (`data_manager`)
|
|
||||||
- **Exchange Interface:** Use `ccxt` to connect to exchanges (initially OKX).
|
|
||||||
- **Fetching Logic:** Fetch OHLCV data in chunks to handle rate limits and long histories.
|
|
||||||
- **Storage:** Save data to standardized paths: `data/ccxt/{exchange}/{pair}/{timeframe}.csv` (e.g., `data/ccxt/okx/BTC-USDT/1m.csv`).
|
|
||||||
- **Loading:** Utility to load saved CSVs into a Pandas DataFrame compatible with `vectorbt`.
|
|
||||||
|
|
||||||
### 4.2 Strategy Interface (`strategies/`)
|
|
||||||
- **Base Protocol:** Define a standard structure for strategies. A strategy should return/define:
|
|
||||||
- Indicator calculations (Vectorized).
|
|
||||||
- Entry signals (Boolean Series).
|
|
||||||
- Exit signals (Boolean Series).
|
|
||||||
- **Parameterization:** Strategies must accept dynamic parameters to support Grid Search.
|
|
||||||
|
|
||||||
### 4.3 Backtest Engine (`engine.py`)
|
|
||||||
- **Simulation:** Use `vectorbt.Portfolio.from_signals` (or similar) for fast simulation.
|
|
||||||
- **Cost Model:** Support configurable fees (maker/taker) and slippage estimates.
|
|
||||||
- **Grid Search:** Utilize `vectorbt`'s parameter broadcasting to run many variations simultaneously.
|
|
||||||
- **Walk-Forward Analysis:**
|
|
||||||
- Implement a splitting mechanism (e.g., `vectorbt.Splitter`) to divide data into In-Sample (Train) and Out-of-Sample (Test) sets.
|
|
||||||
- Execute optimization on Train, validate on Test.
|
|
||||||
|
|
||||||
### 4.4 Reporting (`reporting.py`)
|
|
||||||
- **Console:** Print key metrics: Total Return, Sharpe Ratio, Max Drawdown, Win Rate, Count of Trades.
|
|
||||||
- **Files:** Save detailed trade logs and metrics summaries to `backtest_logs/`.
|
|
||||||
- **Visuals:** Generate and save/show `vectorbt` plots (Equity curve, Drawdowns).
|
|
||||||
|
|
||||||
## 5. Non-Goals
|
|
||||||
- Real-time live trading execution (this is strictly for research/backtesting).
|
|
||||||
- Complex Machine Learning models (initially focusing on indicator-based logic).
|
|
||||||
- High-frequency tick-level backtesting (1-minute granularity is the target).
|
|
||||||
|
|
||||||
## 6. Technical Architecture Proposal
|
|
||||||
```text
|
|
||||||
project_root/
|
|
||||||
├── data/
|
|
||||||
│ └── ccxt/ # New data storage structure
|
|
||||||
├── strategies/ # Strategy definitions
|
|
||||||
│ ├── __init__.py
|
|
||||||
│ ├── base.py # Abstract Base Class
|
|
||||||
│ └── ma_cross.py # Example strategy
|
|
||||||
├── engine/
|
|
||||||
│ ├── data_loader.py # CCXT wrapper
|
|
||||||
│ ├── backtester.py # VBT runner
|
|
||||||
│ └── optimizer.py # Grid Search & WFA logic
|
|
||||||
├── main.py # CLI entry point
|
|
||||||
└── pyproject.toml
|
|
||||||
```
|
|
||||||
|
|
||||||
## 7. Success Metrics
|
|
||||||
- Can download 1 year of 1m BTC/USDT data from OKX in < 2 minutes.
|
|
||||||
- Can run a 100-parameter grid search on 1 year of 1m data in < 10 seconds.
|
|
||||||
- Walk-forward analysis produces a clear "Robustness Score" or visual comparison of Train vs Test performance.
|
|
||||||
|
|
||||||
## 8. Open Questions
|
|
||||||
- Do we need to handle funding rates for perp futures in the PnL calculation immediately? (Assumed NO for V1, stick to spot/simple futures price action).
|
|
||||||
500
uv.lock
generated
500
uv.lock
generated
@@ -125,6 +125,37 @@ wheels = [
|
|||||||
{ url = "https://files.pythonhosted.org/packages/fb/76/641ae371508676492379f16e2fa48f4e2c11741bd63c48be4b12a6b09cba/aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e", size = 7490, upload-time = "2025-07-03T22:54:42.156Z" },
|
{ url = "https://files.pythonhosted.org/packages/fb/76/641ae371508676492379f16e2fa48f4e2c11741bd63c48be4b12a6b09cba/aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e", size = 7490, upload-time = "2025-07-03T22:54:42.156Z" },
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "annotated-doc"
|
||||||
|
version = "0.0.4"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/57/ba/046ceea27344560984e26a590f90bc7f4a75b06701f653222458922b558c/annotated_doc-0.0.4.tar.gz", hash = "sha256:fbcda96e87e9c92ad167c2e53839e57503ecfda18804ea28102353485033faa4", size = 7288, upload-time = "2025-11-10T22:07:42.062Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/1e/d3/26bf1008eb3d2daa8ef4cacc7f3bfdc11818d111f7e2d0201bc6e3b49d45/annotated_doc-0.0.4-py3-none-any.whl", hash = "sha256:571ac1dc6991c450b25a9c2d84a3705e2ae7a53467b5d111c24fa8baabbed320", size = 5303, upload-time = "2025-11-10T22:07:40.673Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "annotated-types"
|
||||||
|
version = "0.7.0"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "anyio"
|
||||||
|
version = "4.12.1"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
dependencies = [
|
||||||
|
{ name = "idna" },
|
||||||
|
{ name = "typing-extensions", marker = "python_full_version < '3.13'" },
|
||||||
|
]
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/96/f0/5eb65b2bb0d09ac6776f2eb54adee6abe8228ea05b20a5ad0e4945de8aac/anyio-4.12.1.tar.gz", hash = "sha256:41cfcc3a4c85d3f05c932da7c26d0201ac36f72abd4435ba90d0464a3ffed703", size = 228685, upload-time = "2026-01-06T11:45:21.246Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/38/0e/27be9fdef66e72d64c0cdc3cc2823101b80585f8119b5c112c2e8f5f7dab/anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c", size = 113592, upload-time = "2026-01-06T11:45:19.497Z" },
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "asttokens"
|
name = "asttokens"
|
||||||
version = "3.0.1"
|
version = "3.0.1"
|
||||||
@@ -286,6 +317,18 @@ wheels = [
|
|||||||
{ url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" },
|
{ url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" },
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "click"
|
||||||
|
version = "8.3.1"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
dependencies = [
|
||||||
|
{ name = "colorama", marker = "sys_platform == 'win32'" },
|
||||||
|
]
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" },
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "coincurve"
|
name = "coincurve"
|
||||||
version = "21.0.0"
|
version = "21.0.0"
|
||||||
@@ -505,6 +548,21 @@ wheels = [
|
|||||||
{ url = "https://files.pythonhosted.org/packages/c1/ea/53f2148663b321f21b5a606bd5f191517cf40b7072c0497d3c92c4a13b1e/executing-2.2.1-py2.py3-none-any.whl", hash = "sha256:760643d3452b4d777d295bb167ccc74c64a81df23fb5e08eff250c425a4b2017", size = 28317, upload-time = "2025-09-01T09:48:08.5Z" },
|
{ url = "https://files.pythonhosted.org/packages/c1/ea/53f2148663b321f21b5a606bd5f191517cf40b7072c0497d3c92c4a13b1e/executing-2.2.1-py2.py3-none-any.whl", hash = "sha256:760643d3452b4d777d295bb167ccc74c64a81df23fb5e08eff250c425a4b2017", size = 28317, upload-time = "2025-09-01T09:48:08.5Z" },
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "fastapi"
|
||||||
|
version = "0.128.0"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
dependencies = [
|
||||||
|
{ name = "annotated-doc" },
|
||||||
|
{ name = "pydantic" },
|
||||||
|
{ name = "starlette" },
|
||||||
|
{ name = "typing-extensions" },
|
||||||
|
]
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/52/08/8c8508db6c7b9aae8f7175046af41baad690771c9bcde676419965e338c7/fastapi-0.128.0.tar.gz", hash = "sha256:1cc179e1cef10a6be60ffe429f79b829dce99d8de32d7acb7e6c8dfdf7f2645a", size = 365682, upload-time = "2025-12-27T15:21:13.714Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/5c/05/5cbb59154b093548acd0f4c7c474a118eda06da25aa75c616b72d8fcd92a/fastapi-0.128.0-py3-none-any.whl", hash = "sha256:aebd93f9716ee3b4f4fcfe13ffb7cf308d99c9f3ab5622d8877441072561582d", size = 103094, upload-time = "2025-12-27T15:21:12.154Z" },
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "fonttools"
|
name = "fonttools"
|
||||||
version = "4.61.1"
|
version = "4.61.1"
|
||||||
@@ -635,6 +693,79 @@ wheels = [
|
|||||||
{ url = "https://files.pythonhosted.org/packages/9a/9a/e35b4a917281c0b8419d4207f4334c8e8c5dbf4f3f5f9ada73958d937dcc/frozenlist-1.8.0-py3-none-any.whl", hash = "sha256:0c18a16eab41e82c295618a77502e17b195883241c563b00f0aa5106fc4eaa0d", size = 13409, upload-time = "2025-10-06T05:38:16.721Z" },
|
{ url = "https://files.pythonhosted.org/packages/9a/9a/e35b4a917281c0b8419d4207f4334c8e8c5dbf4f3f5f9ada73958d937dcc/frozenlist-1.8.0-py3-none-any.whl", hash = "sha256:0c18a16eab41e82c295618a77502e17b195883241c563b00f0aa5106fc4eaa0d", size = 13409, upload-time = "2025-10-06T05:38:16.721Z" },
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "greenlet"
|
||||||
|
version = "3.3.0"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/c7/e5/40dbda2736893e3e53d25838e0f19a2b417dfc122b9989c91918db30b5d3/greenlet-3.3.0.tar.gz", hash = "sha256:a82bb225a4e9e4d653dd2fb7b8b2d36e4fb25bc0165422a11e48b88e9e6f78fb", size = 190651, upload-time = "2025-12-04T14:49:44.05Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f8/0a/a3871375c7b9727edaeeea994bfff7c63ff7804c9829c19309ba2e058807/greenlet-3.3.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:b01548f6e0b9e9784a2c99c5651e5dc89ffcbe870bc5fb2e5ef864e9cc6b5dcb", size = 276379, upload-time = "2025-12-04T14:23:30.498Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/43/ab/7ebfe34dce8b87be0d11dae91acbf76f7b8246bf9d6b319c741f99fa59c6/greenlet-3.3.0-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:349345b770dc88f81506c6861d22a6ccd422207829d2c854ae2af8025af303e3", size = 597294, upload-time = "2025-12-04T14:50:06.847Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/a4/39/f1c8da50024feecd0793dbd5e08f526809b8ab5609224a2da40aad3a7641/greenlet-3.3.0-cp312-cp312-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e8e18ed6995e9e2c0b4ed264d2cf89260ab3ac7e13555b8032b25a74c6d18655", size = 607742, upload-time = "2025-12-04T14:57:42.349Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/75/b0/6bde0b1011a60782108c01de5913c588cf51a839174538d266de15e4bf4d/greenlet-3.3.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:047ab3df20ede6a57c35c14bf5200fcf04039d50f908270d3f9a7a82064f543b", size = 609885, upload-time = "2025-12-04T14:26:02.368Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/49/0e/49b46ac39f931f59f987b7cd9f34bfec8ef81d2a1e6e00682f55be5de9f4/greenlet-3.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2d9ad37fc657b1102ec880e637cccf20191581f75c64087a549e66c57e1ceb53", size = 1567424, upload-time = "2025-12-04T15:04:23.757Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/05/f5/49a9ac2dff7f10091935def9165c90236d8f175afb27cbed38fb1d61ab6b/greenlet-3.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:83cd0e36932e0e7f36a64b732a6f60c2fc2df28c351bae79fbaf4f8092fe7614", size = 1636017, upload-time = "2025-12-04T14:27:29.688Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/6c/79/3912a94cf27ec503e51ba493692d6db1e3cd8ac7ac52b0b47c8e33d7f4f9/greenlet-3.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:a7a34b13d43a6b78abf828a6d0e87d3385680eaf830cd60d20d52f249faabf39", size = 301964, upload-time = "2025-12-04T14:36:58.316Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/02/2f/28592176381b9ab2cafa12829ba7b472d177f3acc35d8fbcf3673d966fff/greenlet-3.3.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:a1e41a81c7e2825822f4e068c48cb2196002362619e2d70b148f20a831c00739", size = 275140, upload-time = "2025-12-04T14:23:01.282Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/2c/80/fbe937bf81e9fca98c981fe499e59a3f45df2a04da0baa5c2be0dca0d329/greenlet-3.3.0-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9f515a47d02da4d30caaa85b69474cec77b7929b2e936ff7fb853d42f4bf8808", size = 599219, upload-time = "2025-12-04T14:50:08.309Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/c2/ff/7c985128f0514271b8268476af89aee6866df5eec04ac17dcfbc676213df/greenlet-3.3.0-cp313-cp313-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7d2d9fd66bfadf230b385fdc90426fcd6eb64db54b40c495b72ac0feb5766c54", size = 610211, upload-time = "2025-12-04T14:57:43.968Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/fd/8e/424b8c6e78bd9837d14ff7df01a9829fc883ba2ab4ea787d4f848435f23f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:087ea5e004437321508a8d6f20efc4cfec5e3c30118e1417ea96ed1d93950527", size = 612833, upload-time = "2025-12-04T14:26:03.669Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b5/ba/56699ff9b7c76ca12f1cdc27a886d0f81f2189c3455ff9f65246780f713d/greenlet-3.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ab97cf74045343f6c60a39913fa59710e4bd26a536ce7ab2397adf8b27e67c39", size = 1567256, upload-time = "2025-12-04T15:04:25.276Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/1e/37/f31136132967982d698c71a281a8901daf1a8fbab935dce7c0cf15f942cc/greenlet-3.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5375d2e23184629112ca1ea89a53389dddbffcf417dad40125713d88eb5f96e8", size = 1636483, upload-time = "2025-12-04T14:27:30.804Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/7e/71/ba21c3fb8c5dce83b8c01f458a42e99ffdb1963aeec08fff5a18588d8fd7/greenlet-3.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:9ee1942ea19550094033c35d25d20726e4f1c40d59545815e1128ac58d416d38", size = 301833, upload-time = "2025-12-04T14:32:23.929Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/d7/7c/f0a6d0ede2c7bf092d00bc83ad5bafb7e6ec9b4aab2fbdfa6f134dc73327/greenlet-3.3.0-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:60c2ef0f578afb3c8d92ea07ad327f9a062547137afe91f38408f08aacab667f", size = 275671, upload-time = "2025-12-04T14:23:05.267Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/44/06/dac639ae1a50f5969d82d2e3dd9767d30d6dbdbab0e1a54010c8fe90263c/greenlet-3.3.0-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a5d554d0712ba1de0a6c94c640f7aeba3f85b3a6e1f2899c11c2c0428da9365", size = 646360, upload-time = "2025-12-04T14:50:10.026Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/e0/94/0fb76fe6c5369fba9bf98529ada6f4c3a1adf19e406a47332245ef0eb357/greenlet-3.3.0-cp314-cp314-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3a898b1e9c5f7307ebbde4102908e6cbfcb9ea16284a3abe15cab996bee8b9b3", size = 658160, upload-time = "2025-12-04T14:57:45.41Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5773edda4dc00e173820722711d043799d3adb4f01731f40619e07ea2750b955", size = 660166, upload-time = "2025-12-04T14:26:05.099Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/4b/d2/91465d39164eaa0085177f61983d80ffe746c5a1860f009811d498e7259c/greenlet-3.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ac0549373982b36d5fd5d30beb8a7a33ee541ff98d2b502714a09f1169f31b55", size = 1615193, upload-time = "2025-12-04T15:04:27.041Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/42/1b/83d110a37044b92423084d52d5d5a3b3a73cafb51b547e6d7366ff62eff1/greenlet-3.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d198d2d977460358c3b3a4dc844f875d1adb33817f0613f663a656f463764ccc", size = 1683653, upload-time = "2025-12-04T14:27:32.366Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/7c/9a/9030e6f9aa8fd7808e9c31ba4c38f87c4f8ec324ee67431d181fe396d705/greenlet-3.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:73f51dd0e0bdb596fb0417e475fa3c5e32d4c83638296e560086b8d7da7c4170", size = 305387, upload-time = "2025-12-04T14:26:51.063Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/a0/66/bd6317bc5932accf351fc19f177ffba53712a202f9df10587da8df257c7e/greenlet-3.3.0-cp314-cp314t-macosx_11_0_universal2.whl", hash = "sha256:d6ed6f85fae6cdfdb9ce04c9bf7a08d666cfcfb914e7d006f44f840b46741931", size = 282638, upload-time = "2025-12-04T14:25:20.941Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/30/cf/cc81cb030b40e738d6e69502ccbd0dd1bced0588e958f9e757945de24404/greenlet-3.3.0-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d9125050fcf24554e69c4cacb086b87b3b55dc395a8b3ebe6487b045b2614388", size = 651145, upload-time = "2025-12-04T14:50:11.039Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/9c/ea/1020037b5ecfe95ca7df8d8549959baceb8186031da83d5ecceff8b08cd2/greenlet-3.3.0-cp314-cp314t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:87e63ccfa13c0a0f6234ed0add552af24cc67dd886731f2261e46e241608bee3", size = 654236, upload-time = "2025-12-04T14:57:47.007Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/57/b9/f8025d71a6085c441a7eaff0fd928bbb275a6633773667023d19179fe815/greenlet-3.3.0-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3c6e9b9c1527a78520357de498b0e709fb9e2f49c3a513afd5a249007261911b", size = 653783, upload-time = "2025-12-04T14:26:06.225Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f6/c7/876a8c7a7485d5d6b5c6821201d542ef28be645aa024cfe1145b35c120c1/greenlet-3.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:286d093f95ec98fdd92fcb955003b8a3d054b4e2cab3e2707a5039e7b50520fd", size = 1614857, upload-time = "2025-12-04T15:04:28.484Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/4f/dc/041be1dff9f23dac5f48a43323cd0789cb798342011c19a248d9c9335536/greenlet-3.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c10513330af5b8ae16f023e8ddbfb486ab355d04467c4679c5cfe4659975dd9", size = 1676034, upload-time = "2025-12-04T14:27:33.531Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "h11"
|
||||||
|
version = "0.16.0"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "httptools"
|
||||||
|
version = "0.7.1"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/b5/46/120a669232c7bdedb9d52d4aeae7e6c7dfe151e99dc70802e2fc7a5e1993/httptools-0.7.1.tar.gz", hash = "sha256:abd72556974f8e7c74a259655924a717a2365b236c882c3f6f8a45fe94703ac9", size = 258961, upload-time = "2025-10-10T03:55:08.559Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/53/7f/403e5d787dc4942316e515e949b0c8a013d84078a915910e9f391ba9b3ed/httptools-0.7.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:38e0c83a2ea9746ebbd643bdfb521b9aa4a91703e2cd705c20443405d2fd16a5", size = 206280, upload-time = "2025-10-10T03:54:39.274Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/2a/0d/7f3fd28e2ce311ccc998c388dd1c53b18120fda3b70ebb022b135dc9839b/httptools-0.7.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f25bbaf1235e27704f1a7b86cd3304eabc04f569c828101d94a0e605ef7205a5", size = 110004, upload-time = "2025-10-10T03:54:40.403Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/84/a6/b3965e1e146ef5762870bbe76117876ceba51a201e18cc31f5703e454596/httptools-0.7.1-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:2c15f37ef679ab9ecc06bfc4e6e8628c32a8e4b305459de7cf6785acd57e4d03", size = 517655, upload-time = "2025-10-10T03:54:41.347Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/11/7d/71fee6f1844e6fa378f2eddde6c3e41ce3a1fb4b2d81118dd544e3441ec0/httptools-0.7.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7fe6e96090df46b36ccfaf746f03034e5ab723162bc51b0a4cf58305324036f2", size = 511440, upload-time = "2025-10-10T03:54:42.452Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/22/a5/079d216712a4f3ffa24af4a0381b108aa9c45b7a5cc6eb141f81726b1823/httptools-0.7.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f72fdbae2dbc6e68b8239defb48e6a5937b12218e6ffc2c7846cc37befa84362", size = 495186, upload-time = "2025-10-10T03:54:43.937Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/e9/9e/025ad7b65278745dee3bd0ebf9314934c4592560878308a6121f7f812084/httptools-0.7.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e99c7b90a29fd82fea9ef57943d501a16f3404d7b9ee81799d41639bdaae412c", size = 499192, upload-time = "2025-10-10T03:54:45.003Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/6d/de/40a8f202b987d43afc4d54689600ff03ce65680ede2f31df348d7f368b8f/httptools-0.7.1-cp312-cp312-win_amd64.whl", hash = "sha256:3e14f530fefa7499334a79b0cf7e7cd2992870eb893526fb097d51b4f2d0f321", size = 86694, upload-time = "2025-10-10T03:54:45.923Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/09/8f/c77b1fcbfd262d422f12da02feb0d218fa228d52485b77b953832105bb90/httptools-0.7.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6babce6cfa2a99545c60bfef8bee0cc0545413cb0018f617c8059a30ad985de3", size = 202889, upload-time = "2025-10-10T03:54:47.089Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/0a/1a/22887f53602feaa066354867bc49a68fc295c2293433177ee90870a7d517/httptools-0.7.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:601b7628de7504077dd3dcb3791c6b8694bbd967148a6d1f01806509254fb1ca", size = 108180, upload-time = "2025-10-10T03:54:48.052Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/32/6a/6aaa91937f0010d288d3d124ca2946d48d60c3a5ee7ca62afe870e3ea011/httptools-0.7.1-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:04c6c0e6c5fb0739c5b8a9eb046d298650a0ff38cf42537fc372b28dc7e4472c", size = 478596, upload-time = "2025-10-10T03:54:48.919Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/6d/70/023d7ce117993107be88d2cbca566a7c1323ccbaf0af7eabf2064fe356f6/httptools-0.7.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:69d4f9705c405ae3ee83d6a12283dc9feba8cc6aaec671b412917e644ab4fa66", size = 473268, upload-time = "2025-10-10T03:54:49.993Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/32/4d/9dd616c38da088e3f436e9a616e1d0cc66544b8cdac405cc4e81c8679fc7/httptools-0.7.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:44c8f4347d4b31269c8a9205d8a5ee2df5322b09bbbd30f8f862185bb6b05346", size = 455517, upload-time = "2025-10-10T03:54:51.066Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/1d/3a/a6c595c310b7df958e739aae88724e24f9246a514d909547778d776799be/httptools-0.7.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:465275d76db4d554918aba40bf1cbebe324670f3dfc979eaffaa5d108e2ed650", size = 458337, upload-time = "2025-10-10T03:54:52.196Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/fd/82/88e8d6d2c51edc1cc391b6e044c6c435b6aebe97b1abc33db1b0b24cd582/httptools-0.7.1-cp313-cp313-win_amd64.whl", hash = "sha256:322d00c2068d125bd570f7bf78b2d367dad02b919d8581d7476d8b75b294e3e6", size = 85743, upload-time = "2025-10-10T03:54:53.448Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/34/50/9d095fcbb6de2d523e027a2f304d4551855c2f46e0b82befd718b8b20056/httptools-0.7.1-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:c08fe65728b8d70b6923ce31e3956f859d5e1e8548e6f22ec520a962c6757270", size = 203619, upload-time = "2025-10-10T03:54:54.321Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/07/f0/89720dc5139ae54b03f861b5e2c55a37dba9a5da7d51e1e824a1f343627f/httptools-0.7.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:7aea2e3c3953521c3c51106ee11487a910d45586e351202474d45472db7d72d3", size = 108714, upload-time = "2025-10-10T03:54:55.163Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b3/cb/eea88506f191fb552c11787c23f9a405f4c7b0c5799bf73f2249cd4f5228/httptools-0.7.1-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:0e68b8582f4ea9166be62926077a3334064d422cf08ab87d8b74664f8e9058e1", size = 472909, upload-time = "2025-10-10T03:54:56.056Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/e0/4a/a548bdfae6369c0d078bab5769f7b66f17f1bfaa6fa28f81d6be6959066b/httptools-0.7.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:df091cf961a3be783d6aebae963cc9b71e00d57fa6f149025075217bc6a55a7b", size = 470831, upload-time = "2025-10-10T03:54:57.219Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/4d/31/14df99e1c43bd132eec921c2e7e11cda7852f65619bc0fc5bdc2d0cb126c/httptools-0.7.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f084813239e1eb403ddacd06a30de3d3e09a9b76e7894dcda2b22f8a726e9c60", size = 452631, upload-time = "2025-10-10T03:54:58.219Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/22/d2/b7e131f7be8d854d48cb6d048113c30f9a46dca0c9a8b08fcb3fcd588cdc/httptools-0.7.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:7347714368fb2b335e9063bc2b96f2f87a9ceffcd9758ac295f8bbcd3ffbc0ca", size = 452910, upload-time = "2025-10-10T03:54:59.366Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/53/cf/878f3b91e4e6e011eff6d1fa9ca39f7eb17d19c9d7971b04873734112f30/httptools-0.7.1-cp314-cp314-win_amd64.whl", hash = "sha256:cfabda2a5bb85aa2a904ce06d974a3f30fb36cc63d7feaddec05d2050acede96", size = 88205, upload-time = "2025-10-10T03:55:00.389Z" },
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "idna"
|
name = "idna"
|
||||||
version = "3.11"
|
version = "3.11"
|
||||||
@@ -843,6 +974,7 @@ version = "0.1.0"
|
|||||||
source = { virtual = "." }
|
source = { virtual = "." }
|
||||||
dependencies = [
|
dependencies = [
|
||||||
{ name = "ccxt" },
|
{ name = "ccxt" },
|
||||||
|
{ name = "fastapi" },
|
||||||
{ name = "matplotlib" },
|
{ name = "matplotlib" },
|
||||||
{ name = "numpy" },
|
{ name = "numpy" },
|
||||||
{ name = "pandas" },
|
{ name = "pandas" },
|
||||||
@@ -850,7 +982,9 @@ dependencies = [
|
|||||||
{ name = "python-dotenv" },
|
{ name = "python-dotenv" },
|
||||||
{ name = "requests" },
|
{ name = "requests" },
|
||||||
{ name = "scikit-learn" },
|
{ name = "scikit-learn" },
|
||||||
|
{ name = "sqlalchemy" },
|
||||||
{ name = "ta" },
|
{ name = "ta" },
|
||||||
|
{ name = "uvicorn", extra = ["standard"] },
|
||||||
{ name = "vectorbt" },
|
{ name = "vectorbt" },
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -862,6 +996,7 @@ dev = [
|
|||||||
[package.metadata]
|
[package.metadata]
|
||||||
requires-dist = [
|
requires-dist = [
|
||||||
{ name = "ccxt", specifier = ">=4.5.32" },
|
{ name = "ccxt", specifier = ">=4.5.32" },
|
||||||
|
{ name = "fastapi", specifier = ">=0.115.0" },
|
||||||
{ name = "matplotlib", specifier = ">=3.10.0" },
|
{ name = "matplotlib", specifier = ">=3.10.0" },
|
||||||
{ name = "numpy", specifier = ">=2.3.2" },
|
{ name = "numpy", specifier = ">=2.3.2" },
|
||||||
{ name = "pandas", specifier = ">=2.3.1" },
|
{ name = "pandas", specifier = ">=2.3.1" },
|
||||||
@@ -870,7 +1005,9 @@ requires-dist = [
|
|||||||
{ name = "python-dotenv", specifier = ">=1.2.1" },
|
{ name = "python-dotenv", specifier = ">=1.2.1" },
|
||||||
{ name = "requests", specifier = ">=2.32.5" },
|
{ name = "requests", specifier = ">=2.32.5" },
|
||||||
{ name = "scikit-learn", specifier = ">=1.6.0" },
|
{ name = "scikit-learn", specifier = ">=1.6.0" },
|
||||||
|
{ name = "sqlalchemy", specifier = ">=2.0.0" },
|
||||||
{ name = "ta", specifier = ">=0.11.0" },
|
{ name = "ta", specifier = ">=0.11.0" },
|
||||||
|
{ name = "uvicorn", extras = ["standard"], specifier = ">=0.34.0" },
|
||||||
{ name = "vectorbt", specifier = ">=0.28.2" },
|
{ name = "vectorbt", specifier = ">=0.28.2" },
|
||||||
]
|
]
|
||||||
provides-extras = ["dev"]
|
provides-extras = ["dev"]
|
||||||
@@ -1486,6 +1623,92 @@ wheels = [
|
|||||||
{ url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" },
|
{ url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" },
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pydantic"
|
||||||
|
version = "2.12.5"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
dependencies = [
|
||||||
|
{ name = "annotated-types" },
|
||||||
|
{ name = "pydantic-core" },
|
||||||
|
{ name = "typing-extensions" },
|
||||||
|
{ name = "typing-inspection" },
|
||||||
|
]
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/69/44/36f1a6e523abc58ae5f928898e4aca2e0ea509b5aa6f6f392a5d882be928/pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49", size = 821591, upload-time = "2025-11-26T15:11:46.471Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/5a/87/b70ad306ebb6f9b585f114d0ac2137d792b48be34d732d60e597c2f8465a/pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d", size = 463580, upload-time = "2025-11-26T15:11:44.605Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pydantic-core"
|
||||||
|
version = "2.41.5"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
dependencies = [
|
||||||
|
{ name = "typing-extensions" },
|
||||||
|
]
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/5f/5d/5f6c63eebb5afee93bcaae4ce9a898f3373ca23df3ccaef086d0233a35a7/pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7", size = 2110990, upload-time = "2025-11-04T13:39:58.079Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/aa/32/9c2e8ccb57c01111e0fd091f236c7b371c1bccea0fa85247ac55b1e2b6b6/pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0", size = 1896003, upload-time = "2025-11-04T13:39:59.956Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/68/b8/a01b53cb0e59139fbc9e4fda3e9724ede8de279097179be4ff31f1abb65a/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69", size = 1919200, upload-time = "2025-11-04T13:40:02.241Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/38/de/8c36b5198a29bdaade07b5985e80a233a5ac27137846f3bc2d3b40a47360/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75", size = 2052578, upload-time = "2025-11-04T13:40:04.401Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/00/b5/0e8e4b5b081eac6cb3dbb7e60a65907549a1ce035a724368c330112adfdd/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05", size = 2208504, upload-time = "2025-11-04T13:40:06.072Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/77/56/87a61aad59c7c5b9dc8caad5a41a5545cba3810c3e828708b3d7404f6cef/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc", size = 2335816, upload-time = "2025-11-04T13:40:07.835Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/0d/76/941cc9f73529988688a665a5c0ecff1112b3d95ab48f81db5f7606f522d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c", size = 2075366, upload-time = "2025-11-04T13:40:09.804Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/d3/43/ebef01f69baa07a482844faaa0a591bad1ef129253ffd0cdaa9d8a7f72d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5", size = 2171698, upload-time = "2025-11-04T13:40:12.004Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b1/87/41f3202e4193e3bacfc2c065fab7706ebe81af46a83d3e27605029c1f5a6/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c", size = 2132603, upload-time = "2025-11-04T13:40:13.868Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/49/7d/4c00df99cb12070b6bccdef4a195255e6020a550d572768d92cc54dba91a/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294", size = 2329591, upload-time = "2025-11-04T13:40:15.672Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/cc/6a/ebf4b1d65d458f3cda6a7335d141305dfa19bdc61140a884d165a8a1bbc7/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1", size = 2319068, upload-time = "2025-11-04T13:40:17.532Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/49/3b/774f2b5cd4192d5ab75870ce4381fd89cf218af999515baf07e7206753f0/pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d", size = 1985908, upload-time = "2025-11-04T13:40:19.309Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/86/45/00173a033c801cacf67c190fef088789394feaf88a98a7035b0e40d53dc9/pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815", size = 2020145, upload-time = "2025-11-04T13:40:21.548Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f9/22/91fbc821fa6d261b376a3f73809f907cec5ca6025642c463d3488aad22fb/pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3", size = 1976179, upload-time = "2025-11-04T13:40:23.393Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/87/06/8806241ff1f70d9939f9af039c6c35f2360cf16e93c2ca76f184e76b1564/pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9", size = 2120403, upload-time = "2025-11-04T13:40:25.248Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/94/02/abfa0e0bda67faa65fef1c84971c7e45928e108fe24333c81f3bfe35d5f5/pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34", size = 1896206, upload-time = "2025-11-04T13:40:27.099Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/15/df/a4c740c0943e93e6500f9eb23f4ca7ec9bf71b19e608ae5b579678c8d02f/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0", size = 1919307, upload-time = "2025-11-04T13:40:29.806Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/9a/e3/6324802931ae1d123528988e0e86587c2072ac2e5394b4bc2bc34b61ff6e/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33", size = 2063258, upload-time = "2025-11-04T13:40:33.544Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/c9/d4/2230d7151d4957dd79c3044ea26346c148c98fbf0ee6ebd41056f2d62ab5/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e", size = 2214917, upload-time = "2025-11-04T13:40:35.479Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/e6/9f/eaac5df17a3672fef0081b6c1bb0b82b33ee89aa5cec0d7b05f52fd4a1fa/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2", size = 2332186, upload-time = "2025-11-04T13:40:37.436Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/cf/4e/35a80cae583a37cf15604b44240e45c05e04e86f9cfd766623149297e971/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586", size = 2073164, upload-time = "2025-11-04T13:40:40.289Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/bf/e3/f6e262673c6140dd3305d144d032f7bd5f7497d3871c1428521f19f9efa2/pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d", size = 2179146, upload-time = "2025-11-04T13:40:42.809Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/75/c7/20bd7fc05f0c6ea2056a4565c6f36f8968c0924f19b7d97bbfea55780e73/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740", size = 2137788, upload-time = "2025-11-04T13:40:44.752Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/3a/8d/34318ef985c45196e004bc46c6eab2eda437e744c124ef0dbe1ff2c9d06b/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e", size = 2340133, upload-time = "2025-11-04T13:40:46.66Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/9c/59/013626bf8c78a5a5d9350d12e7697d3d4de951a75565496abd40ccd46bee/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858", size = 2324852, upload-time = "2025-11-04T13:40:48.575Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/1a/d9/c248c103856f807ef70c18a4f986693a46a8ffe1602e5d361485da502d20/pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36", size = 1994679, upload-time = "2025-11-04T13:40:50.619Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/9e/8b/341991b158ddab181cff136acd2552c9f35bd30380422a639c0671e99a91/pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11", size = 2019766, upload-time = "2025-11-04T13:40:52.631Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/73/7d/f2f9db34af103bea3e09735bb40b021788a5e834c81eedb541991badf8f5/pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd", size = 1981005, upload-time = "2025-11-04T13:40:54.734Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/ea/28/46b7c5c9635ae96ea0fbb779e271a38129df2550f763937659ee6c5dbc65/pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a", size = 2119622, upload-time = "2025-11-04T13:40:56.68Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/74/1a/145646e5687e8d9a1e8d09acb278c8535ebe9e972e1f162ed338a622f193/pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14", size = 1891725, upload-time = "2025-11-04T13:40:58.807Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/23/04/e89c29e267b8060b40dca97bfc64a19b2a3cf99018167ea1677d96368273/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1", size = 1915040, upload-time = "2025-11-04T13:41:00.853Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/84/a3/15a82ac7bd97992a82257f777b3583d3e84bdb06ba6858f745daa2ec8a85/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66", size = 2063691, upload-time = "2025-11-04T13:41:03.504Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/74/9b/0046701313c6ef08c0c1cf0e028c67c770a4e1275ca73131563c5f2a310a/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869", size = 2213897, upload-time = "2025-11-04T13:41:05.804Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/8a/cd/6bac76ecd1b27e75a95ca3a9a559c643b3afcd2dd62086d4b7a32a18b169/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2", size = 2333302, upload-time = "2025-11-04T13:41:07.809Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/4c/d2/ef2074dc020dd6e109611a8be4449b98cd25e1b9b8a303c2f0fca2f2bcf7/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375", size = 2064877, upload-time = "2025-11-04T13:41:09.827Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/18/66/e9db17a9a763d72f03de903883c057b2592c09509ccfe468187f2a2eef29/pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553", size = 2180680, upload-time = "2025-11-04T13:41:12.379Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/d3/9e/3ce66cebb929f3ced22be85d4c2399b8e85b622db77dad36b73c5387f8f8/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90", size = 2138960, upload-time = "2025-11-04T13:41:14.627Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/a6/62/205a998f4327d2079326b01abee48e502ea739d174f0a89295c481a2272e/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07", size = 2339102, upload-time = "2025-11-04T13:41:16.868Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/3c/0d/f05e79471e889d74d3d88f5bd20d0ed189ad94c2423d81ff8d0000aab4ff/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb", size = 2326039, upload-time = "2025-11-04T13:41:18.934Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/ec/e1/e08a6208bb100da7e0c4b288eed624a703f4d129bde2da475721a80cab32/pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23", size = 1995126, upload-time = "2025-11-04T13:41:21.418Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/48/5d/56ba7b24e9557f99c9237e29f5c09913c81eeb2f3217e40e922353668092/pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf", size = 2015489, upload-time = "2025-11-04T13:41:24.076Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/4e/bb/f7a190991ec9e3e0ba22e4993d8755bbc4a32925c0b5b42775c03e8148f9/pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0", size = 1977288, upload-time = "2025-11-04T13:41:26.33Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/92/ed/77542d0c51538e32e15afe7899d79efce4b81eee631d99850edc2f5e9349/pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a", size = 2120255, upload-time = "2025-11-04T13:41:28.569Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/bb/3d/6913dde84d5be21e284439676168b28d8bbba5600d838b9dca99de0fad71/pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3", size = 1863760, upload-time = "2025-11-04T13:41:31.055Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/5a/f0/e5e6b99d4191da102f2b0eb9687aaa7f5bea5d9964071a84effc3e40f997/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c", size = 1878092, upload-time = "2025-11-04T13:41:33.21Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/71/48/36fb760642d568925953bcc8116455513d6e34c4beaa37544118c36aba6d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612", size = 2053385, upload-time = "2025-11-04T13:41:35.508Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/20/25/92dc684dd8eb75a234bc1c764b4210cf2646479d54b47bf46061657292a8/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d", size = 2218832, upload-time = "2025-11-04T13:41:37.732Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/e2/09/f53e0b05023d3e30357d82eb35835d0f6340ca344720a4599cd663dca599/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9", size = 2327585, upload-time = "2025-11-04T13:41:40Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/aa/4e/2ae1aa85d6af35a39b236b1b1641de73f5a6ac4d5a7509f77b814885760c/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660", size = 2041078, upload-time = "2025-11-04T13:41:42.323Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/cd/13/2e215f17f0ef326fc72afe94776edb77525142c693767fc347ed6288728d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9", size = 2173914, upload-time = "2025-11-04T13:41:45.221Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/02/7a/f999a6dcbcd0e5660bc348a3991c8915ce6599f4f2c6ac22f01d7a10816c/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3", size = 2129560, upload-time = "2025-11-04T13:41:47.474Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/3a/b1/6c990ac65e3b4c079a4fb9f5b05f5b013afa0f4ed6780a3dd236d2cbdc64/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf", size = 2329244, upload-time = "2025-11-04T13:41:49.992Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/d9/02/3c562f3a51afd4d88fff8dffb1771b30cfdfd79befd9883ee094f5b6c0d8/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470", size = 2331955, upload-time = "2025-11-04T13:41:54.079Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/09/32/59b0c7e63e277fa7911c2fc70ccfb45ce4b98991e7ef37110663437005af/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd", size = 2110495, upload-time = "2025-11-04T13:42:49.689Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/aa/81/05e400037eaf55ad400bcd318c05bb345b57e708887f07ddb2d20e3f0e98/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc", size = 1915388, upload-time = "2025-11-04T13:42:52.215Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/6e/0d/e3549b2399f71d56476b77dbf3cf8937cec5cd70536bdc0e374a421d0599/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56", size = 1942879, upload-time = "2025-11-04T13:42:56.483Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f7/07/34573da085946b6a313d7c42f82f16e8920bfd730665de2d11c0c37a74b5/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b", size = 2139017, upload-time = "2025-11-04T13:42:59.471Z" },
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "pygments"
|
name = "pygments"
|
||||||
version = "2.19.2"
|
version = "2.19.2"
|
||||||
@@ -1550,6 +1773,52 @@ wheels = [
|
|||||||
{ url = "https://files.pythonhosted.org/packages/81/c4/34e93fe5f5429d7570ec1fa436f1986fb1f00c3e0f43a589fe2bbcd22c3f/pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00", size = 509225, upload-time = "2025-03-25T02:24:58.468Z" },
|
{ url = "https://files.pythonhosted.org/packages/81/c4/34e93fe5f5429d7570ec1fa436f1986fb1f00c3e0f43a589fe2bbcd22c3f/pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00", size = 509225, upload-time = "2025-03-25T02:24:58.468Z" },
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pyyaml"
|
||||||
|
version = "6.0.3"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/05/8e/961c0007c59b8dd7729d542c61a4d537767a59645b82a0b521206e1e25c2/pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f", size = 130960, upload-time = "2025-09-25T21:33:16.546Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/d1/33/422b98d2195232ca1826284a76852ad5a86fe23e31b009c9886b2d0fb8b2/pyyaml-6.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196", size = 182063, upload-time = "2025-09-25T21:32:11.445Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/89/a0/6cf41a19a1f2f3feab0e9c0b74134aa2ce6849093d5517a0c550fe37a648/pyyaml-6.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0", size = 173973, upload-time = "2025-09-25T21:32:12.492Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/ed/23/7a778b6bd0b9a8039df8b1b1d80e2e2ad78aa04171592c8a5c43a56a6af4/pyyaml-6.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28", size = 775116, upload-time = "2025-09-25T21:32:13.652Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/65/30/d7353c338e12baef4ecc1b09e877c1970bd3382789c159b4f89d6a70dc09/pyyaml-6.0.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c", size = 844011, upload-time = "2025-09-25T21:32:15.21Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/8b/9d/b3589d3877982d4f2329302ef98a8026e7f4443c765c46cfecc8858c6b4b/pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc", size = 807870, upload-time = "2025-09-25T21:32:16.431Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/05/c0/b3be26a015601b822b97d9149ff8cb5ead58c66f981e04fedf4e762f4bd4/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e", size = 761089, upload-time = "2025-09-25T21:32:17.56Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/be/8e/98435a21d1d4b46590d5459a22d88128103f8da4c2d4cb8f14f2a96504e1/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea", size = 790181, upload-time = "2025-09-25T21:32:18.834Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/74/93/7baea19427dcfbe1e5a372d81473250b379f04b1bd3c4c5ff825e2327202/pyyaml-6.0.3-cp312-cp312-win32.whl", hash = "sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5", size = 137658, upload-time = "2025-09-25T21:32:20.209Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/86/bf/899e81e4cce32febab4fb42bb97dcdf66bc135272882d1987881a4b519e9/pyyaml-6.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b", size = 154003, upload-time = "2025-09-25T21:32:21.167Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/1a/08/67bd04656199bbb51dbed1439b7f27601dfb576fb864099c7ef0c3e55531/pyyaml-6.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd", size = 140344, upload-time = "2025-09-25T21:32:22.617Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/d1/11/0fd08f8192109f7169db964b5707a2f1e8b745d4e239b784a5a1dd80d1db/pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8", size = 181669, upload-time = "2025-09-25T21:32:23.673Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b1/16/95309993f1d3748cd644e02e38b75d50cbc0d9561d21f390a76242ce073f/pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1", size = 173252, upload-time = "2025-09-25T21:32:25.149Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/50/31/b20f376d3f810b9b2371e72ef5adb33879b25edb7a6d072cb7ca0c486398/pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c", size = 767081, upload-time = "2025-09-25T21:32:26.575Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/49/1e/a55ca81e949270d5d4432fbbd19dfea5321eda7c41a849d443dc92fd1ff7/pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5", size = 841159, upload-time = "2025-09-25T21:32:27.727Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/74/27/e5b8f34d02d9995b80abcef563ea1f8b56d20134d8f4e5e81733b1feceb2/pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6", size = 801626, upload-time = "2025-09-25T21:32:28.878Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f9/11/ba845c23988798f40e52ba45f34849aa8a1f2d4af4b798588010792ebad6/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6", size = 753613, upload-time = "2025-09-25T21:32:30.178Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/3d/e0/7966e1a7bfc0a45bf0a7fb6b98ea03fc9b8d84fa7f2229e9659680b69ee3/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be", size = 794115, upload-time = "2025-09-25T21:32:31.353Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/de/94/980b50a6531b3019e45ddeada0626d45fa85cbe22300844a7983285bed3b/pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26", size = 137427, upload-time = "2025-09-25T21:32:32.58Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/97/c9/39d5b874e8b28845e4ec2202b5da735d0199dbe5b8fb85f91398814a9a46/pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c", size = 154090, upload-time = "2025-09-25T21:32:33.659Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/73/e8/2bdf3ca2090f68bb3d75b44da7bbc71843b19c9f2b9cb9b0f4ab7a5a4329/pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb", size = 140246, upload-time = "2025-09-25T21:32:34.663Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/9d/8c/f4bd7f6465179953d3ac9bc44ac1a8a3e6122cf8ada906b4f96c60172d43/pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac", size = 181814, upload-time = "2025-09-25T21:32:35.712Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/bd/9c/4d95bb87eb2063d20db7b60faa3840c1b18025517ae857371c4dd55a6b3a/pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310", size = 173809, upload-time = "2025-09-25T21:32:36.789Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/92/b5/47e807c2623074914e29dabd16cbbdd4bf5e9b2db9f8090fa64411fc5382/pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7", size = 766454, upload-time = "2025-09-25T21:32:37.966Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/02/9e/e5e9b168be58564121efb3de6859c452fccde0ab093d8438905899a3a483/pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788", size = 836355, upload-time = "2025-09-25T21:32:39.178Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/88/f9/16491d7ed2a919954993e48aa941b200f38040928474c9e85ea9e64222c3/pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5", size = 794175, upload-time = "2025-09-25T21:32:40.865Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/dd/3f/5989debef34dc6397317802b527dbbafb2b4760878a53d4166579111411e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764", size = 755228, upload-time = "2025-09-25T21:32:42.084Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/d7/ce/af88a49043cd2e265be63d083fc75b27b6ed062f5f9fd6cdc223ad62f03e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35", size = 789194, upload-time = "2025-09-25T21:32:43.362Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/23/20/bb6982b26a40bb43951265ba29d4c246ef0ff59c9fdcdf0ed04e0687de4d/pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac", size = 156429, upload-time = "2025-09-25T21:32:57.844Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f4/f4/a4541072bb9422c8a883ab55255f918fa378ecf083f5b85e87fc2b4eda1b/pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3", size = 143912, upload-time = "2025-09-25T21:32:59.247Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/7c/f9/07dd09ae774e4616edf6cda684ee78f97777bdd15847253637a6f052a62f/pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3", size = 189108, upload-time = "2025-09-25T21:32:44.377Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/4e/78/8d08c9fb7ce09ad8c38ad533c1191cf27f7ae1effe5bb9400a46d9437fcf/pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba", size = 183641, upload-time = "2025-09-25T21:32:45.407Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/7b/5b/3babb19104a46945cf816d047db2788bcaf8c94527a805610b0289a01c6b/pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c", size = 831901, upload-time = "2025-09-25T21:32:48.83Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/8b/cc/dff0684d8dc44da4d22a13f35f073d558c268780ce3c6ba1b87055bb0b87/pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702", size = 861132, upload-time = "2025-09-25T21:32:50.149Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b1/5e/f77dc6b9036943e285ba76b49e118d9ea929885becb0a29ba8a7c75e29fe/pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c", size = 839261, upload-time = "2025-09-25T21:32:51.808Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/ce/88/a9db1376aa2a228197c58b37302f284b5617f56a5d959fd1763fb1675ce6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065", size = 805272, upload-time = "2025-09-25T21:32:52.941Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/da/92/1446574745d74df0c92e6aa4a7b0b3130706a4142b2d1a5869f2eaa423c6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65", size = 829923, upload-time = "2025-09-25T21:32:54.537Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f0/7a/1c7270340330e575b92f397352af856a8c06f230aa3e76f86b39d01b416a/pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9", size = 174062, upload-time = "2025-09-25T21:32:55.767Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f1/12/de94a39c2ef588c7e6455cfbe7343d3b2dc9d6b6b2f40c4c6565744c873d/pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b", size = 149341, upload-time = "2025-09-25T21:32:56.828Z" },
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "regex"
|
name = "regex"
|
||||||
version = "2025.11.3"
|
version = "2025.11.3"
|
||||||
@@ -1775,6 +2044,41 @@ wheels = [
|
|||||||
{ url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" },
|
{ url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" },
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "sqlalchemy"
|
||||||
|
version = "2.0.45"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
dependencies = [
|
||||||
|
{ name = "greenlet", marker = "platform_machine == 'AMD64' or platform_machine == 'WIN32' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'ppc64le' or platform_machine == 'win32' or platform_machine == 'x86_64'" },
|
||||||
|
{ name = "typing-extensions" },
|
||||||
|
]
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/be/f9/5e4491e5ccf42f5d9cfc663741d261b3e6e1683ae7812114e7636409fcc6/sqlalchemy-2.0.45.tar.gz", hash = "sha256:1632a4bda8d2d25703fdad6363058d882541bdaaee0e5e3ddfa0cd3229efce88", size = 9869912, upload-time = "2025-12-09T21:05:16.737Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/2d/c7/1900b56ce19bff1c26f39a4ce427faec7716c81ac792bfac8b6a9f3dca93/sqlalchemy-2.0.45-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b3ee2aac15169fb0d45822983631466d60b762085bc4535cd39e66bea362df5f", size = 3333760, upload-time = "2025-12-09T22:11:02.66Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/0a/93/3be94d96bb442d0d9a60e55a6bb6e0958dd3457751c6f8502e56ef95fed0/sqlalchemy-2.0.45-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba547ac0b361ab4f1608afbc8432db669bd0819b3e12e29fb5fa9529a8bba81d", size = 3348268, upload-time = "2025-12-09T22:13:49.054Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/48/4b/f88ded696e61513595e4a9778f9d3f2bf7332cce4eb0c7cedaabddd6687b/sqlalchemy-2.0.45-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:215f0528b914e5c75ef2559f69dca86878a3beeb0c1be7279d77f18e8d180ed4", size = 3278144, upload-time = "2025-12-09T22:11:04.14Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/ed/6a/310ecb5657221f3e1bd5288ed83aa554923fb5da48d760a9f7622afeb065/sqlalchemy-2.0.45-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:107029bf4f43d076d4011f1afb74f7c3e2ea029ec82eb23d8527d5e909e97aa6", size = 3313907, upload-time = "2025-12-09T22:13:50.598Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/5c/39/69c0b4051079addd57c84a5bfb34920d87456dd4c90cf7ee0df6efafc8ff/sqlalchemy-2.0.45-cp312-cp312-win32.whl", hash = "sha256:0c9f6ada57b58420a2c0277ff853abe40b9e9449f8d7d231763c6bc30f5c4953", size = 2112182, upload-time = "2025-12-09T21:39:30.824Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f7/4e/510db49dd89fc3a6e994bee51848c94c48c4a00dc905e8d0133c251f41a7/sqlalchemy-2.0.45-cp312-cp312-win_amd64.whl", hash = "sha256:8defe5737c6d2179c7997242d6473587c3beb52e557f5ef0187277009f73e5e1", size = 2139200, upload-time = "2025-12-09T21:39:32.321Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/6a/c8/7cc5221b47a54edc72a0140a1efa56e0a2730eefa4058d7ed0b4c4357ff8/sqlalchemy-2.0.45-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fe187fc31a54d7fd90352f34e8c008cf3ad5d064d08fedd3de2e8df83eb4a1cf", size = 3277082, upload-time = "2025-12-09T22:11:06.167Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/0e/50/80a8d080ac7d3d321e5e5d420c9a522b0aa770ec7013ea91f9a8b7d36e4a/sqlalchemy-2.0.45-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:672c45cae53ba88e0dad74b9027dddd09ef6f441e927786b05bec75d949fbb2e", size = 3293131, upload-time = "2025-12-09T22:13:52.626Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/da/4c/13dab31266fc9904f7609a5dc308a2432a066141d65b857760c3bef97e69/sqlalchemy-2.0.45-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:470daea2c1ce73910f08caf10575676a37159a6d16c4da33d0033546bddebc9b", size = 3225389, upload-time = "2025-12-09T22:11:08.093Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/74/04/891b5c2e9f83589de202e7abaf24cd4e4fa59e1837d64d528829ad6cc107/sqlalchemy-2.0.45-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9c6378449e0940476577047150fd09e242529b761dc887c9808a9a937fe990c8", size = 3266054, upload-time = "2025-12-09T22:13:54.262Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f1/24/fc59e7f71b0948cdd4cff7a286210e86b0443ef1d18a23b0d83b87e4b1f7/sqlalchemy-2.0.45-cp313-cp313-win32.whl", hash = "sha256:4b6bec67ca45bc166c8729910bd2a87f1c0407ee955df110d78948f5b5827e8a", size = 2110299, upload-time = "2025-12-09T21:39:33.486Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/c0/c5/d17113020b2d43073412aeca09b60d2009442420372123b8d49cc253f8b8/sqlalchemy-2.0.45-cp313-cp313-win_amd64.whl", hash = "sha256:afbf47dc4de31fa38fd491f3705cac5307d21d4bb828a4f020ee59af412744ee", size = 2136264, upload-time = "2025-12-09T21:39:36.801Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/3d/8d/bb40a5d10e7a5f2195f235c0b2f2c79b0bf6e8f00c0c223130a4fbd2db09/sqlalchemy-2.0.45-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:83d7009f40ce619d483d26ac1b757dfe3167b39921379a8bd1b596cf02dab4a6", size = 3521998, upload-time = "2025-12-09T22:13:28.622Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/75/a5/346128b0464886f036c039ea287b7332a410aa2d3fb0bb5d404cb8861635/sqlalchemy-2.0.45-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d8a2ca754e5415cde2b656c27900b19d50ba076aa05ce66e2207623d3fe41f5a", size = 3473434, upload-time = "2025-12-09T22:13:30.188Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/cc/64/4e1913772646b060b025d3fc52ce91a58967fe58957df32b455de5a12b4f/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7f46ec744e7f51275582e6a24326e10c49fbdd3fc99103e01376841213028774", size = 3272404, upload-time = "2025-12-09T22:11:09.662Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b3/27/caf606ee924282fe4747ee4fd454b335a72a6e018f97eab5ff7f28199e16/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:883c600c345123c033c2f6caca18def08f1f7f4c3ebeb591a63b6fceffc95cce", size = 3277057, upload-time = "2025-12-09T22:13:56.213Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/85/d0/3d64218c9724e91f3d1574d12eb7ff8f19f937643815d8daf792046d88ab/sqlalchemy-2.0.45-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:2c0b74aa79e2deade948fe8593654c8ef4228c44ba862bb7c9585c8e0db90f33", size = 3222279, upload-time = "2025-12-09T22:11:11.1Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/24/10/dd7688a81c5bc7690c2a3764d55a238c524cd1a5a19487928844cb247695/sqlalchemy-2.0.45-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:8a420169cef179d4c9064365f42d779f1e5895ad26ca0c8b4c0233920973db74", size = 3244508, upload-time = "2025-12-09T22:13:57.932Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/aa/41/db75756ca49f777e029968d9c9fee338c7907c563267740c6d310a8e3f60/sqlalchemy-2.0.45-cp314-cp314-win32.whl", hash = "sha256:e50dcb81a5dfe4b7b4a4aa8f338116d127cb209559124f3694c70d6cd072b68f", size = 2113204, upload-time = "2025-12-09T21:39:38.365Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/89/a2/0e1590e9adb292b1d576dbcf67ff7df8cf55e56e78d2c927686d01080f4b/sqlalchemy-2.0.45-cp314-cp314-win_amd64.whl", hash = "sha256:4748601c8ea959e37e03d13dcda4a44837afcd1b21338e637f7c935b8da06177", size = 2138785, upload-time = "2025-12-09T21:39:39.503Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/42/39/f05f0ed54d451156bbed0e23eb0516bcad7cbb9f18b3bf219c786371b3f0/sqlalchemy-2.0.45-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cd337d3526ec5298f67d6a30bbbe4ed7e5e68862f0bf6dd21d289f8d37b7d60b", size = 3522029, upload-time = "2025-12-09T22:13:32.09Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/54/0f/d15398b98b65c2bce288d5ee3f7d0a81f77ab89d9456994d5c7cc8b2a9db/sqlalchemy-2.0.45-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9a62b446b7d86a3909abbcd1cd3cc550a832f99c2bc37c5b22e1925438b9367b", size = 3475142, upload-time = "2025-12-09T22:13:33.739Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/bf/e1/3ccb13c643399d22289c6a9786c1a91e3dcbb68bce4beb44926ac2c557bf/sqlalchemy-2.0.45-py3-none-any.whl", hash = "sha256:5225a288e4c8cc2308dbdd874edad6e7d0fd38eac1e9e5f23503425c8eee20d0", size = 1936672, upload-time = "2025-12-09T21:54:52.608Z" },
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "stack-data"
|
name = "stack-data"
|
||||||
version = "0.6.3"
|
version = "0.6.3"
|
||||||
@@ -1789,6 +2093,19 @@ wheels = [
|
|||||||
{ url = "https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521, upload-time = "2023-09-30T13:58:03.53Z" },
|
{ url = "https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521, upload-time = "2023-09-30T13:58:03.53Z" },
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "starlette"
|
||||||
|
version = "0.50.0"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
dependencies = [
|
||||||
|
{ name = "anyio" },
|
||||||
|
{ name = "typing-extensions", marker = "python_full_version < '3.13'" },
|
||||||
|
]
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/ba/b8/73a0e6a6e079a9d9cfa64113d771e421640b6f679a52eeb9b32f72d871a1/starlette-0.50.0.tar.gz", hash = "sha256:a2a17b22203254bcbc2e1f926d2d55f3f9497f769416b3190768befe598fa3ca", size = 2646985, upload-time = "2025-11-01T15:25:27.516Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/d9/52/1064f510b141bd54025f9b55105e26d1fa970b9be67ad766380a3c9b74b0/starlette-0.50.0-py3-none-any.whl", hash = "sha256:9e5391843ec9b6e472eed1365a78c8098cfceb7a74bfd4d6b1c0c0095efb3bca", size = 74033, upload-time = "2025-11-01T15:25:25.461Z" },
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ta"
|
name = "ta"
|
||||||
version = "0.11.0"
|
version = "0.11.0"
|
||||||
@@ -1838,6 +2155,18 @@ wheels = [
|
|||||||
{ url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" },
|
{ url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" },
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "typing-inspection"
|
||||||
|
version = "0.4.2"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
dependencies = [
|
||||||
|
{ name = "typing-extensions" },
|
||||||
|
]
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" },
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tzdata"
|
name = "tzdata"
|
||||||
version = "2025.2"
|
version = "2025.2"
|
||||||
@@ -1868,6 +2197,62 @@ wheels = [
|
|||||||
{ url = "https://files.pythonhosted.org/packages/39/08/aaaad47bc4e9dc8c725e68f9d04865dbcb2052843ff09c97b08904852d84/urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4", size = 131584, upload-time = "2026-01-07T16:24:42.685Z" },
|
{ url = "https://files.pythonhosted.org/packages/39/08/aaaad47bc4e9dc8c725e68f9d04865dbcb2052843ff09c97b08904852d84/urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4", size = 131584, upload-time = "2026-01-07T16:24:42.685Z" },
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "uvicorn"
|
||||||
|
version = "0.40.0"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
dependencies = [
|
||||||
|
{ name = "click" },
|
||||||
|
{ name = "h11" },
|
||||||
|
]
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/c3/d1/8f3c683c9561a4e6689dd3b1d345c815f10f86acd044ee1fb9a4dcd0b8c5/uvicorn-0.40.0.tar.gz", hash = "sha256:839676675e87e73694518b5574fd0f24c9d97b46bea16df7b8c05ea1a51071ea", size = 81761, upload-time = "2025-12-21T14:16:22.45Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/3d/d8/2083a1daa7439a66f3a48589a57d576aa117726762618f6bb09fe3798796/uvicorn-0.40.0-py3-none-any.whl", hash = "sha256:c6c8f55bc8bf13eb6fa9ff87ad62308bbbc33d0b67f84293151efe87e0d5f2ee", size = 68502, upload-time = "2025-12-21T14:16:21.041Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[package.optional-dependencies]
|
||||||
|
standard = [
|
||||||
|
{ name = "colorama", marker = "sys_platform == 'win32'" },
|
||||||
|
{ name = "httptools" },
|
||||||
|
{ name = "python-dotenv" },
|
||||||
|
{ name = "pyyaml" },
|
||||||
|
{ name = "uvloop", marker = "platform_python_implementation != 'PyPy' and sys_platform != 'cygwin' and sys_platform != 'win32'" },
|
||||||
|
{ name = "watchfiles" },
|
||||||
|
{ name = "websockets" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "uvloop"
|
||||||
|
version = "0.22.1"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/06/f0/18d39dbd1971d6d62c4629cc7fa67f74821b0dc1f5a77af43719de7936a7/uvloop-0.22.1.tar.gz", hash = "sha256:6c84bae345b9147082b17371e3dd5d42775bddce91f885499017f4607fdaf39f", size = 2443250, upload-time = "2025-10-16T22:17:19.342Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/3d/ff/7f72e8170be527b4977b033239a83a68d5c881cc4775fca255c677f7ac5d/uvloop-0.22.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:fe94b4564e865d968414598eea1a6de60adba0c040ba4ed05ac1300de402cd42", size = 1359936, upload-time = "2025-10-16T22:16:29.436Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/c3/c6/e5d433f88fd54d81ef4be58b2b7b0cea13c442454a1db703a1eea0db1a59/uvloop-0.22.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:51eb9bd88391483410daad430813d982010f9c9c89512321f5b60e2cddbdddd6", size = 752769, upload-time = "2025-10-16T22:16:30.493Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/24/68/a6ac446820273e71aa762fa21cdcc09861edd3536ff47c5cd3b7afb10eeb/uvloop-0.22.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:700e674a166ca5778255e0e1dc4e9d79ab2acc57b9171b79e65feba7184b3370", size = 4317413, upload-time = "2025-10-16T22:16:31.644Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/5f/6f/e62b4dfc7ad6518e7eff2516f680d02a0f6eb62c0c212e152ca708a0085e/uvloop-0.22.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b5b1ac819a3f946d3b2ee07f09149578ae76066d70b44df3fa990add49a82e4", size = 4426307, upload-time = "2025-10-16T22:16:32.917Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/90/60/97362554ac21e20e81bcef1150cb2a7e4ffdaf8ea1e5b2e8bf7a053caa18/uvloop-0.22.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e047cc068570bac9866237739607d1313b9253c3051ad84738cbb095be0537b2", size = 4131970, upload-time = "2025-10-16T22:16:34.015Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/99/39/6b3f7d234ba3964c428a6e40006340f53ba37993f46ed6e111c6e9141d18/uvloop-0.22.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:512fec6815e2dd45161054592441ef76c830eddaad55c8aa30952e6fe1ed07c0", size = 4296343, upload-time = "2025-10-16T22:16:35.149Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/89/8c/182a2a593195bfd39842ea68ebc084e20c850806117213f5a299dfc513d9/uvloop-0.22.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:561577354eb94200d75aca23fbde86ee11be36b00e52a4eaf8f50fb0c86b7705", size = 1358611, upload-time = "2025-10-16T22:16:36.833Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/d2/14/e301ee96a6dc95224b6f1162cd3312f6d1217be3907b79173b06785f2fe7/uvloop-0.22.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1cdf5192ab3e674ca26da2eada35b288d2fa49fdd0f357a19f0e7c4e7d5077c8", size = 751811, upload-time = "2025-10-16T22:16:38.275Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b7/02/654426ce265ac19e2980bfd9ea6590ca96a56f10c76e63801a2df01c0486/uvloop-0.22.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6e2ea3d6190a2968f4a14a23019d3b16870dd2190cd69c8180f7c632d21de68d", size = 4288562, upload-time = "2025-10-16T22:16:39.375Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/15/c0/0be24758891ef825f2065cd5db8741aaddabe3e248ee6acc5e8a80f04005/uvloop-0.22.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0530a5fbad9c9e4ee3f2b33b148c6a64d47bbad8000ea63704fa8260f4cf728e", size = 4366890, upload-time = "2025-10-16T22:16:40.547Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/d2/53/8369e5219a5855869bcee5f4d317f6da0e2c669aecf0ef7d371e3d084449/uvloop-0.22.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bc5ef13bbc10b5335792360623cc378d52d7e62c2de64660616478c32cd0598e", size = 4119472, upload-time = "2025-10-16T22:16:41.694Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f8/ba/d69adbe699b768f6b29a5eec7b47dd610bd17a69de51b251126a801369ea/uvloop-0.22.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1f38ec5e3f18c8a10ded09742f7fb8de0108796eb673f30ce7762ce1b8550cad", size = 4239051, upload-time = "2025-10-16T22:16:43.224Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/90/cd/b62bdeaa429758aee8de8b00ac0dd26593a9de93d302bff3d21439e9791d/uvloop-0.22.1-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3879b88423ec7e97cd4eba2a443aa26ed4e59b45e6b76aabf13fe2f27023a142", size = 1362067, upload-time = "2025-10-16T22:16:44.503Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/0d/f8/a132124dfda0777e489ca86732e85e69afcd1ff7686647000050ba670689/uvloop-0.22.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:4baa86acedf1d62115c1dc6ad1e17134476688f08c6efd8a2ab076e815665c74", size = 752423, upload-time = "2025-10-16T22:16:45.968Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/a3/94/94af78c156f88da4b3a733773ad5ba0b164393e357cc4bd0ab2e2677a7d6/uvloop-0.22.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:297c27d8003520596236bdb2335e6b3f649480bd09e00d1e3a99144b691d2a35", size = 4272437, upload-time = "2025-10-16T22:16:47.451Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b5/35/60249e9fd07b32c665192cec7af29e06c7cd96fa1d08b84f012a56a0b38e/uvloop-0.22.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c1955d5a1dd43198244d47664a5858082a3239766a839b2102a269aaff7a4e25", size = 4292101, upload-time = "2025-10-16T22:16:49.318Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/02/62/67d382dfcb25d0a98ce73c11ed1a6fba5037a1a1d533dcbb7cab033a2636/uvloop-0.22.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b31dc2fccbd42adc73bc4e7cdbae4fc5086cf378979e53ca5d0301838c5682c6", size = 4114158, upload-time = "2025-10-16T22:16:50.517Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f0/7a/f1171b4a882a5d13c8b7576f348acfe6074d72eaf52cccef752f748d4a9f/uvloop-0.22.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:93f617675b2d03af4e72a5333ef89450dfaa5321303ede6e67ba9c9d26878079", size = 4177360, upload-time = "2025-10-16T22:16:52.646Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/79/7b/b01414f31546caf0919da80ad57cbfe24c56b151d12af68cee1b04922ca8/uvloop-0.22.1-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:37554f70528f60cad66945b885eb01f1bb514f132d92b6eeed1c90fd54ed6289", size = 1454790, upload-time = "2025-10-16T22:16:54.355Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/d4/31/0bb232318dd838cad3fa8fb0c68c8b40e1145b32025581975e18b11fab40/uvloop-0.22.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:b76324e2dc033a0b2f435f33eb88ff9913c156ef78e153fb210e03c13da746b3", size = 796783, upload-time = "2025-10-16T22:16:55.906Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/42/38/c9b09f3271a7a723a5de69f8e237ab8e7803183131bc57c890db0b6bb872/uvloop-0.22.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:badb4d8e58ee08dad957002027830d5c3b06aea446a6a3744483c2b3b745345c", size = 4647548, upload-time = "2025-10-16T22:16:57.008Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/c1/37/945b4ca0ac27e3dc4952642d4c900edd030b3da6c9634875af6e13ae80e5/uvloop-0.22.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b91328c72635f6f9e0282e4a57da7470c7350ab1c9f48546c0f2866205349d21", size = 4467065, upload-time = "2025-10-16T22:16:58.206Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/97/cc/48d232f33d60e2e2e0b42f4e73455b146b76ebe216487e862700457fbf3c/uvloop-0.22.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:daf620c2995d193449393d6c62131b3fbd40a63bf7b307a1527856ace637fe88", size = 4328384, upload-time = "2025-10-16T22:16:59.36Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/e4/16/c1fd27e9549f3c4baf1dc9c20c456cd2f822dbf8de9f463824b0c0357e06/uvloop-0.22.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6cde23eeda1a25c75b2e07d39970f3374105d5eafbaab2a4482be82f272d5a5e", size = 4296730, upload-time = "2025-10-16T22:17:00.744Z" },
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "vectorbt"
|
name = "vectorbt"
|
||||||
version = "0.28.2"
|
version = "0.28.2"
|
||||||
@@ -1895,6 +2280,76 @@ wheels = [
|
|||||||
{ url = "https://files.pythonhosted.org/packages/91/b9/250f7a1d033618bd0e43ae40bc180aa88895c907876ca39e219a45caecca/vectorbt-0.28.2-py3-none-any.whl", hash = "sha256:93e5fb20d2ff072b7fed78603b516eb64f967c9bf9420ce8ba28329af0410e7d", size = 527808, upload-time = "2025-12-12T16:18:10.624Z" },
|
{ url = "https://files.pythonhosted.org/packages/91/b9/250f7a1d033618bd0e43ae40bc180aa88895c907876ca39e219a45caecca/vectorbt-0.28.2-py3-none-any.whl", hash = "sha256:93e5fb20d2ff072b7fed78603b516eb64f967c9bf9420ce8ba28329af0410e7d", size = 527808, upload-time = "2025-12-12T16:18:10.624Z" },
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "watchfiles"
|
||||||
|
version = "1.1.1"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
dependencies = [
|
||||||
|
{ name = "anyio" },
|
||||||
|
]
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/c2/c9/8869df9b2a2d6c59d79220a4db37679e74f807c559ffe5265e08b227a210/watchfiles-1.1.1.tar.gz", hash = "sha256:a173cb5c16c4f40ab19cecf48a534c409f7ea983ab8fed0741304a1c0a31b3f2", size = 94440, upload-time = "2025-10-14T15:06:21.08Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/74/d5/f039e7e3c639d9b1d09b07ea412a6806d38123f0508e5f9b48a87b0a76cc/watchfiles-1.1.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:8c89f9f2f740a6b7dcc753140dd5e1ab9215966f7a3530d0c0705c83b401bd7d", size = 404745, upload-time = "2025-10-14T15:04:46.731Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/a5/96/a881a13aa1349827490dab2d363c8039527060cfcc2c92cc6d13d1b1049e/watchfiles-1.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bd404be08018c37350f0d6e34676bd1e2889990117a2b90070b3007f172d0610", size = 391769, upload-time = "2025-10-14T15:04:48.003Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/4b/5b/d3b460364aeb8da471c1989238ea0e56bec24b6042a68046adf3d9ddb01c/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8526e8f916bb5b9a0a777c8317c23ce65de259422bba5b31325a6fa6029d33af", size = 449374, upload-time = "2025-10-14T15:04:49.179Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b9/44/5769cb62d4ed055cb17417c0a109a92f007114a4e07f30812a73a4efdb11/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2edc3553362b1c38d9f06242416a5d8e9fe235c204a4072e988ce2e5bb1f69f6", size = 459485, upload-time = "2025-10-14T15:04:50.155Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/19/0c/286b6301ded2eccd4ffd0041a1b726afda999926cf720aab63adb68a1e36/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30f7da3fb3f2844259cba4720c3fc7138eb0f7b659c38f3bfa65084c7fc7abce", size = 488813, upload-time = "2025-10-14T15:04:51.059Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/c7/2b/8530ed41112dd4a22f4dcfdb5ccf6a1baad1ff6eed8dc5a5f09e7e8c41c7/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8979280bdafff686ba5e4d8f97840f929a87ed9cdf133cbbd42f7766774d2aa", size = 594816, upload-time = "2025-10-14T15:04:52.031Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/ce/d2/f5f9fb49489f184f18470d4f99f4e862a4b3e9ac2865688eb2099e3d837a/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dcc5c24523771db3a294c77d94771abcfcb82a0e0ee8efd910c37c59ec1b31bb", size = 475186, upload-time = "2025-10-14T15:04:53.064Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/cf/68/5707da262a119fb06fbe214d82dd1fe4a6f4af32d2d14de368d0349eb52a/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db5d7ae38ff20153d542460752ff397fcf5c96090c1230803713cf3147a6803", size = 456812, upload-time = "2025-10-14T15:04:55.174Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/66/ab/3cbb8756323e8f9b6f9acb9ef4ec26d42b2109bce830cc1f3468df20511d/watchfiles-1.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:28475ddbde92df1874b6c5c8aaeb24ad5be47a11f87cde5a28ef3835932e3e94", size = 630196, upload-time = "2025-10-14T15:04:56.22Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/78/46/7152ec29b8335f80167928944a94955015a345440f524d2dfe63fc2f437b/watchfiles-1.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:36193ed342f5b9842edd3532729a2ad55c4160ffcfa3700e0d54be496b70dd43", size = 622657, upload-time = "2025-10-14T15:04:57.521Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/0a/bf/95895e78dd75efe9a7f31733607f384b42eb5feb54bd2eb6ed57cc2e94f4/watchfiles-1.1.1-cp312-cp312-win32.whl", hash = "sha256:859e43a1951717cc8de7f4c77674a6d389b106361585951d9e69572823f311d9", size = 272042, upload-time = "2025-10-14T15:04:59.046Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/87/0a/90eb755f568de2688cb220171c4191df932232c20946966c27a59c400850/watchfiles-1.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:91d4c9a823a8c987cce8fa2690923b069966dabb196dd8d137ea2cede885fde9", size = 288410, upload-time = "2025-10-14T15:05:00.081Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/36/76/f322701530586922fbd6723c4f91ace21364924822a8772c549483abed13/watchfiles-1.1.1-cp312-cp312-win_arm64.whl", hash = "sha256:a625815d4a2bdca61953dbba5a39d60164451ef34c88d751f6c368c3ea73d404", size = 278209, upload-time = "2025-10-14T15:05:01.168Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/bb/f4/f750b29225fe77139f7ae5de89d4949f5a99f934c65a1f1c0b248f26f747/watchfiles-1.1.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:130e4876309e8686a5e37dba7d5e9bc77e6ed908266996ca26572437a5271e18", size = 404321, upload-time = "2025-10-14T15:05:02.063Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/2b/f9/f07a295cde762644aa4c4bb0f88921d2d141af45e735b965fb2e87858328/watchfiles-1.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5f3bde70f157f84ece3765b42b4a52c6ac1a50334903c6eaf765362f6ccca88a", size = 391783, upload-time = "2025-10-14T15:05:03.052Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/bc/11/fc2502457e0bea39a5c958d86d2cb69e407a4d00b85735ca724bfa6e0d1a/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14e0b1fe858430fc0251737ef3824c54027bedb8c37c38114488b8e131cf8219", size = 449279, upload-time = "2025-10-14T15:05:04.004Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/e3/1f/d66bc15ea0b728df3ed96a539c777acfcad0eb78555ad9efcaa1274688f0/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f27db948078f3823a6bb3b465180db8ebecf26dd5dae6f6180bd87383b6b4428", size = 459405, upload-time = "2025-10-14T15:05:04.942Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/be/90/9f4a65c0aec3ccf032703e6db02d89a157462fbb2cf20dd415128251cac0/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:059098c3a429f62fc98e8ec62b982230ef2c8df68c79e826e37b895bc359a9c0", size = 488976, upload-time = "2025-10-14T15:05:05.905Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/37/57/ee347af605d867f712be7029bb94c8c071732a4b44792e3176fa3c612d39/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfb5862016acc9b869bb57284e6cb35fdf8e22fe59f7548858e2f971d045f150", size = 595506, upload-time = "2025-10-14T15:05:06.906Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/a8/78/cc5ab0b86c122047f75e8fc471c67a04dee395daf847d3e59381996c8707/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:319b27255aacd9923b8a276bb14d21a5f7ff82564c744235fc5eae58d95422ae", size = 474936, upload-time = "2025-10-14T15:05:07.906Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/62/da/def65b170a3815af7bd40a3e7010bf6ab53089ef1b75d05dd5385b87cf08/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c755367e51db90e75b19454b680903631d41f9e3607fbd941d296a020c2d752d", size = 456147, upload-time = "2025-10-14T15:05:09.138Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/57/99/da6573ba71166e82d288d4df0839128004c67d2778d3b566c138695f5c0b/watchfiles-1.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c22c776292a23bfc7237a98f791b9ad3144b02116ff10d820829ce62dff46d0b", size = 630007, upload-time = "2025-10-14T15:05:10.117Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/a8/51/7439c4dd39511368849eb1e53279cd3454b4a4dbace80bab88feeb83c6b5/watchfiles-1.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:3a476189be23c3686bc2f4321dd501cb329c0a0469e77b7b534ee10129ae6374", size = 622280, upload-time = "2025-10-14T15:05:11.146Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/95/9c/8ed97d4bba5db6fdcdb2b298d3898f2dd5c20f6b73aee04eabe56c59677e/watchfiles-1.1.1-cp313-cp313-win32.whl", hash = "sha256:bf0a91bfb5574a2f7fc223cf95eeea79abfefa404bf1ea5e339c0c1560ae99a0", size = 272056, upload-time = "2025-10-14T15:05:12.156Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/1f/f3/c14e28429f744a260d8ceae18bf58c1d5fa56b50d006a7a9f80e1882cb0d/watchfiles-1.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:52e06553899e11e8074503c8e716d574adeeb7e68913115c4b3653c53f9bae42", size = 288162, upload-time = "2025-10-14T15:05:13.208Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/dc/61/fe0e56c40d5cd29523e398d31153218718c5786b5e636d9ae8ae79453d27/watchfiles-1.1.1-cp313-cp313-win_arm64.whl", hash = "sha256:ac3cc5759570cd02662b15fbcd9d917f7ecd47efe0d6b40474eafd246f91ea18", size = 277909, upload-time = "2025-10-14T15:05:14.49Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/79/42/e0a7d749626f1e28c7108a99fb9bf524b501bbbeb9b261ceecde644d5a07/watchfiles-1.1.1-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:563b116874a9a7ce6f96f87cd0b94f7faf92d08d0021e837796f0a14318ef8da", size = 403389, upload-time = "2025-10-14T15:05:15.777Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/15/49/08732f90ce0fbbc13913f9f215c689cfc9ced345fb1bcd8829a50007cc8d/watchfiles-1.1.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3ad9fe1dae4ab4212d8c91e80b832425e24f421703b5a42ef2e4a1e215aff051", size = 389964, upload-time = "2025-10-14T15:05:16.85Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/27/0d/7c315d4bd5f2538910491a0393c56bf70d333d51bc5b34bee8e68e8cea19/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce70f96a46b894b36eba678f153f052967a0d06d5b5a19b336ab0dbbd029f73e", size = 448114, upload-time = "2025-10-14T15:05:17.876Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/c3/24/9e096de47a4d11bc4df41e9d1e61776393eac4cb6eb11b3e23315b78b2cc/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cb467c999c2eff23a6417e58d75e5828716f42ed8289fe6b77a7e5a91036ca70", size = 460264, upload-time = "2025-10-14T15:05:18.962Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/cc/0f/e8dea6375f1d3ba5fcb0b3583e2b493e77379834c74fd5a22d66d85d6540/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:836398932192dae4146c8f6f737d74baeac8b70ce14831a239bdb1ca882fc261", size = 487877, upload-time = "2025-10-14T15:05:20.094Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/ac/5b/df24cfc6424a12deb41503b64d42fbea6b8cb357ec62ca84a5a3476f654a/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:743185e7372b7bc7c389e1badcc606931a827112fbbd37f14c537320fca08620", size = 595176, upload-time = "2025-10-14T15:05:21.134Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/8f/b5/853b6757f7347de4e9b37e8cc3289283fb983cba1ab4d2d7144694871d9c/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afaeff7696e0ad9f02cbb8f56365ff4686ab205fcf9c4c5b6fdfaaa16549dd04", size = 473577, upload-time = "2025-10-14T15:05:22.306Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/e1/f7/0a4467be0a56e80447c8529c9fce5b38eab4f513cb3d9bf82e7392a5696b/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f7eb7da0eb23aa2ba036d4f616d46906013a68caf61b7fdbe42fc8b25132e77", size = 455425, upload-time = "2025-10-14T15:05:23.348Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/8e/e0/82583485ea00137ddf69bc84a2db88bd92ab4a6e3c405e5fb878ead8d0e7/watchfiles-1.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:831a62658609f0e5c64178211c942ace999517f5770fe9436be4c2faeba0c0ef", size = 628826, upload-time = "2025-10-14T15:05:24.398Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/28/9a/a785356fccf9fae84c0cc90570f11702ae9571036fb25932f1242c82191c/watchfiles-1.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:f9a2ae5c91cecc9edd47e041a930490c31c3afb1f5e6d71de3dc671bfaca02bf", size = 622208, upload-time = "2025-10-14T15:05:25.45Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/c3/f4/0872229324ef69b2c3edec35e84bd57a1289e7d3fe74588048ed8947a323/watchfiles-1.1.1-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:d1715143123baeeaeadec0528bb7441103979a1d5f6fd0e1f915383fea7ea6d5", size = 404315, upload-time = "2025-10-14T15:05:26.501Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/7b/22/16d5331eaed1cb107b873f6ae1b69e9ced582fcf0c59a50cd84f403b1c32/watchfiles-1.1.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:39574d6370c4579d7f5d0ad940ce5b20db0e4117444e39b6d8f99db5676c52fd", size = 390869, upload-time = "2025-10-14T15:05:27.649Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b2/7e/5643bfff5acb6539b18483128fdc0ef2cccc94a5b8fbda130c823e8ed636/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7365b92c2e69ee952902e8f70f3ba6360d0d596d9299d55d7d386df84b6941fb", size = 449919, upload-time = "2025-10-14T15:05:28.701Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/51/2e/c410993ba5025a9f9357c376f48976ef0e1b1aefb73b97a5ae01a5972755/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bfff9740c69c0e4ed32416f013f3c45e2ae42ccedd1167ef2d805c000b6c71a5", size = 460845, upload-time = "2025-10-14T15:05:30.064Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/8e/a4/2df3b404469122e8680f0fcd06079317e48db58a2da2950fb45020947734/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b27cf2eb1dda37b2089e3907d8ea92922b673c0c427886d4edc6b94d8dfe5db3", size = 489027, upload-time = "2025-10-14T15:05:31.064Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/ea/84/4587ba5b1f267167ee715b7f66e6382cca6938e0a4b870adad93e44747e6/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:526e86aced14a65a5b0ec50827c745597c782ff46b571dbfe46192ab9e0b3c33", size = 595615, upload-time = "2025-10-14T15:05:32.074Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/6a/0f/c6988c91d06e93cd0bb3d4a808bcf32375ca1904609835c3031799e3ecae/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04e78dd0b6352db95507fd8cb46f39d185cf8c74e4cf1e4fbad1d3df96faf510", size = 474836, upload-time = "2025-10-14T15:05:33.209Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b4/36/ded8aebea91919485b7bbabbd14f5f359326cb5ec218cd67074d1e426d74/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c85794a4cfa094714fb9c08d4a218375b2b95b8ed1666e8677c349906246c05", size = 455099, upload-time = "2025-10-14T15:05:34.189Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/98/e0/8c9bdba88af756a2fce230dd365fab2baf927ba42cd47521ee7498fd5211/watchfiles-1.1.1-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:74d5012b7630714b66be7b7b7a78855ef7ad58e8650c73afc4c076a1f480a8d6", size = 630626, upload-time = "2025-10-14T15:05:35.216Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/2a/84/a95db05354bf2d19e438520d92a8ca475e578c647f78f53197f5a2f17aaf/watchfiles-1.1.1-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:8fbe85cb3201c7d380d3d0b90e63d520f15d6afe217165d7f98c9c649654db81", size = 622519, upload-time = "2025-10-14T15:05:36.259Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/1d/ce/d8acdc8de545de995c339be67711e474c77d643555a9bb74a9334252bd55/watchfiles-1.1.1-cp314-cp314-win32.whl", hash = "sha256:3fa0b59c92278b5a7800d3ee7733da9d096d4aabcfabb9a928918bd276ef9b9b", size = 272078, upload-time = "2025-10-14T15:05:37.63Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/c4/c9/a74487f72d0451524be827e8edec251da0cc1fcf111646a511ae752e1a3d/watchfiles-1.1.1-cp314-cp314-win_amd64.whl", hash = "sha256:c2047d0b6cea13b3316bdbafbfa0c4228ae593d995030fda39089d36e64fc03a", size = 287664, upload-time = "2025-10-14T15:05:38.95Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/df/b8/8ac000702cdd496cdce998c6f4ee0ca1f15977bba51bdf07d872ebdfc34c/watchfiles-1.1.1-cp314-cp314-win_arm64.whl", hash = "sha256:842178b126593addc05acf6fce960d28bc5fae7afbaa2c6c1b3a7b9460e5be02", size = 277154, upload-time = "2025-10-14T15:05:39.954Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/47/a8/e3af2184707c29f0f14b1963c0aace6529f9d1b8582d5b99f31bbf42f59e/watchfiles-1.1.1-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:88863fbbc1a7312972f1c511f202eb30866370ebb8493aef2812b9ff28156a21", size = 403820, upload-time = "2025-10-14T15:05:40.932Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/c0/ec/e47e307c2f4bd75f9f9e8afbe3876679b18e1bcec449beca132a1c5ffb2d/watchfiles-1.1.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:55c7475190662e202c08c6c0f4d9e345a29367438cf8e8037f3155e10a88d5a5", size = 390510, upload-time = "2025-10-14T15:05:41.945Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/d5/a0/ad235642118090f66e7b2f18fd5c42082418404a79205cdfca50b6309c13/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f53fa183d53a1d7a8852277c92b967ae99c2d4dcee2bfacff8868e6e30b15f7", size = 448408, upload-time = "2025-10-14T15:05:43.385Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/df/85/97fa10fd5ff3332ae17e7e40e20784e419e28521549780869f1413742e9d/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6aae418a8b323732fa89721d86f39ec8f092fc2af67f4217a2b07fd3e93c6101", size = 458968, upload-time = "2025-10-14T15:05:44.404Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/47/c2/9059c2e8966ea5ce678166617a7f75ecba6164375f3b288e50a40dc6d489/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f096076119da54a6080e8920cbdaac3dbee667eb91dcc5e5b78840b87415bd44", size = 488096, upload-time = "2025-10-14T15:05:45.398Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/94/44/d90a9ec8ac309bc26db808a13e7bfc0e4e78b6fc051078a554e132e80160/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00485f441d183717038ed2e887a7c868154f216877653121068107b227a2f64c", size = 596040, upload-time = "2025-10-14T15:05:46.502Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/95/68/4e3479b20ca305cfc561db3ed207a8a1c745ee32bf24f2026a129d0ddb6e/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a55f3e9e493158d7bfdb60a1165035f1cf7d320914e7b7ea83fe22c6023b58fc", size = 473847, upload-time = "2025-10-14T15:05:47.484Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/4f/55/2af26693fd15165c4ff7857e38330e1b61ab8c37d15dc79118cdba115b7a/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c91ed27800188c2ae96d16e3149f199d62f86c7af5f5f4d2c61a3ed8cd3666c", size = 455072, upload-time = "2025-10-14T15:05:48.928Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/66/1d/d0d200b10c9311ec25d2273f8aad8c3ef7cc7ea11808022501811208a750/watchfiles-1.1.1-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:311ff15a0bae3714ffb603e6ba6dbfba4065ab60865d15a6ec544133bdb21099", size = 629104, upload-time = "2025-10-14T15:05:49.908Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/e3/bd/fa9bb053192491b3867ba07d2343d9f2252e00811567d30ae8d0f78136fe/watchfiles-1.1.1-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:a916a2932da8f8ab582f242c065f5c81bed3462849ca79ee357dd9551b0e9b01", size = 622112, upload-time = "2025-10-14T15:05:50.941Z" },
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "wcwidth"
|
name = "wcwidth"
|
||||||
version = "0.2.14"
|
version = "0.2.14"
|
||||||
@@ -1904,6 +2359,51 @@ wheels = [
|
|||||||
{ url = "https://files.pythonhosted.org/packages/af/b5/123f13c975e9f27ab9c0770f514345bd406d0e8d3b7a0723af9d43f710af/wcwidth-0.2.14-py2.py3-none-any.whl", hash = "sha256:a7bb560c8aee30f9957e5f9895805edd20602f2d7f720186dfd906e82b4982e1", size = 37286, upload-time = "2025-09-22T16:29:51.641Z" },
|
{ url = "https://files.pythonhosted.org/packages/af/b5/123f13c975e9f27ab9c0770f514345bd406d0e8d3b7a0723af9d43f710af/wcwidth-0.2.14-py2.py3-none-any.whl", hash = "sha256:a7bb560c8aee30f9957e5f9895805edd20602f2d7f720186dfd906e82b4982e1", size = 37286, upload-time = "2025-09-22T16:29:51.641Z" },
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "websockets"
|
||||||
|
version = "16.0"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/04/24/4b2031d72e840ce4c1ccb255f693b15c334757fc50023e4db9537080b8c4/websockets-16.0.tar.gz", hash = "sha256:5f6261a5e56e8d5c42a4497b364ea24d94d9563e8fbd44e78ac40879c60179b5", size = 179346, upload-time = "2026-01-10T09:23:47.181Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/84/7b/bac442e6b96c9d25092695578dda82403c77936104b5682307bd4deb1ad4/websockets-16.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:71c989cbf3254fbd5e84d3bff31e4da39c43f884e64f2551d14bb3c186230f00", size = 177365, upload-time = "2026-01-10T09:22:46.787Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b0/fe/136ccece61bd690d9c1f715baaeefd953bb2360134de73519d5df19d29ca/websockets-16.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:8b6e209ffee39ff1b6d0fa7bfef6de950c60dfb91b8fcead17da4ee539121a79", size = 175038, upload-time = "2026-01-10T09:22:47.999Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/40/1e/9771421ac2286eaab95b8575b0cb701ae3663abf8b5e1f64f1fd90d0a673/websockets-16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:86890e837d61574c92a97496d590968b23c2ef0aeb8a9bc9421d174cd378ae39", size = 175328, upload-time = "2026-01-10T09:22:49.809Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/18/29/71729b4671f21e1eaa5d6573031ab810ad2936c8175f03f97f3ff164c802/websockets-16.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:9b5aca38b67492ef518a8ab76851862488a478602229112c4b0d58d63a7a4d5c", size = 184915, upload-time = "2026-01-10T09:22:51.071Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/97/bb/21c36b7dbbafc85d2d480cd65df02a1dc93bf76d97147605a8e27ff9409d/websockets-16.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e0334872c0a37b606418ac52f6ab9cfd17317ac26365f7f65e203e2d0d0d359f", size = 186152, upload-time = "2026-01-10T09:22:52.224Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/4a/34/9bf8df0c0cf88fa7bfe36678dc7b02970c9a7d5e065a3099292db87b1be2/websockets-16.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a0b31e0b424cc6b5a04b8838bbaec1688834b2383256688cf47eb97412531da1", size = 185583, upload-time = "2026-01-10T09:22:53.443Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/47/88/4dd516068e1a3d6ab3c7c183288404cd424a9a02d585efbac226cb61ff2d/websockets-16.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:485c49116d0af10ac698623c513c1cc01c9446c058a4e61e3bf6c19dff7335a2", size = 184880, upload-time = "2026-01-10T09:22:55.033Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/91/d6/7d4553ad4bf1c0421e1ebd4b18de5d9098383b5caa1d937b63df8d04b565/websockets-16.0-cp312-cp312-win32.whl", hash = "sha256:eaded469f5e5b7294e2bdca0ab06becb6756ea86894a47806456089298813c89", size = 178261, upload-time = "2026-01-10T09:22:56.251Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/c3/f0/f3a17365441ed1c27f850a80b2bc680a0fa9505d733fe152fdf5e98c1c0b/websockets-16.0-cp312-cp312-win_amd64.whl", hash = "sha256:5569417dc80977fc8c2d43a86f78e0a5a22fee17565d78621b6bb264a115d4ea", size = 178693, upload-time = "2026-01-10T09:22:57.478Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/cc/9c/baa8456050d1c1b08dd0ec7346026668cbc6f145ab4e314d707bb845bf0d/websockets-16.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:878b336ac47938b474c8f982ac2f7266a540adc3fa4ad74ae96fea9823a02cc9", size = 177364, upload-time = "2026-01-10T09:22:59.333Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/7e/0c/8811fc53e9bcff68fe7de2bcbe75116a8d959ac699a3200f4847a8925210/websockets-16.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:52a0fec0e6c8d9a784c2c78276a48a2bdf099e4ccc2a4cad53b27718dbfd0230", size = 175039, upload-time = "2026-01-10T09:23:01.171Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/aa/82/39a5f910cb99ec0b59e482971238c845af9220d3ab9fa76dd9162cda9d62/websockets-16.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e6578ed5b6981005df1860a56e3617f14a6c307e6a71b4fff8c48fdc50f3ed2c", size = 175323, upload-time = "2026-01-10T09:23:02.341Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/bd/28/0a25ee5342eb5d5f297d992a77e56892ecb65e7854c7898fb7d35e9b33bd/websockets-16.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:95724e638f0f9c350bb1c2b0a7ad0e83d9cc0c9259f3ea94e40d7b02a2179ae5", size = 184975, upload-time = "2026-01-10T09:23:03.756Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f9/66/27ea52741752f5107c2e41fda05e8395a682a1e11c4e592a809a90c6a506/websockets-16.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c0204dc62a89dc9d50d682412c10b3542d748260d743500a85c13cd1ee4bde82", size = 186203, upload-time = "2026-01-10T09:23:05.01Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/37/e5/8e32857371406a757816a2b471939d51c463509be73fa538216ea52b792a/websockets-16.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:52ac480f44d32970d66763115edea932f1c5b1312de36df06d6b219f6741eed8", size = 185653, upload-time = "2026-01-10T09:23:06.301Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/9b/67/f926bac29882894669368dc73f4da900fcdf47955d0a0185d60103df5737/websockets-16.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6e5a82b677f8f6f59e8dfc34ec06ca6b5b48bc4fcda346acd093694cc2c24d8f", size = 184920, upload-time = "2026-01-10T09:23:07.492Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/3c/a1/3d6ccdcd125b0a42a311bcd15a7f705d688f73b2a22d8cf1c0875d35d34a/websockets-16.0-cp313-cp313-win32.whl", hash = "sha256:abf050a199613f64c886ea10f38b47770a65154dc37181bfaff70c160f45315a", size = 178255, upload-time = "2026-01-10T09:23:09.245Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/6b/ae/90366304d7c2ce80f9b826096a9e9048b4bb760e44d3b873bb272cba696b/websockets-16.0-cp313-cp313-win_amd64.whl", hash = "sha256:3425ac5cf448801335d6fdc7ae1eb22072055417a96cc6b31b3861f455fbc156", size = 178689, upload-time = "2026-01-10T09:23:10.483Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f3/1d/e88022630271f5bd349ed82417136281931e558d628dd52c4d8621b4a0b2/websockets-16.0-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:8cc451a50f2aee53042ac52d2d053d08bf89bcb31ae799cb4487587661c038a0", size = 177406, upload-time = "2026-01-10T09:23:12.178Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f2/78/e63be1bf0724eeb4616efb1ae1c9044f7c3953b7957799abb5915bffd38e/websockets-16.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:daa3b6ff70a9241cf6c7fc9e949d41232d9d7d26fd3522b1ad2b4d62487e9904", size = 175085, upload-time = "2026-01-10T09:23:13.511Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/bb/f4/d3c9220d818ee955ae390cf319a7c7a467beceb24f05ee7aaaa2414345ba/websockets-16.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:fd3cb4adb94a2a6e2b7c0d8d05cb94e6f1c81a0cf9dc2694fb65c7e8d94c42e4", size = 175328, upload-time = "2026-01-10T09:23:14.727Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/63/bc/d3e208028de777087e6fb2b122051a6ff7bbcca0d6df9d9c2bf1dd869ae9/websockets-16.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:781caf5e8eee67f663126490c2f96f40906594cb86b408a703630f95550a8c3e", size = 185044, upload-time = "2026-01-10T09:23:15.939Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/ad/6e/9a0927ac24bd33a0a9af834d89e0abc7cfd8e13bed17a86407a66773cc0e/websockets-16.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:caab51a72c51973ca21fa8a18bd8165e1a0183f1ac7066a182ff27107b71e1a4", size = 186279, upload-time = "2026-01-10T09:23:17.148Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b9/ca/bf1c68440d7a868180e11be653c85959502efd3a709323230314fda6e0b3/websockets-16.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:19c4dc84098e523fd63711e563077d39e90ec6702aff4b5d9e344a60cb3c0cb1", size = 185711, upload-time = "2026-01-10T09:23:18.372Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/c4/f8/fdc34643a989561f217bb477cbc47a3a07212cbda91c0e4389c43c296ebf/websockets-16.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:a5e18a238a2b2249c9a9235466b90e96ae4795672598a58772dd806edc7ac6d3", size = 184982, upload-time = "2026-01-10T09:23:19.652Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/dd/d1/574fa27e233764dbac9c52730d63fcf2823b16f0856b3329fc6268d6ae4f/websockets-16.0-cp314-cp314-win32.whl", hash = "sha256:a069d734c4a043182729edd3e9f247c3b2a4035415a9172fd0f1b71658a320a8", size = 177915, upload-time = "2026-01-10T09:23:21.458Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/8a/f1/ae6b937bf3126b5134ce1f482365fde31a357c784ac51852978768b5eff4/websockets-16.0-cp314-cp314-win_amd64.whl", hash = "sha256:c0ee0e63f23914732c6d7e0cce24915c48f3f1512ec1d079ed01fc629dab269d", size = 178381, upload-time = "2026-01-10T09:23:22.715Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/06/9b/f791d1db48403e1f0a27577a6beb37afae94254a8c6f08be4a23e4930bc0/websockets-16.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:a35539cacc3febb22b8f4d4a99cc79b104226a756aa7400adc722e83b0d03244", size = 177737, upload-time = "2026-01-10T09:23:24.523Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/bd/40/53ad02341fa33b3ce489023f635367a4ac98b73570102ad2cdd770dacc9a/websockets-16.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:b784ca5de850f4ce93ec85d3269d24d4c82f22b7212023c974c401d4980ebc5e", size = 175268, upload-time = "2026-01-10T09:23:25.781Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/74/9b/6158d4e459b984f949dcbbb0c5d270154c7618e11c01029b9bbd1bb4c4f9/websockets-16.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:569d01a4e7fba956c5ae4fc988f0d4e187900f5497ce46339c996dbf24f17641", size = 175486, upload-time = "2026-01-10T09:23:27.033Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/e5/2d/7583b30208b639c8090206f95073646c2c9ffd66f44df967981a64f849ad/websockets-16.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:50f23cdd8343b984957e4077839841146f67a3d31ab0d00e6b824e74c5b2f6e8", size = 185331, upload-time = "2026-01-10T09:23:28.259Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/45/b0/cce3784eb519b7b5ad680d14b9673a31ab8dcb7aad8b64d81709d2430aa8/websockets-16.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:152284a83a00c59b759697b7f9e9cddf4e3c7861dd0d964b472b70f78f89e80e", size = 186501, upload-time = "2026-01-10T09:23:29.449Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/19/60/b8ebe4c7e89fb5f6cdf080623c9d92789a53636950f7abacfc33fe2b3135/websockets-16.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:bc59589ab64b0022385f429b94697348a6a234e8ce22544e3681b2e9331b5944", size = 186062, upload-time = "2026-01-10T09:23:31.368Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/88/a8/a080593f89b0138b6cba1b28f8df5673b5506f72879322288b031337c0b8/websockets-16.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:32da954ffa2814258030e5a57bc73a3635463238e797c7375dc8091327434206", size = 185356, upload-time = "2026-01-10T09:23:32.627Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/c2/b6/b9afed2afadddaf5ebb2afa801abf4b0868f42f8539bfe4b071b5266c9fe/websockets-16.0-cp314-cp314t-win32.whl", hash = "sha256:5a4b4cc550cb665dd8a47f868c8d04c8230f857363ad3c9caf7a0c3bf8c61ca6", size = 178085, upload-time = "2026-01-10T09:23:33.816Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/9f/3e/28135a24e384493fa804216b79a6a6759a38cc4ff59118787b9fb693df93/websockets-16.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b14dc141ed6d2dde437cddb216004bcac6a1df0935d79656387bd41632ba0bbd", size = 178531, upload-time = "2026-01-10T09:23:35.016Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/6f/28/258ebab549c2bf3e64d2b0217b973467394a9cea8c42f70418ca2c5d0d2e/websockets-16.0-py3-none-any.whl", hash = "sha256:1637db62fad1dc833276dded54215f2c7fa46912301a24bd94d45d46a011ceec", size = 171598, upload-time = "2026-01-10T09:23:45.395Z" },
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "widgetsnbextension"
|
name = "widgetsnbextension"
|
||||||
version = "4.0.15"
|
version = "4.0.15"
|
||||||
|
|||||||
Reference in New Issue
Block a user