revert refactor for modularity
This commit is contained in:
2025-05-23 12:47:59 +00:00
parent b71faa9758
commit 65ae3060de
11 changed files with 416 additions and 1669 deletions

View File

@@ -1,13 +0,0 @@
"""
This module contains the analysis classes for the cycles project.
"""
from .boillinger_band import BollingerBands
from .rsi import RSI
from .bb_rsi import BollingerBandsStrategy
__all__ = ["BollingerBands", "RSI", "BollingerBandsStrategy"]
__version__ = "0.1.0"
__author__ = 'TCP Cycles Team'

View File

@@ -1,214 +0,0 @@
"""
Backtesting Application
This module provides the main application class that orchestrates the entire
backtesting workflow. It coordinates configuration management, data loading,
backtest execution, and result output.
"""
import logging
import datetime
import concurrent.futures
from pathlib import Path
from typing import Optional, List, Dict, Any
from cycles.utils.storage import Storage
from cycles.utils.system import SystemUtils
from cycles.utils.config_manager import ConfigManager
from cycles.utils.results_processor import ResultsProcessor
from cycles.utils.backtest_runner import create_timeframe_tasks
class BacktestApplication:
    """
    Main application class for coordinating backtesting workflow.

    Orchestrates configuration management, data loading, backtest execution,
    and result output in a clean, modular way.
    """

    def __init__(self, config_path: Optional[str] = None):
        """
        Initialize the backtesting application.

        Args:
            config_path: Optional path to configuration file
        """
        self.config_manager = ConfigManager(config_path)
        # Storage/SystemUtils receive the logging *module* itself via their
        # `logging=` keyword (not a logger instance).
        self.storage = Storage(logging=logging)
        self.system_utils = SystemUtils(logging=logging)
        self.results_processor = ResultsProcessor()
        self.logger = logging.getLogger(__name__)

    def load_data(self):
        """Load 1-minute market data for the configured start/stop dates."""
        self.logger.info("Loading market data...")
        data_1min = self.storage.load_data(
            'btcusd_1-min_data.csv',
            self.config_manager.start_date,
            self.config_manager.stop_date
        )
        self.logger.info(f"Loaded {len(data_1min)} rows of 1-minute data")
        return data_1min

    def create_tasks(self, data_1min) -> List:
        """Create backtest tasks from configuration (one per timeframe)."""
        self.logger.info("Creating backtest tasks...")
        tasks = create_timeframe_tasks(
            self.config_manager.timeframes,
            data_1min,
            self.config_manager
        )
        self.logger.info(f"Created {len(tasks)} backtest tasks")
        return tasks

    def execute_tasks(self, tasks: List, debug: bool = False) -> tuple:
        """
        Execute backtest tasks.

        Args:
            tasks: List of TimeframeTask objects
            debug: Whether to run in debug mode (sequential with plotting)

        Returns:
            Tuple of (results_rows, trade_rows)
        """
        if debug:
            return self._execute_tasks_debug(tasks)
        else:
            return self._execute_tasks_parallel(tasks)

    def _execute_tasks_debug(self, tasks: List) -> tuple:
        """Execute tasks in debug mode (sequential, plotting enabled)."""
        self.logger.info("Executing tasks in debug mode (sequential)")
        all_results_rows = []
        all_trade_rows = []
        for task in tasks:
            self.logger.info(f"Processing timeframe: {task.timeframe}")
            results, trades = task.execute(debug=True)
            # A task may produce empty output; only collect non-empty results.
            if results:
                all_results_rows.append(results)
            if trades:
                all_trade_rows.extend(trades)
        return all_results_rows, all_trade_rows

    def _execute_tasks_parallel(self, tasks: List) -> tuple:
        """Execute tasks across worker processes.

        A failed task is logged and skipped; remaining tasks still
        contribute their results.
        """
        workers = self.system_utils.get_optimal_workers()
        self.logger.info(f"Executing tasks in parallel with {workers} workers")
        all_results_rows = []
        all_trade_rows = []
        with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as executor:
            # Submit all tasks
            futures = {
                executor.submit(task.execute, False): task
                for task in tasks
            }
            # Collect results in completion order (not submission order)
            for future in concurrent.futures.as_completed(futures):
                task = futures[future]
                try:
                    results, trades = future.result()
                    if results:
                        all_results_rows.append(results)
                    if trades:
                        all_trade_rows.extend(trades)
                    self.logger.info(f"Completed timeframe: {task.timeframe}")
                except Exception as e:
                    self.logger.error(f"Task failed for timeframe {task.timeframe}: {e}")
        return all_results_rows, all_trade_rows

    def save_results(self, results_rows: List[Dict[str, Any]], trade_rows: List[Dict[str, Any]],
                     data_1min) -> None:
        """
        Save backtest results to files.

        Args:
            results_rows: List of result summary rows
            trade_rows: List of individual trade rows
            data_1min: Original 1-minute data for metadata
        """
        timestamp = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M")
        # Create metadata
        metadata_lines = self.results_processor.create_metadata_lines(
            self.config_manager, data_1min
        )
        # Save backtest results
        backtest_filename = f"{timestamp}_backtest.csv"
        backtest_fieldnames = [
            "timeframe", "stop_loss_pct", "n_trades", "n_stop_loss", "win_rate",
            "max_drawdown", "avg_trade", "profit_ratio", "final_usd", "total_fees_usd"
        ]
        self.storage.write_backtest_results(
            backtest_filename, backtest_fieldnames, results_rows, metadata_lines
        )
        # Save trade details
        # NOTE(review): trade rows produced elsewhere also carry 'timeframe'
        # and 'stop_loss_pct' keys not listed here — confirm
        # Storage.write_trades tolerates extra keys.
        trades_fieldnames = [
            "entry_time", "exit_time", "entry_price", "exit_price",
            "profit_pct", "type", "fee_usd"
        ]
        self.storage.write_trades(trade_rows, trades_fieldnames)
        self.logger.info(f"Results saved to {backtest_filename}")

    def run(self, debug: bool = False) -> None:
        """
        Run the complete backtesting workflow.

        Args:
            debug: Whether to run in debug mode

        Raises:
            Re-raises any workflow exception after logging it.
        """
        try:
            self.logger.info("Starting backtesting workflow")
            self.logger.info(f"Configuration: {self.config_manager}")
            # Load data
            data_1min = self.load_data()
            # Create and execute tasks
            tasks = self.create_tasks(data_1min)
            results_rows, trade_rows = self.execute_tasks(tasks, debug)
            # Save results
            if results_rows or trade_rows:
                self.save_results(results_rows, trade_rows, data_1min)
                self.logger.info("Backtesting workflow completed successfully")
            else:
                self.logger.warning("No results generated")
        except Exception as e:
            self.logger.error(f"Backtesting workflow failed: {e}")
            raise
def setup_logging() -> None:
    """Configure root logging: INFO level, mirrored to file and console."""
    handlers = [
        logging.FileHandler("backtest.log"),
        logging.StreamHandler(),
    ]
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(levelname)s] %(message)s",
        handlers=handlers,
    )

View File

@@ -1,23 +0,0 @@
"""
Utilities Module
This module provides utility classes and functions for the backtesting framework.
"""
from .storage import Storage
from .system import SystemUtils
from .data_utils import *
from .config_manager import ConfigManager
from .results_processor import ResultsProcessor, BacktestMetrics
from .backtest_runner import BacktestRunner, TimeframeTask, create_timeframe_tasks
__all__ = [
'Storage',
'SystemUtils',
'ConfigManager',
'ResultsProcessor',
'BacktestMetrics',
'BacktestRunner',
'TimeframeTask',
'create_timeframe_tasks'
]

View File

@@ -1,224 +0,0 @@
"""
Backtest Runner
This module provides a high-level interface for running backtests with strategy
management. It encapsulates the backtesting workflow and provides a clean
interface for executing tests across different configurations.
"""
import pandas as pd
import logging
from typing import Dict, List, Tuple, Any, Optional
from cycles.backtest import Backtest
from cycles.charts import BacktestCharts
from cycles.strategies import create_strategy_manager
from .results_processor import ResultsProcessor
class BacktestRunner:
    """
    High-level backtest execution interface.

    Encapsulates the backtesting workflow, strategy management, and result
    processing into a clean, reusable interface.
    """

    def __init__(self, results_processor: Optional[ResultsProcessor] = None):
        """
        Initialize backtest runner.

        Args:
            results_processor: Optional results processor instance; a fresh
                ResultsProcessor is created when omitted.
        """
        self.logger = logging.getLogger(__name__)
        self.results_processor = results_processor or ResultsProcessor()

    def run_single_timeframe(self, data_1min: pd.DataFrame, timeframe: str,
                             config: Dict[str, Any], debug: bool = False) -> Tuple[Dict[str, Any], List[Dict[str, Any]]]:
        """
        Run backtest for a single timeframe configuration.

        Args:
            data_1min: 1-minute OHLCV data
            timeframe: Timeframe identifier
            config: Configuration dictionary (reads 'strategies',
                'combination_rules' and 'initial_usd')
            debug: Whether to enable debug mode (plots after the run)

        Returns:
            Tuple[Dict, List]: (summary_row, trade_rows)

        Raises:
            Re-raises any setup/execution exception after logging it.
        """
        try:
            # Create and initialize strategy manager
            strategy_manager = self._create_strategy_manager(config)
            # Setup backtester with appropriate data
            backtester = self._setup_backtester(data_1min, strategy_manager, config)
            # Run backtest
            results = self._execute_backtest(backtester, debug)
            # Process results
            strategy_summary = strategy_manager.get_strategy_summary()
            summary_row, trade_rows = self.results_processor.process_backtest_results(
                results, timeframe, config, strategy_summary
            )
            # Handle debug plotting
            if debug:
                self._handle_debug_plotting(backtester, results)
            return summary_row, trade_rows
        except Exception as e:
            self.logger.error(f"Backtest failed for timeframe {timeframe}: {e}")
            raise

    def _create_strategy_manager(self, config: Dict[str, Any]):
        """Create and validate strategy manager from configuration.

        Raises:
            ValueError: if config['strategies'] is empty.
        """
        strategy_config = {
            "strategies": config['strategies'],
            "combination_rules": config['combination_rules']
        }
        if not strategy_config['strategies']:
            raise ValueError("No strategy configuration provided")
        return create_strategy_manager(strategy_config)

    def _setup_backtester(self, data_1min: pd.DataFrame, strategy_manager, config: Dict[str, Any]) -> Backtest:
        """Setup backtester with appropriate data and strategy manager."""
        # Get primary strategy for backtester setup
        primary_strategy = strategy_manager.strategies[0]
        # Determine working dataframe based on strategy type
        if primary_strategy.name == "bbrs":
            # BBRS strategy processes 1-minute data and handles internal resampling
            working_df = data_1min.copy()
        else:
            # Other strategies specify their preferred timeframe
            primary_strategy._resample_data(data_1min)
            working_df = primary_strategy.get_primary_timeframe_data()
        # Prepare working dataframe for backtester: flatten the index into a
        # 'timestamp' column when pandas names it 'index' after reset_index().
        working_df_for_backtest = working_df.copy().reset_index()
        if 'index' in working_df_for_backtest.columns:
            working_df_for_backtest = working_df_for_backtest.rename(columns={'index': 'timestamp'})
        # Initialize backtest
        # NOTE(review): the same dataframe is passed twice — presumably the
        # Backtest constructor takes two frame arguments; confirm against
        # cycles.backtest.Backtest.
        backtester = Backtest(
            config['initial_usd'],
            working_df_for_backtest,
            working_df_for_backtest,
            self._strategy_manager_init
        )
        # Store original data and attach strategy manager
        backtester.original_df = data_1min
        backtester.strategy_manager = strategy_manager
        # Initialize strategy manager
        strategy_manager.initialize(backtester)
        return backtester

    def _execute_backtest(self, backtester: Backtest, debug: bool = False) -> Dict[str, Any]:
        """Execute the backtest using strategy manager interface functions."""
        return backtester.run(
            self._strategy_manager_entry,
            self._strategy_manager_exit,
            debug
        )

    def _handle_debug_plotting(self, backtester: Backtest, results: Dict[str, Any]) -> None:
        """Handle debug plotting; plotting failures are logged, never raised."""
        try:
            # Check if any strategy has processed_data for universal plotting
            processed_data = None
            # NOTE(review): the loop variable `strategy` is never used — the
            # condition inspects backtester.processed_data on every pass.
            # Possibly intended as hasattr(strategy, 'processed_data'); confirm
            # against the strategy implementations.
            for strategy in backtester.strategy_manager.strategies:
                if hasattr(backtester, 'processed_data') and backtester.processed_data is not None:
                    processed_data = backtester.processed_data
                    break
            if processed_data is not None and not processed_data.empty:
                # Format strategy data with actual executed trades for universal plotting
                formatted_data = BacktestCharts.format_strategy_data_with_trades(processed_data, results)
                # Plot using universal function
                BacktestCharts.plot_data(formatted_data)
            else:
                # Fallback to meta_trend plot if available
                # NOTE(review): reads backtester.strategies (looks like a
                # mapping) rather than strategy_manager.strategies — confirm
                # Backtest exposes such an attribute.
                if "meta_trend" in backtester.strategies:
                    meta_trend = backtester.strategies["meta_trend"]
                    working_df = backtester.df.set_index('timestamp')
                    BacktestCharts.plot(working_df, meta_trend)
                else:
                    self.logger.info("No plotting data available")
        except Exception as e:
            self.logger.warning(f"Plotting failed: {e}")

    # Strategy manager interface functions
    @staticmethod
    def _strategy_manager_init(backtester: Backtest):
        """Strategy Manager initialization function (intentional no-op)."""
        # Actual initialization happens in strategy_manager.initialize()
        pass

    @staticmethod
    def _strategy_manager_entry(backtester: Backtest, df_index: int) -> bool:
        """Strategy Manager entry function; delegates to the attached manager."""
        return backtester.strategy_manager.get_entry_signal(backtester, df_index)

    @staticmethod
    def _strategy_manager_exit(backtester: Backtest, df_index: int) -> Tuple[Optional[str], Optional[float]]:
        """Strategy Manager exit function; delegates to the attached manager."""
        return backtester.strategy_manager.get_exit_signal(backtester, df_index)
class TimeframeTask:
    """Encapsulates a single timeframe backtest task."""

    def __init__(self, timeframe: str, data_1min: pd.DataFrame, config: Dict[str, Any]):
        """
        Initialize timeframe task.

        Args:
            timeframe: Timeframe identifier
            data_1min: 1-minute OHLCV data
            config: Configuration for this task
        """
        self.timeframe = timeframe
        self.data_1min = data_1min
        self.config = config

    def execute(self, debug: bool = False) -> Tuple[Dict[str, Any], List[Dict[str, Any]]]:
        """
        Execute the timeframe task with a fresh BacktestRunner.

        Args:
            debug: Whether to enable debug mode

        Returns:
            Tuple[Dict, List]: (summary_row, trade_rows)
        """
        return BacktestRunner().run_single_timeframe(
            self.data_1min, self.timeframe, self.config, debug
        )
def create_timeframe_tasks(timeframes: List[str], data_1min: pd.DataFrame,
                           config_manager) -> List[TimeframeTask]:
    """
    Create timeframe tasks from configuration.

    Args:
        timeframes: List of timeframes to test
        data_1min: 1-minute OHLCV data
        config_manager: Configuration manager instance

    Returns:
        List[TimeframeTask]: One task per requested timeframe, each carrying
        the per-timeframe configuration from the config manager.
    """
    return [
        TimeframeTask(tf, data_1min, config_manager.get_timeframe_task_config(tf))
        for tf in timeframes
    ]

View File

@@ -1,129 +0,0 @@
"""
Configuration Manager
This module provides centralized configuration handling for the backtesting system.
It handles loading, validation, and provides a clean interface for accessing
configuration data.
"""
import json
import datetime
import logging
from typing import Dict, List, Optional, Any
from pathlib import Path
class ConfigManager:
    """
    Manages configuration loading, validation, and access.

    Loads a JSON configuration file once at construction, validates its
    structure, and exposes the settings through read-only properties and
    convenience accessors.
    """

    def __init__(self, config_path: Optional[str] = None):
        """
        Initialize configuration manager.

        Args:
            config_path: Path to configuration file. If None, uses default.
        """
        self.config_path = config_path or "configs/config_default.json"
        self.config = self._load_config()
        self._validate_config()

    def _load_config(self) -> Dict[str, Any]:
        """Read and parse the JSON configuration file.

        Raises:
            FileNotFoundError: if the file is missing (lists available configs).
            ValueError: if the file is not valid JSON.
        """
        try:
            with open(self.config_path, 'r') as fh:
                parsed = json.load(fh)
        except FileNotFoundError:
            available_configs = list(Path("configs").glob("*.json"))
            raise FileNotFoundError(
                f"Config file '{self.config_path}' not found. "
                f"Available configs: {[str(c) for c in available_configs]}"
            )
        except json.JSONDecodeError as e:
            raise ValueError(f"Invalid JSON in config file '{self.config_path}': {e}")
        logging.info(f"Loaded configuration from: {self.config_path}")
        return parsed

    def _validate_config(self) -> None:
        """Validate configuration structure and values.

        Raises:
            ValueError: on the first missing required field, an empty
                strategies list, a strategy without a name, or an empty
                timeframes list.
        """
        for field in ('start_date', 'initial_usd', 'timeframes', 'strategies'):
            if field not in self.config:
                raise ValueError(f"Missing required field '{field}' in configuration")
        # Validate strategies
        if not self.config['strategies']:
            raise ValueError("At least one strategy must be specified")
        for strategy in self.config['strategies']:
            if 'name' not in strategy:
                raise ValueError("Strategy must have a 'name' field")
        # Validate timeframes
        if not self.config['timeframes']:
            raise ValueError("At least one timeframe must be specified")
        logging.info("Configuration validation successful")

    @property
    def start_date(self) -> str:
        """Get start date."""
        return self.config['start_date']

    @property
    def stop_date(self) -> str:
        """Get stop date, defaulting to the current date when unset."""
        configured = self.config.get('stop_date')
        if configured is not None:
            return configured
        return datetime.datetime.now().strftime("%Y-%m-%d")

    @property
    def initial_usd(self) -> float:
        """Get initial USD amount."""
        return self.config['initial_usd']

    @property
    def timeframes(self) -> List[str]:
        """Get list of timeframes to test."""
        return self.config['timeframes']

    @property
    def strategies_config(self) -> List[Dict[str, Any]]:
        """Get strategies configuration."""
        return self.config['strategies']

    @property
    def combination_rules(self) -> Dict[str, Any]:
        """Get combination rules for strategy manager (with defaults)."""
        default_rules = {
            "entry": "any",
            "exit": "any",
            "min_confidence": 0.5
        }
        return self.config.get('combination_rules', default_rules)

    def get_strategy_manager_config(self) -> Dict[str, Any]:
        """Get configuration for strategy manager."""
        return {
            "strategies": self.strategies_config,
            "combination_rules": self.combination_rules
        }

    def get_timeframe_task_config(self, timeframe: str) -> Dict[str, Any]:
        """Get configuration for a specific timeframe task."""
        return {
            "initial_usd": self.initial_usd,
            "strategies": self.strategies_config,
            "combination_rules": self.combination_rules
        }

    def __repr__(self) -> str:
        """String representation of configuration."""
        return (
            f"ConfigManager(config_path='{self.config_path}', "
            f"strategies={len(self.strategies_config)}, "
            f"timeframes={len(self.timeframes)})"
        )

View File

@@ -1,239 +0,0 @@
"""
Results Processor
This module handles processing, aggregation, and analysis of backtest results.
It provides utilities for calculating metrics, aggregating results across
timeframes, and formatting output data.
"""
import pandas as pd
import numpy as np
import logging
from typing import Dict, List, Tuple, Any, Optional
from collections import defaultdict
class BacktestMetrics:
    """Container for backtest metrics calculation."""

    @staticmethod
    def calculate_trade_metrics(trades: List[Dict[str, Any]]) -> Dict[str, float]:
        """
        Calculate trade-level metrics from a list of trade dicts.

        Each trade is expected to carry a fractional 'profit_pct' and,
        optionally, 'entry'/'exit' prices.

        Args:
            trades: List of trade dictionaries.

        Returns:
            Dict with n_trades, n_winning_trades, win_rate, total_profit,
            total_loss, avg_trade, profit_ratio and max_drawdown. All zeros
            for an empty list; profit_ratio is inf when there are trades but
            no losing trades.
        """
        if not trades:
            return {
                "n_trades": 0,
                "n_winning_trades": 0,
                "win_rate": 0.0,
                "total_profit": 0.0,
                "total_loss": 0.0,
                "avg_trade": 0.0,
                "profit_ratio": 0.0,
                "max_drawdown": 0.0
            }
        n_trades = len(trades)
        # A win is a closed trade whose exit price exceeds its entry price.
        # Fixed: guard both keys explicitly — previously a trade carrying
        # 'exit' but no 'entry' raised KeyError, and an exit price of 0 was
        # silently treated as "no exit" (falsy .get()).
        wins = [
            t for t in trades
            if t.get('exit') is not None
            and t.get('entry') is not None
            and t['exit'] > t['entry']
        ]
        n_winning_trades = len(wins)
        win_rate = n_winning_trades / n_trades  # n_trades > 0 here
        total_profit = sum(t.get('profit_pct', 0.0) for t in trades)
        total_loss = sum(-t['profit_pct'] for t in trades if t.get('profit_pct', 0.0) < 0)
        avg_trade = total_profit / n_trades
        # Net-profit-to-gross-loss ratio; inf when there are no losing trades.
        profit_ratio = total_profit / total_loss if total_loss > 0 else float('inf')
        # Max drawdown on the cumulative (additive) profit_pct curve:
        # largest drop from a running peak.
        cumulative_profit = 0.0
        peak = 0.0
        max_drawdown = 0.0
        for trade in trades:
            cumulative_profit += trade.get('profit_pct', 0.0)
            if cumulative_profit > peak:
                peak = cumulative_profit
            drawdown = peak - cumulative_profit
            if drawdown > max_drawdown:
                max_drawdown = drawdown
        return {
            "n_trades": n_trades,
            "n_winning_trades": n_winning_trades,
            "win_rate": win_rate,
            "total_profit": total_profit,
            "total_loss": total_loss,
            "avg_trade": avg_trade,
            "profit_ratio": profit_ratio,
            "max_drawdown": max_drawdown
        }

    @staticmethod
    def calculate_portfolio_metrics(trades: List[Dict[str, Any]], initial_usd: float) -> Dict[str, float]:
        """
        Calculate portfolio-level metrics by compounding per-trade returns.

        Args:
            trades: List of trade dicts carrying fractional 'profit_pct'
                and, optionally, 'fee_usd'.
            initial_usd: Starting capital in USD.

        Returns:
            Dict with initial_usd, final_usd, total_fees_usd and fractional
            total_return (0.0 when initial_usd is 0).
        """
        final_usd = initial_usd
        for trade in trades:
            final_usd *= (1 + trade.get('profit_pct', 0.0))
        total_fees_usd = sum(trade.get('fee_usd', 0.0) for trade in trades)
        return {
            "initial_usd": initial_usd,
            "final_usd": final_usd,
            "total_fees_usd": total_fees_usd,
            # Fixed: avoid ZeroDivisionError for a zero starting balance.
            "total_return": (final_usd - initial_usd) / initial_usd if initial_usd else 0.0
        }
class ResultsProcessor:
    """
    Processes and aggregates backtest results.

    Handles result processing, metric calculation, and aggregation across
    multiple timeframes and configurations.
    """

    def __init__(self):
        """Initialize results processor."""
        self.logger = logging.getLogger(__name__)

    def process_backtest_results(self, results: Dict[str, Any], timeframe: str,
                                 config: Dict[str, Any], strategy_summary: Dict[str, Any]) -> Tuple[Dict[str, Any], List[Dict[str, Any]]]:
        """
        Process results from a single backtest run.

        Args:
            results: Raw backtest results (expects a 'trades' list)
            timeframe: Timeframe identifier
            config: Configuration used for the test (reads 'initial_usd')
            strategy_summary: Summary of strategies used (reads 'strategies')

        Returns:
            Tuple[Dict, List]: (summary_row, trade_rows)
        """
        trades = results.get('trades', [])
        initial_usd = config['initial_usd']
        # Calculate metrics
        trade_metrics = BacktestMetrics.calculate_trade_metrics(trades)
        portfolio_metrics = BacktestMetrics.calculate_portfolio_metrics(trades, initial_usd)
        # Get primary strategy info for reporting
        primary_strategy = strategy_summary['strategies'][0] if strategy_summary['strategies'] else {}
        # Fixed: an empty 'timeframes' list previously raised IndexError;
        # fall back to 'unknown' for both missing and empty values.
        primary_timeframe = (primary_strategy.get('timeframes') or ['unknown'])[0]
        stop_loss_pct = primary_strategy.get('params', {}).get('stop_loss_pct', 'N/A')
        label = f"{timeframe}({primary_timeframe})"
        # Create summary row
        summary_row = {
            "timeframe": label,
            "stop_loss_pct": stop_loss_pct,
            "n_stop_loss": sum(1 for trade in trades if trade.get('type') == 'STOP_LOSS'),
            **trade_metrics,
            **portfolio_metrics
        }
        # One flat row per executed trade
        trade_rows = [
            {
                "timeframe": label,
                "stop_loss_pct": stop_loss_pct,
                "entry_time": trade.get("entry_time"),
                "exit_time": trade.get("exit_time"),
                "entry_price": trade.get("entry"),
                "exit_price": trade.get("exit"),
                "profit_pct": trade.get("profit_pct"),
                "type": trade.get("type"),
                "fee_usd": trade.get("fee_usd"),
            }
            for trade in trades
        ]
        # Log results
        strategy_names = [s['name'] for s in strategy_summary['strategies']]
        self.logger.info(
            f"Timeframe: {label}, Stop Loss: {stop_loss_pct}, "
            f"Trades: {trade_metrics['n_trades']}, Strategies: {strategy_names}"
        )
        return summary_row, trade_rows

    def aggregate_results(self, all_rows: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        Aggregate results per (timeframe, stop_loss_pct) group.

        Args:
            all_rows: List of result rows to aggregate

        Returns:
            List[Dict]: One summary row per group; trade counts are summed,
            rate/ratio metrics are averaged across runs.
        """
        grouped = defaultdict(list)
        for row in all_rows:
            grouped[(row['timeframe'], row['stop_loss_pct'])].append(row)
        summary_rows = []
        for (timeframe, stop_loss_pct), rows in grouped.items():
            if not rows:
                continue
            # Counts are summed across runs
            total_trades = sum(r['n_trades'] for r in rows)
            total_stop_loss = sum(r['n_stop_loss'] for r in rows)
            # Rate metrics are averaged across runs
            avg_win_rate = np.mean([r['win_rate'] for r in rows])
            avg_max_drawdown = np.mean([r['max_drawdown'] for r in rows])
            avg_avg_trade = np.mean([r['avg_trade'] for r in rows])
            # Fixed: when every run had an infinite profit ratio (no losing
            # trades), np.mean([]) returned NaN with a RuntimeWarning;
            # report inf in that case instead.
            finite_ratios = [r['profit_ratio'] for r in rows if r['profit_ratio'] != float('inf')]
            avg_profit_ratio = np.mean(finite_ratios) if finite_ratios else float('inf')
            # Portfolio metrics
            initial_usd = rows[0]['initial_usd']  # assumed identical within a group
            avg_final_usd = np.mean([r['final_usd'] for r in rows])
            avg_total_fees_usd = np.mean([r['total_fees_usd'] for r in rows])
            summary_rows.append({
                "timeframe": timeframe,
                "stop_loss_pct": stop_loss_pct,
                "n_trades": total_trades,
                "n_stop_loss": total_stop_loss,
                "win_rate": avg_win_rate,
                "max_drawdown": avg_max_drawdown,
                "avg_trade": avg_avg_trade,
                "profit_ratio": avg_profit_ratio,
                "initial_usd": initial_usd,
                "final_usd": avg_final_usd,
                "total_fees_usd": avg_total_fees_usd,
            })
        return summary_rows

    def create_metadata_lines(self, config_manager, data_1min: pd.DataFrame) -> List[str]:
        """
        Create tab-separated metadata lines for result files.

        Args:
            config_manager: Configuration manager instance (provides
                start_date, stop_date, initial_usd)
            data_1min: 1-minute data for price lookups; assumes a
                DatetimeIndex and a 'close' column — TODO confirm.

        Returns:
            List[str]: Metadata lines
        """
        start_date = config_manager.start_date
        stop_date = config_manager.stop_date
        initial_usd = config_manager.initial_usd

        def get_nearest_price(df: pd.DataFrame, target_date: str) -> Tuple[Optional[str], Optional[float]]:
            """Return (timestamp, close) of the row nearest to target_date, or (None, None)."""
            if len(df) == 0:
                return None, None
            target_ts = pd.to_datetime(target_date)
            nearest_idx = df.index.get_indexer([target_ts], method='nearest')[0]
            nearest_time = df.index[nearest_idx]
            price = df.iloc[nearest_idx]['close']
            return str(nearest_time), price

        nearest_start_time, start_price = get_nearest_price(data_1min, start_date)
        nearest_stop_time, stop_price = get_nearest_price(data_1min, stop_date)
        return [
            f"Start date\t{start_date}\tPrice\t{start_price}",
            f"Stop date\t{stop_date}\tPrice\t{stop_price}",
            f"Initial USD\t{initial_usd}"
        ]