"""
|
||
|
|
Backtest Runner
|
||
|
|
|
||
|
|
This module provides a high-level interface for running backtests with strategy
|
||
|
|
management. It encapsulates the backtesting workflow and provides a clean
|
||
|
|
interface for executing tests across different configurations.
|
||
|
|
"""
|
||
|
|
|
||
|
|
import pandas as pd
|
||
|
|
import logging
|
||
|
|
from typing import Dict, List, Tuple, Any, Optional
|
||
|
|
|
||
|
|
from cycles.backtest import Backtest
|
||
|
|
from cycles.charts import BacktestCharts
|
||
|
|
from cycles.strategies import create_strategy_manager
|
||
|
|
from .results_processor import ResultsProcessor
|
||
|
|
|
||
|
|
|
||
|
|
class BacktestRunner:
    """
    High-level backtest execution interface.

    Encapsulates the backtesting workflow, strategy management, and result
    processing into a clean, reusable interface.
    """

    def __init__(self, results_processor: Optional[ResultsProcessor] = None):
        """
        Initialize backtest runner.

        Args:
            results_processor: Optional results processor instance
        """
        self.logger = logging.getLogger(__name__)
        self.results_processor = results_processor or ResultsProcessor()

    def run_single_timeframe(self, data_1min: pd.DataFrame, timeframe: str,
                             config: Dict[str, Any], debug: bool = False) -> Tuple[Dict[str, Any], List[Dict[str, Any]]]:
        """
        Run a backtest for a single timeframe configuration.

        Args:
            data_1min: 1-minute OHLCV data
            timeframe: Timeframe identifier
            config: Configuration dictionary
            debug: Whether to enable debug mode

        Returns:
            Tuple[Dict, List]: (summary_row, trade_rows)
        """
        try:
            # Create and initialize the strategy manager
            strategy_manager = self._create_strategy_manager(config)

            # Set up the backtester with the appropriate data
            backtester = self._setup_backtester(data_1min, strategy_manager, config)

            # Run the backtest
            results = self._execute_backtest(backtester, debug)

            # Process results
            strategy_summary = strategy_manager.get_strategy_summary()
            summary_row, trade_rows = self.results_processor.process_backtest_results(
                results, timeframe, config, strategy_summary
            )

            # Handle debug plotting
            if debug:
                self._handle_debug_plotting(backtester, results)

            return summary_row, trade_rows

        except Exception as e:
            self.logger.error(f"Backtest failed for timeframe {timeframe}: {e}")
            raise

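    # The ``config`` dictionaries consumed by this class come from the caller's
    # configuration manager. The only keys read in this module are 'strategies'
    # and 'combination_rules' (unpacked in _create_strategy_manager below) and
    # 'initial_usd' (read in _setup_backtester). The shape sketched here is an
    # illustrative assumption, not a definitive schema:
    #
    #     config = {
    #         "strategies": [...],          # per-strategy settings
    #         "combination_rules": {...},   # how multiple strategy signals combine
    #         "initial_usd": 10_000,        # starting capital passed to Backtest
    #     }
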
    def _create_strategy_manager(self, config: Dict[str, Any]):
        """Create and validate strategy manager from configuration."""
        strategy_config = {
            "strategies": config['strategies'],
            "combination_rules": config['combination_rules']
        }

        if not strategy_config['strategies']:
            raise ValueError("No strategy configuration provided")

        return create_strategy_manager(strategy_config)

    def _setup_backtester(self, data_1min: pd.DataFrame, strategy_manager, config: Dict[str, Any]) -> Backtest:
        """Set up the backtester with the appropriate data and strategy manager."""
        # Get the primary strategy for backtester setup
        primary_strategy = strategy_manager.strategies[0]

        # Determine the working dataframe based on the strategy type
        if primary_strategy.name == "bbrs":
            # The BBRS strategy consumes 1-minute data and handles resampling internally
            working_df = data_1min.copy()
        else:
            # Other strategies specify their preferred timeframe
            primary_strategy._resample_data(data_1min)
            working_df = primary_strategy.get_primary_timeframe_data()

        # Prepare the working dataframe for the backtester
        working_df_for_backtest = working_df.copy().reset_index()
        if 'index' in working_df_for_backtest.columns:
            working_df_for_backtest = working_df_for_backtest.rename(columns={'index': 'timestamp'})

        # Initialize the backtest
        backtester = Backtest(
            config['initial_usd'],
            working_df_for_backtest,
            working_df_for_backtest,
            self._strategy_manager_init
        )

        # Store the original data and attach the strategy manager
        backtester.original_df = data_1min
        backtester.strategy_manager = strategy_manager

        # Initialize the strategy manager
        strategy_manager.initialize(backtester)

        return backtester

    def _execute_backtest(self, backtester: Backtest, debug: bool = False) -> Dict[str, Any]:
        """Execute the backtest using strategy manager functions."""
        return backtester.run(
            self._strategy_manager_entry,
            self._strategy_manager_exit,
            debug
        )

    def _handle_debug_plotting(self, backtester: Backtest, results: Dict[str, Any]) -> None:
        """Handle debug plotting if enabled."""
        try:
            # Check whether any strategy exposes processed_data for universal plotting
            processed_data = None
            for strategy in backtester.strategy_manager.strategies:
                if getattr(strategy, 'processed_data', None) is not None:
                    processed_data = strategy.processed_data
                    break

            if processed_data is not None and not processed_data.empty:
                # Format strategy data with the actually executed trades for universal plotting
                formatted_data = BacktestCharts.format_strategy_data_with_trades(processed_data, results)
                # Plot using the universal function
                BacktestCharts.plot_data(formatted_data)
            elif "meta_trend" in backtester.strategies:
                # Fall back to the meta_trend plot if available
                meta_trend = backtester.strategies["meta_trend"]
                working_df = backtester.df.set_index('timestamp')
                BacktestCharts.plot(working_df, meta_trend)
            else:
                self.logger.info("No plotting data available")
        except Exception as e:
            self.logger.warning(f"Plotting failed: {e}")

    # Strategy manager interface functions

    @staticmethod
    def _strategy_manager_init(backtester: Backtest):
        """Strategy Manager initialization function."""
        # Actual initialization happens in strategy_manager.initialize()
        pass

    @staticmethod
    def _strategy_manager_entry(backtester: Backtest, df_index: int) -> bool:
        """Strategy Manager entry function."""
        return backtester.strategy_manager.get_entry_signal(backtester, df_index)

    @staticmethod
    def _strategy_manager_exit(backtester: Backtest, df_index: int) -> Tuple[Optional[str], Optional[float]]:
        """Strategy Manager exit function."""
        return backtester.strategy_manager.get_exit_signal(backtester, df_index)


class TimeframeTask:
    """Encapsulates a single timeframe backtest task."""

    def __init__(self, timeframe: str, data_1min: pd.DataFrame, config: Dict[str, Any]):
        """
        Initialize timeframe task.

        Args:
            timeframe: Timeframe identifier
            data_1min: 1-minute OHLCV data
            config: Configuration for this task
        """
        self.timeframe = timeframe
        self.data_1min = data_1min
        self.config = config

    def execute(self, debug: bool = False) -> Tuple[Dict[str, Any], List[Dict[str, Any]]]:
        """
        Execute the timeframe task.

        Args:
            debug: Whether to enable debug mode

        Returns:
            Tuple[Dict, List]: (summary_row, trade_rows)
        """
        runner = BacktestRunner()
        return runner.run_single_timeframe(self.data_1min, self.timeframe, self.config, debug)


def create_timeframe_tasks(timeframes: List[str], data_1min: pd.DataFrame,
                           config_manager) -> List[TimeframeTask]:
    """
    Create timeframe tasks from configuration.

    Args:
        timeframes: List of timeframes to test
        data_1min: 1-minute OHLCV data
        config_manager: Configuration manager instance

    Returns:
        List[TimeframeTask]: List of timeframe tasks
    """
    tasks = []
    for timeframe in timeframes:
        task_config = config_manager.get_timeframe_task_config(timeframe)
        tasks.append(TimeframeTask(timeframe, data_1min, task_config))
    return tasks
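

# Minimal usage sketch. Everything below is illustrative: the synthetic OHLCV
# frame and the config values are placeholders, and the real strategy names,
# parameters, and combination rules are defined by the project's configuration
# manager, whose schema may differ from what is assumed here.
if __name__ == "__main__":
    import numpy as np

    # Synthetic 1-minute OHLCV data purely for demonstration
    index = pd.date_range("2024-01-01", periods=1_000, freq="1min")
    close = 100 + np.cumsum(np.random.normal(0, 0.1, size=len(index)))
    data_1min = pd.DataFrame(
        {
            "open": close,
            "high": close + 0.05,
            "low": close - 0.05,
            "close": close,
            "volume": 1.0,
        },
        index=index,
    )

    # Placeholder configuration using the keys this module reads
    example_config = {
        "strategies": [{"name": "meta_trend"}],
        "combination_rules": {},
        "initial_usd": 10_000,
    }

    runner = BacktestRunner()
    summary_row, trade_rows = runner.run_single_timeframe(
        data_1min, timeframe="15min", config=example_config, debug=False
    )
    print(summary_row)
    print(f"{len(trade_rows)} trades generated")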