#!/usr/bin/env python3
"""
Strategy Backtest Runner for IncrementalTrader

This script runs backtests with specific strategy configurations defined in a
JSON file. Unlike the optimization script, this runner executes predefined
strategies without parameter optimization, making it ideal for testing specific
configurations or comparing different strategies.

Features:
- JSON configuration file support
- Multiple strategy execution in sequence
- Detailed result reporting and analysis
- Support for all available strategies (MetaTrend, BBRS, Random)
- Individual strategy plotting and detailed trade analysis
- Export of results to CSV, JSON, and plots
- Detailed plots showing portfolio value over time with buy/sell signals
- Signal data export for trade analysis
- Real-time file saving during execution
- Progress bars with tqdm (optional dependency)

Dependencies:
- Required: pandas, matplotlib, seaborn
- Optional: tqdm (for progress bars - pip install tqdm)

Usage:
    python test/backtest/strategy_run.py --config path/to/config.json
    python test/backtest/strategy_run.py --config configs/example_strategies.json --results-dir custom_results
"""

import os
import sys
import argparse
import logging
import json
import time
import traceback
from datetime import datetime
from typing import Dict, List, Any, Optional
from concurrent.futures import ProcessPoolExecutor, as_completed

import pandas as pd
import numpy as np

# Import plotting libraries for result visualization
try:
    import matplotlib.pyplot as plt
    import seaborn as sns
    plt.style.use('default')
    PLOTTING_AVAILABLE = True
except ImportError:
    PLOTTING_AVAILABLE = False

# Import progress bar
try:
    from tqdm import tqdm
    TQDM_AVAILABLE = True
except ImportError:
    TQDM_AVAILABLE = False

# Add project root to path
project_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, project_root)

# Import IncrementalTrader components
from IncrementalTrader.backtester import IncBacktester, BacktestConfig
from IncrementalTrader.backtester.utils import DataLoader, DataCache, SystemUtils, ResultsSaver
from IncrementalTrader.strategies import (
    MetaTrendStrategy,
    BBRSStrategy,
    RandomStrategy,
    IncStrategyBase
)
from IncrementalTrader.trader import IncTrader

# Set up logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(sys.stdout)
    ]
)
logger = logging.getLogger(__name__)

# Reduce verbosity for entry/exit logging
logging.getLogger('IncrementalTrader.strategies').setLevel(logging.WARNING)
logging.getLogger('IncrementalTrader.trader').setLevel(logging.WARNING)


def run_strategy_worker_function(job: Dict[str, Any]) -> Dict[str, Any]:
    """
    Global worker function for multiprocessing strategy execution.

    This function must be at module level to be picklable for multiprocessing.
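
    For illustration, a job assembled by ``run_strategies_parallel`` has roughly
    this shape (all values below are placeholders, not real data):

        job = {
            "strategy_config": {"name": "MetaTrend_Conservative", "type": "metatrend",
                                "params": {"timeframe": "15min"},
                                "trader_params": {"stop_loss_pct": 0.02}},
            "backtest_settings": {"data_file": "btcusd_1-min_data.csv",
                                  "start_date": "2023-01-01", "end_date": "2023-01-31",
                                  "initial_usd": 10000},
            "shared_data_info": {"data_serialized": "<JSON string>", "data_shape": (44640, 5),
                                 "data_columns": ["open", "high", "low", "close", "volume"],
                                 "index_name": "timestamp"},
            "strategy_index": 1,
            "total_strategies": 4,
        }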
    Args:
        job: Job configuration dictionary containing:
            - strategy_config: Strategy configuration
            - backtest_settings: Backtest settings
            - shared_data_info: Serialized market data
            - strategy_index: Index of the strategy
            - total_strategies: Total number of strategies

    Returns:
        Dictionary with backtest results
    """
    try:
        # Extract job parameters
        strategy_config = job['strategy_config']
        backtest_settings = job['backtest_settings']
        shared_data_info = job['shared_data_info']
        strategy_index = job['strategy_index']
        total_strategies = job['total_strategies']

        # Reconstruct market data from serialized form
        data_json = shared_data_info['data_serialized']
        shared_data = pd.read_json(data_json, orient='split')
        shared_data.index = pd.to_datetime(shared_data.index)
        shared_data.index.name = shared_data_info['index_name']

        # Create a temporary strategy runner for this worker
        temp_runner = StrategyRunner()

        # Execute the strategy with shared data
        result = temp_runner.run_single_backtest_with_shared_data(
            strategy_config, backtest_settings, shared_data,
            strategy_index, total_strategies
        )

        return result

    except Exception as e:
        # Return error result if worker fails
        return {
            "success": False,
            "error": str(e),
            "strategy_name": job['strategy_config'].get('name', 'Unknown'),
            "strategy_type": job['strategy_config'].get('type', 'Unknown'),
            "strategy_params": job['strategy_config'].get('params', {}),
            "trader_params": job['strategy_config'].get('trader_params', {}),
            "traceback": traceback.format_exc()
        }


class StrategyRunner:
    """
    Strategy backtest runner for executing predefined strategies.

    This class executes specific trading strategies with given parameters,
    provides detailed analysis and saves comprehensive results.

    Features:
    - Parallel strategy execution using all CPU cores
    - Data caching to eliminate redundant loading
    - Real-time compatible frame-by-frame processing
    - Comprehensive result analysis and visualization
    """

    def __init__(self, results_dir: str = "results", enable_parallel: bool = True):
        """
        Initialize the StrategyRunner.

        Args:
            results_dir: Directory for saving results
            enable_parallel: Enable parallel strategy execution (default: True)
        """
        self.base_results_dir = results_dir
        self.results_dir = None  # Will be set when running strategies
        self.system_utils = SystemUtils()
        self.session_start_time = datetime.now()
        self.results = []
        self.market_data = None  # Will store the full market data for plotting
        self.enable_parallel = enable_parallel

        # Initialize data cache for optimized loading
        self.data_cache = DataCache(max_cache_size=20)

        # Create results directory
        os.makedirs(self.base_results_dir, exist_ok=True)

        parallel_status = "enabled" if enable_parallel else "disabled"
        logger.info(f"StrategyRunner initialized with data caching enabled, parallel execution {parallel_status}")
        logger.info(f"Base results directory: {self.base_results_dir}")
        logger.info(f"System info: {self.system_utils.get_system_info()}")

    def load_config(self, config_path: str) -> Dict[str, Any]:
        """
        Load strategy configuration from JSON file.
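
        The file must define ``backtest_settings`` and a non-empty ``strategies``
        list (enforced by ``_validate_config``). A minimal, illustrative config:

            {
                "backtest_settings": {
                    "data_file": "btcusd_1-min_data.csv",
                    "start_date": "2023-01-01",
                    "end_date": "2023-01-31",
                    "initial_usd": 10000
                },
                "strategies": [
                    {"name": "Random_Baseline", "type": "random",
                     "params": {"signal_probability": 0.001, "timeframe": "15min"}}
                ]
            }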
        Args:
            config_path: Path to the JSON configuration file

        Returns:
            Dictionary containing configuration

        Raises:
            FileNotFoundError: If config file doesn't exist
            json.JSONDecodeError: If config file is invalid JSON
        """
        if not os.path.exists(config_path):
            raise FileNotFoundError(f"Configuration file not found: {config_path}")

        try:
            with open(config_path, 'r') as f:
                config = json.load(f)

            # Validate config structure
            self._validate_config(config)

            logger.info(f"Configuration loaded from: {config_path}")
            return config

        except json.JSONDecodeError as e:
            # json.JSONDecodeError requires (msg, doc, pos); re-raise with a clearer message
            raise json.JSONDecodeError(f"Invalid JSON in config file: {e.msg}", e.doc, e.pos) from e

    def _validate_config(self, config: Dict[str, Any]) -> None:
        """
        Validate the configuration structure.

        Args:
            config: Configuration dictionary to validate

        Raises:
            ValueError: If configuration is invalid
        """
        required_fields = ['backtest_settings', 'strategies']
        for field in required_fields:
            if field not in config:
                raise ValueError(f"Missing required field in config: {field}")

        # Validate backtest settings
        backtest_settings = config['backtest_settings']
        required_backtest_fields = ['data_file', 'start_date', 'end_date']
        for field in required_backtest_fields:
            if field not in backtest_settings:
                raise ValueError(f"Missing required backtest setting: {field}")

        # Validate strategies
        strategies = config['strategies']
        if not isinstance(strategies, list) or len(strategies) == 0:
            raise ValueError("Config must contain at least one strategy")

        for i, strategy in enumerate(strategies):
            if 'name' not in strategy or 'type' not in strategy:
                raise ValueError(f"Strategy {i} missing required fields: 'name' and 'type'")

    def create_strategy(self, strategy_config: Dict[str, Any]) -> IncStrategyBase:
        """
        Create a strategy instance from configuration.

        Args:
            strategy_config: Strategy configuration dictionary

        Returns:
            Strategy instance

        Raises:
            ValueError: If strategy type is unknown
        """
        strategy_type = strategy_config['type'].lower()
        strategy_name = strategy_config['name']
        strategy_params = strategy_config.get('params', {})

        if strategy_type == 'metatrend':
            return MetaTrendStrategy(name=strategy_name, params=strategy_params)
        elif strategy_type == 'bbrs':
            return BBRSStrategy(name=strategy_name, params=strategy_params)
        elif strategy_type == 'random':
            return RandomStrategy(name=strategy_name, params=strategy_params)
        else:
            raise ValueError(f"Unknown strategy type: {strategy_type}")

    def load_data_once(self, backtest_settings: Dict[str, Any]) -> pd.DataFrame:
        """
        Load data once using cache for efficient reuse across strategies.
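
        The returned frame is assumed to be indexed by timestamp and to carry the
        OHLCV columns consumed downstream, e.g. (illustrative):

            data.loc["2023-01-01 00:00:00", ["open", "high", "low", "close", "volume"]]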
        Args:
            backtest_settings: Backtest settings containing data file info

        Returns:
            DataFrame with market data
        """
        try:
            data_file = backtest_settings['data_file']
            data_dir = backtest_settings.get('data_dir', 'data')
            start_date = backtest_settings['start_date']
            end_date = backtest_settings['end_date']

            # Create data loader
            data_loader = DataLoader(data_dir)

            # Use cache to get data (will load from disk only if not cached)
            logger.info(f"Loading data: {data_file} [{start_date} to {end_date}]")

            if TQDM_AVAILABLE:
                with tqdm(desc="šŸ“Š Loading market data", unit="MB", ncols=80) as pbar:
                    data = self.data_cache.get_data(data_file, start_date, end_date, data_loader)
                    pbar.update(1)
            else:
                data = self.data_cache.get_data(data_file, start_date, end_date, data_loader)

            # Log cache statistics
            cache_stats = self.data_cache.get_cache_stats()
            logger.info(f"Data cache stats: {cache_stats['hits']} hits, {cache_stats['misses']} misses, "
                        f"hit ratio: {cache_stats['hit_ratio']:.1%}")

            if data.empty:
                logger.error("No data loaded - empty DataFrame returned")
                return pd.DataFrame()

            logger.info(f"Loaded data: {len(data)} rows from {start_date} to {end_date}")
            return data

        except Exception as e:
            logger.error(f"Error loading data: {e}")
            return pd.DataFrame()

    def aggregate_market_data_for_plotting(self, df: pd.DataFrame, max_points: int = 2000) -> pd.DataFrame:
        """
        Aggregate market data to reduce the number of points for plotting.

        Args:
            df: Full market data DataFrame
            max_points: Maximum number of points to keep for plotting

        Returns:
            Aggregated DataFrame suitable for plotting
        """
        if df.empty or len(df) <= max_points:
            return df

        try:
            # Calculate step size to get approximately max_points
            step = len(df) // max_points

            # Sample every nth row to reduce data points
            aggregated_df = df.iloc[::step].copy()

            # Always include the first and last points
            if len(aggregated_df) > 0:
                if aggregated_df.index[0] != df.index[0]:
                    aggregated_df = pd.concat([df.iloc[[0]], aggregated_df])
                if aggregated_df.index[-1] != df.index[-1]:
                    aggregated_df = pd.concat([aggregated_df, df.iloc[[-1]]])

            logger.info(f"Market data aggregated: {len(df)} → {len(aggregated_df)} points for plotting")
            # The market data is indexed by timestamp, so sort on the index
            # (sorting by a 'timestamp' column would raise a KeyError here).
            return aggregated_df.sort_index()

        except Exception as e:
            logger.warning(f"Error aggregating market data: {e}, using original data")
            return df

    def create_strategy_plot(self, result: Dict[str, Any], save_path: str) -> None:
        """
        Create and save a comprehensive plot for a strategy's performance.
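
        The figure is rebuilt from ``result['trades']``; a completed trade record
        is expected to look roughly like this (illustrative values):

            {"entry_time": "2023-01-02 10:15:00", "exit_time": "2023-01-02 14:30:00",
             "entry": 16750.0, "exit": 16910.0, "quantity": 0.5, "profit_usd": 80.0}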
Args: result: Strategy backtest results save_path: Path to save the plot """ if not PLOTTING_AVAILABLE: logger.warning("Matplotlib not available, skipping plot generation") return if not result['success']: logger.warning(f"Cannot create plot for failed strategy: {result['strategy_name']}") return try: trades = result.get('trades', []) if not trades: logger.warning(f"No trades data available for plotting: {result['strategy_name']}") return # Create DataFrame from trades trades_df = pd.DataFrame(trades) # Calculate equity curve from trade data equity_curve = [] running_balance = result['initial_usd'] timestamps = [] # Add starting point if trades: start_time = pd.to_datetime(trades[0]['entry_time']) equity_curve.append(running_balance) timestamps.append(start_time) for trade in trades: # Only process completed trades (with exit_time) if 'exit_time' in trade and trade['exit_time']: exit_time = pd.to_datetime(trade['exit_time']) # Calculate profit from profit_pct or profit_usd if 'profit_usd' in trade: profit_usd = trade['profit_usd'] elif 'profit_pct' in trade: profit_usd = running_balance * float(trade['profit_pct']) else: # Calculate from entry/exit prices if available if 'entry' in trade and 'exit' in trade: entry_price = float(trade['entry']) exit_price = float(trade['exit']) quantity = trade.get('quantity', 1.0) profit_usd = quantity * (exit_price - entry_price) else: profit_usd = 0 running_balance += profit_usd equity_curve.append(running_balance) timestamps.append(exit_time) if len(equity_curve) < 2: logger.warning(f"Insufficient completed trades for equity curve: {result['strategy_name']}") return # Create the plot fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 12)) fig.suptitle(f"Strategy Performance: {result['strategy_name']}", fontsize=16, fontweight='bold') # 1. Equity Curve ax1.plot(timestamps, equity_curve, linewidth=2, color='blue', alpha=0.8) ax1.axhline(y=result['initial_usd'], color='gray', linestyle='--', alpha=0.7, label='Initial Balance') ax1.set_title('Equity Curve') ax1.set_ylabel('Portfolio Value ($)') ax1.grid(True, alpha=0.3) ax1.legend() # Format x-axis for better readability if len(timestamps) > 10: ax1.tick_params(axis='x', rotation=45) # 2. Trade Profits/Losses # Calculate profits for each trade trade_profits = [] initial_balance = result['initial_usd'] for trade in trades: if 'exit_time' in trade and trade['exit_time']: if 'profit_usd' in trade: profit_usd = trade['profit_usd'] elif 'profit_pct' in trade: profit_usd = initial_balance * float(trade['profit_pct']) else: # Calculate from entry/exit prices if 'entry' in trade and 'exit' in trade: entry_price = float(trade['entry']) exit_price = float(trade['exit']) quantity = trade.get('quantity', 1.0) profit_usd = quantity * (exit_price - entry_price) else: profit_usd = 0 trade_profits.append(profit_usd) if trade_profits: colors = ['green' if p > 0 else 'red' for p in trade_profits] ax2.bar(range(len(trade_profits)), trade_profits, color=colors, alpha=0.7) ax2.set_title('Individual Trade P&L') ax2.set_xlabel('Trade Number') ax2.set_ylabel('Profit/Loss ($)') ax2.axhline(y=0, color='black', linestyle='-', alpha=0.5) ax2.grid(True, alpha=0.3) # 3. 
Drawdown if len(equity_curve) >= 2: peak = equity_curve[0] drawdowns = [] for value in equity_curve: if value > peak: peak = value drawdown = (value - peak) / peak * 100 if peak > 0 else 0 drawdowns.append(drawdown) ax3.fill_between(timestamps, drawdowns, 0, color='red', alpha=0.3) ax3.plot(timestamps, drawdowns, color='red', linewidth=1) ax3.set_title('Drawdown (%)') ax3.set_ylabel('Drawdown (%)') ax3.grid(True, alpha=0.3) if len(timestamps) > 10: ax3.tick_params(axis='x', rotation=45) # 4. Strategy Statistics ax4.axis('off') stats_text = f""" Strategy Statistics: Strategy Type: {result['strategy_type']} Total Return: {result['profit_ratio']:.2%} Total Profit: ${result['profit_usd']:.2f} Number of Trades: {result['n_trades']} Win Rate: {result['win_rate']:.1%} Max Drawdown: {result['max_drawdown']:.2%} Avg Trade: ${result['avg_trade']:.2f} Total Fees: ${result['total_fees_usd']:.2f} Duration: {result['backtest_duration_seconds']:.1f}s Period: {result['backtest_period']} """.strip() ax4.text(0.05, 0.95, stats_text, transform=ax4.transAxes, fontsize=10, verticalalignment='top', fontfamily='monospace', bbox=dict(boxstyle='round', facecolor='lightgray', alpha=0.8)) plt.tight_layout() plt.savefig(save_path, dpi=300, bbox_inches='tight') plt.close() logger.info(f"Strategy plot saved: {save_path}") except Exception as e: logger.error(f"Error creating plot for {result['strategy_name']}: {e}") logger.error(f"Traceback: {traceback.format_exc()}") # Close any open figures to prevent memory leaks plt.close('all') def create_detailed_strategy_plot(self, result: Dict[str, Any], save_path: str) -> None: """ Create and save a detailed plot showing portfolio value over time with signals. Args: result: Strategy backtest results save_path: Path to save the plot """ if not PLOTTING_AVAILABLE: logger.warning("Matplotlib not available, skipping detailed plot generation") return if not result['success']: logger.warning(f"Cannot create detailed plot for failed strategy: {result['strategy_name']}") return try: trades = result.get('trades', []) if not trades: logger.warning(f"No trades data available for detailed plotting: {result['strategy_name']}") return # Create DataFrame from trades trades_df = pd.DataFrame(trades) # Calculate portfolio value evolution and signals portfolio_times = [] portfolio_values = [] buy_times = [] buy_prices = [] buy_portfolio_values = [] sell_times = [] sell_prices = [] sell_portfolio_values = [] running_balance = result['initial_usd'] # Add initial point if trades: first_trade_time = pd.to_datetime(trades[0]['entry_time']) portfolio_times.append(first_trade_time) portfolio_values.append(running_balance) # Process each trade for trade in trades: entry_time = pd.to_datetime(trade['entry_time']) entry_price = float(trade['entry']) # Buy signal at entry buy_times.append(entry_time) buy_prices.append(entry_price) buy_portfolio_values.append(running_balance) # Add entry point to portfolio curve portfolio_times.append(entry_time) portfolio_values.append(running_balance) # Process exit if available if 'exit_time' in trade and trade['exit_time']: exit_time = pd.to_datetime(trade['exit_time']) exit_price = float(trade['exit']) # Calculate profit from available data if 'profit_usd' in trade: profit_usd = trade['profit_usd'] elif 'profit_pct' in trade: profit_usd = running_balance * float(trade['profit_pct']) else: # Calculate from entry/exit prices quantity = trade.get('quantity', 1.0) profit_usd = quantity * (exit_price - entry_price) running_balance += profit_usd # Sell signal at exit 
sell_times.append(exit_time) sell_prices.append(exit_price) sell_portfolio_values.append(running_balance) # Add exit point to portfolio curve portfolio_times.append(exit_time) portfolio_values.append(running_balance) if not portfolio_times: logger.warning(f"No portfolio data for detailed plotting: {result['strategy_name']}") return # Create the detailed plot with 3 panels fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(16, 16)) fig.suptitle(f"Detailed Strategy Analysis: {result['strategy_name']}", fontsize=16, fontweight='bold') # 1. Portfolio Value Over Time with Signals ax1.plot(portfolio_times, portfolio_values, linewidth=2, color='blue', alpha=0.8, label='Portfolio Value') ax1.axhline(y=result['initial_usd'], color='gray', linestyle='--', alpha=0.7, label='Initial Balance') # Add buy signals (green triangles pointing up) if buy_times and buy_portfolio_values: ax1.scatter(buy_times, buy_portfolio_values, color='green', marker='^', s=100, alpha=0.8, label=f'Buy Signals ({len(buy_times)})', zorder=5) # Add sell signals (red triangles pointing down) if sell_times and sell_portfolio_values: ax1.scatter(sell_times, sell_portfolio_values, color='red', marker='v', s=100, alpha=0.8, label=f'Sell Signals ({len(sell_times)})', zorder=5) ax1.set_title('Portfolio Value Over Time with Trading Signals') ax1.set_ylabel('Portfolio Value ($)') ax1.grid(True, alpha=0.3) ax1.legend() # Format x-axis if len(portfolio_times) > 10: ax1.tick_params(axis='x', rotation=45) # 2. Full Market Price Chart with Entry/Exit Points if self.market_data is not None and not self.market_data.empty: # Aggregate market data for plotting performance plot_market_data = self.aggregate_market_data_for_plotting(self.market_data) # Plot full market price data ax2.plot(plot_market_data.index, plot_market_data['close'], linewidth=1.5, color='black', alpha=0.7, label='Market Price') # Add entry points (green circles) if buy_times and buy_prices: ax2.scatter(buy_times, buy_prices, color='green', marker='o', s=80, alpha=0.9, label=f'Entry Points ({len(buy_times)})', zorder=5, edgecolors='darkgreen') # Add exit points (red circles) if sell_times and sell_prices: ax2.scatter(sell_times, sell_prices, color='red', marker='o', s=80, alpha=0.9, label=f'Exit Points ({len(sell_times)})', zorder=5, edgecolors='darkred') ax2.set_title('Market Price with Entry/Exit Points') ax2.set_ylabel('Price ($)') ax2.grid(True, alpha=0.3) ax2.legend() if len(plot_market_data) > 100: ax2.tick_params(axis='x', rotation=45) else: # Fallback to signal-only price data all_times = buy_times + sell_times if sell_times else buy_times all_prices = buy_prices + sell_prices if sell_prices else buy_prices if all_times and all_prices: # Sort by time for price line price_data = list(zip(all_times, all_prices)) price_data.sort(key=lambda x: x[0]) sorted_times, sorted_prices = zip(*price_data) ax2.plot(sorted_times, sorted_prices, linewidth=2, color='black', alpha=0.8, label='Price (Signal Points)') # Add entry points if buy_times and buy_prices: ax2.scatter(buy_times, buy_prices, color='green', marker='o', s=80, alpha=0.9, label=f'Entry Points ({len(buy_times)})', zorder=5, edgecolors='darkgreen') # Add exit points if sell_times and sell_prices: ax2.scatter(sell_times, sell_prices, color='red', marker='o', s=80, alpha=0.9, label=f'Exit Points ({len(sell_times)})', zorder=5, edgecolors='darkred') ax2.set_title('Price with Entry/Exit Points (Limited Data)') ax2.set_ylabel('Price ($)') ax2.grid(True, alpha=0.3) ax2.legend() else: ax2.text(0.5, 0.5, 'No price data 
available', transform=ax2.transAxes, ha='center', va='center', fontsize=12) ax2.set_title('Market Price Chart - No Data Available') # 3. Combined View: Price and Portfolio Performance if self.market_data is not None and not self.market_data.empty and portfolio_times: # Use the same aggregated data for consistency plot_market_data = self.aggregate_market_data_for_plotting(self.market_data) # Create dual y-axis plot ax3_price = ax3 ax3_portfolio = ax3.twinx() # Plot price on left axis line1 = ax3_price.plot(plot_market_data.index, plot_market_data['close'], linewidth=1.5, color='black', alpha=0.7, label='Market Price') ax3_price.set_ylabel('Market Price ($)', color='black') ax3_price.tick_params(axis='y', labelcolor='black') # Plot portfolio on right axis line2 = ax3_portfolio.plot(portfolio_times, portfolio_values, linewidth=2, color='blue', alpha=0.8, label='Portfolio Value') ax3_portfolio.set_ylabel('Portfolio Value ($)', color='blue') ax3_portfolio.tick_params(axis='y', labelcolor='blue') # Add signals on price axis if buy_times and buy_prices: ax3_price.scatter(buy_times, buy_prices, color='green', marker='^', s=120, alpha=0.9, label='Buy Signals', zorder=5, edgecolors='darkgreen') if sell_times and sell_prices: ax3_price.scatter(sell_times, sell_prices, color='red', marker='v', s=120, alpha=0.9, label='Sell Signals', zorder=5, edgecolors='darkred') ax3_price.set_title('Combined View: Market Price vs Portfolio Performance') ax3_price.set_xlabel('Time') ax3_price.grid(True, alpha=0.3) # Combine legends lines1, labels1 = ax3_price.get_legend_handles_labels() lines2, labels2 = ax3_portfolio.get_legend_handles_labels() ax3_price.legend(lines1 + lines2, labels1 + labels2, loc='upper left') if len(plot_market_data) > 100: ax3_price.tick_params(axis='x', rotation=45) else: ax3.text(0.5, 0.5, 'No data available for combined view', transform=ax3.transAxes, ha='center', va='center', fontsize=12) ax3.set_title('Combined View - No Data Available') ax3.set_xlabel('Time') plt.tight_layout() plt.savefig(save_path, dpi=300, bbox_inches='tight') plt.close() logger.info(f"Detailed plot saved: {save_path}") except Exception as e: logger.error(f"Error creating detailed plot for {result['strategy_name']}: {e}") logger.error(f"Traceback: {traceback.format_exc()}") # Close any open figures to prevent memory leaks plt.close('all') def save_individual_strategy_results(self, result: Dict[str, Any], config_name: str, strategy_index: int) -> None: """ Save individual strategy results immediately after completion. 
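
        For a successful strategy this typically writes, into ``self.results_dir``
        (illustrative, for index 1 and strategy name "MetaTrend_Conservative"):

            strategy_1_MetaTrend_Conservative.json
            strategy_1_MetaTrend_Conservative_plot.png
            strategy_1_MetaTrend_Conservative_detailed_plot.png
            strategy_1_MetaTrend_Conservative_trades.csv
            strategy_1_MetaTrend_Conservative_signals.csv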
Args: result: Strategy backtest results config_name: Base configuration name strategy_index: Index of the strategy (1-based) """ try: strategy_name = result['strategy_name'].replace(' ', '_').replace('/', '_') # Create individual strategy filename base_filename = f"strategy_{strategy_index}_{strategy_name}" # Show progress for file saving if tqdm is available if TQDM_AVAILABLE: logger.info(f"šŸ’¾ Saving files for {strategy_name}...") # Save JSON result json_path = os.path.join(self.results_dir, f"{base_filename}.json") with open(json_path, 'w') as f: json.dump(result, f, indent=2, default=str) logger.info(f"šŸ“„ Individual strategy result saved: {json_path}") # Debug info for plotting trades_count = len(result.get('trades', [])) completed_trades = len([t for t in result.get('trades', []) if 'exit_time' in t and t['exit_time']]) logger.info(f"šŸ” Strategy {strategy_name}: {trades_count} total trades, {completed_trades} completed trades") # Save plot if strategy was successful if result['success'] and PLOTTING_AVAILABLE: try: plot_path = os.path.join(self.results_dir, f"{base_filename}_plot.png") logger.info(f"šŸŽØ Creating strategy plot: {plot_path}") self.create_strategy_plot(result, plot_path) except Exception as plot_error: logger.error(f"āŒ Failed to create strategy plot for {strategy_name}: {plot_error}") logger.error(f"Plot error traceback: {traceback.format_exc()}") elif not result['success']: logger.warning(f"āš ļø Skipping plot for failed strategy: {strategy_name}") elif not PLOTTING_AVAILABLE: logger.warning(f"āš ļø Plotting not available, skipping plot for: {strategy_name}") # Save detailed plot with portfolio and signals if result['success'] and PLOTTING_AVAILABLE: try: detailed_plot_path = os.path.join(self.results_dir, f"{base_filename}_detailed_plot.png") logger.info(f"šŸŽØ Creating detailed plot: {detailed_plot_path}") self.create_detailed_strategy_plot(result, detailed_plot_path) except Exception as detailed_plot_error: logger.error(f"āŒ Failed to create detailed plot for {strategy_name}: {detailed_plot_error}") logger.error(f"Detailed plot error traceback: {traceback.format_exc()}") # Save trades CSV if available if result['success'] and result.get('trades'): trades_df = pd.DataFrame(result['trades']) trades_csv_path = os.path.join(self.results_dir, f"{base_filename}_trades.csv") trades_df.to_csv(trades_csv_path, index=False) logger.info(f"šŸ“Š Trades data saved: {trades_csv_path}") # Save signals data signals_data = [] for i, trade in enumerate(result['trades']): # Buy signal signals_data.append({ 'signal_id': f"buy_{i+1}", 'signal_type': 'BUY', 'time': trade.get('entry_time'), 'price': trade.get('entry', 0), 'trade_id': i + 1, 'quantity': trade.get('quantity', 0), 'value': trade.get('quantity', 0) * trade.get('entry', 0), 'strategy': result['strategy_name'] }) # Sell signal (if trade is completed) if 'exit_time' in trade: signals_data.append({ 'signal_id': f"sell_{i+1}", 'signal_type': 'SELL', 'time': trade.get('exit_time'), 'price': trade.get('exit', 0), 'trade_id': i + 1, 'quantity': trade.get('quantity', 0), 'value': trade.get('quantity', 0) * trade.get('exit', 0), 'strategy': result['strategy_name'] }) if signals_data: signals_df = pd.DataFrame(signals_data) signals_csv_path = os.path.join(self.results_dir, f"{base_filename}_signals.csv") signals_df.to_csv(signals_csv_path, index=False) logger.info(f"šŸ“” Signals data saved: {signals_csv_path}") # Summary of files created files_created = [] files_created.append(f"{base_filename}.json") if result['success'] and 
PLOTTING_AVAILABLE: files_created.extend([f"{base_filename}_plot.png", f"{base_filename}_detailed_plot.png"]) if result['success'] and result.get('trades'): files_created.extend([f"{base_filename}_trades.csv", f"{base_filename}_signals.csv"]) logger.info(f"āœ… Saved {len(files_created)} files for {strategy_name}: {', '.join(files_created)}") except Exception as e: logger.error(f"Error saving individual strategy results for {result['strategy_name']}: {e}") logger.error(f"Save error traceback: {traceback.format_exc()}") def create_summary_plot(self, results: List[Dict[str, Any]], save_path: str) -> None: """ Create and save a summary comparison plot for all strategies. Args: results: List of all strategy results save_path: Path to save the plot """ if not PLOTTING_AVAILABLE: logger.warning("Matplotlib not available, skipping summary plot generation") return successful_results = [r for r in results if r['success']] if not successful_results: logger.warning("No successful strategies to plot") return try: # Create summary comparison plot fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(16, 12)) fig.suptitle('Strategy Comparison Summary', fontsize=16, fontweight='bold') strategy_names = [r['strategy_name'] for r in successful_results] # 1. Total Returns Comparison returns = [r['profit_ratio'] * 100 for r in successful_results] colors = ['green' if r > 0 else 'red' for r in returns] bars1 = ax1.bar(strategy_names, returns, color=colors, alpha=0.7) ax1.set_title('Total Returns (%)') ax1.set_ylabel('Return (%)') ax1.axhline(y=0, color='black', linestyle='-', alpha=0.5) ax1.tick_params(axis='x', rotation=45) ax1.grid(True, alpha=0.3) # Add value labels on bars for bar, value in zip(bars1, returns): height = bar.get_height() ax1.text(bar.get_x() + bar.get_width()/2., height + (0.1 if height >= 0 else -0.3), f'{value:.1f}%', ha='center', va='bottom' if height >= 0 else 'top') # 2. Number of Trades trades = [r['n_trades'] for r in successful_results] ax2.bar(strategy_names, trades, color='blue', alpha=0.7) ax2.set_title('Number of Trades') ax2.set_ylabel('Trade Count') ax2.tick_params(axis='x', rotation=45) ax2.grid(True, alpha=0.3) # 3. Win Rate vs Max Drawdown win_rates = [r['win_rate'] * 100 for r in successful_results] max_drawdowns = [r['max_drawdown'] * 100 for r in successful_results] scatter = ax3.scatter(max_drawdowns, win_rates, s=100, alpha=0.7, c=returns, cmap='RdYlGn') ax3.set_xlabel('Max Drawdown (%)') ax3.set_ylabel('Win Rate (%)') ax3.set_title('Win Rate vs Max Drawdown') ax3.grid(True, alpha=0.3) # Add strategy labels for i, name in enumerate(strategy_names): ax3.annotate(name, (max_drawdowns[i], win_rates[i]), xytext=(5, 5), textcoords='offset points', fontsize=8) # Add colorbar cbar = plt.colorbar(scatter, ax=ax3) cbar.set_label('Return (%)') # 4. Strategy Statistics Table ax4.axis('off') table_data = [] headers = ['Strategy', 'Return%', 'Trades', 'Win%', 'MaxDD%', 'Avg Trade'] for r in successful_results: row = [ r['strategy_name'][:15] + '...' 
if len(r['strategy_name']) > 15 else r['strategy_name'], f"{r['profit_ratio']*100:.1f}%", str(r['n_trades']), f"{r['win_rate']*100:.0f}%", f"{r['max_drawdown']*100:.1f}%", f"${r['avg_trade']:.1f}" ] table_data.append(row) table = ax4.table(cellText=table_data, colLabels=headers, loc='center', cellLoc='center') table.auto_set_font_size(False) table.set_fontsize(9) table.scale(1.2, 1.5) # Style the table for i in range(len(headers)): table[(0, i)].set_facecolor('#4CAF50') table[(0, i)].set_text_props(weight='bold', color='white') ax4.set_title('Strategy Statistics Summary', pad=20) plt.tight_layout() plt.savefig(save_path, dpi=300, bbox_inches='tight') plt.close() logger.info(f"Summary plot saved: {save_path}") except Exception as e: logger.error(f"Error creating summary plot: {e}") plt.close('all') def run_strategies_parallel(self, config: Dict[str, Any], config_name: str = "strategy_run") -> List[Dict[str, Any]]: """ Run all strategies in parallel using multiprocessing for optimal performance. Args: config: Configuration dictionary config_name: Base name for output files Returns: List of backtest results """ backtest_settings = config['backtest_settings'] strategies = config['strategies'] # Create organized results folder timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") run_folder_name = f"{config_name}_{timestamp}" self.results_dir = os.path.join(self.base_results_dir, run_folder_name) os.makedirs(self.results_dir, exist_ok=True) logger.info(f"Created run folder: {self.results_dir}") # Load shared data once for all strategies logger.info("Loading shared market data for parallel execution...") self.market_data = self.load_data_once(backtest_settings) if self.market_data.empty: logger.error("Failed to load market data - aborting strategy execution") return [] logger.info(f"Starting parallel backtest run with {len(strategies)} strategies") logger.info(f"Data file: {backtest_settings['data_file']}") logger.info(f"Period: {backtest_settings['start_date']} to {backtest_settings['end_date']}") logger.info(f"Using cached data: {len(self.market_data)} rows") # Determine optimal number of workers max_workers = min(len(strategies), self.system_utils.get_optimal_workers()) logger.info(f"Using {max_workers} worker processes for parallel execution") # Prepare strategy jobs for parallel execution strategy_jobs = [] for i, strategy_config in enumerate(strategies, 1): job = { 'strategy_config': strategy_config, 'backtest_settings': backtest_settings, 'strategy_index': i, 'total_strategies': len(strategies), 'run_folder_name': run_folder_name, 'shared_data_info': self._prepare_shared_data_for_worker(self.market_data) } strategy_jobs.append(job) # Execute strategies in parallel results = [] if max_workers == 1: # Single-threaded fallback logger.info("Using single-threaded execution (only 1 worker)") for job in strategy_jobs: result = self._run_strategy_worker_function(job) results.append(result) self._process_worker_result(result, job) else: # Multi-threaded execution logger.info(f"Using parallel execution with {max_workers} workers") with ProcessPoolExecutor(max_workers=max_workers) as executor: # Submit all jobs future_to_job = { executor.submit(run_strategy_worker_function, job): job for job in strategy_jobs } # Create progress bar if TQDM_AVAILABLE: progress_bar = tqdm( total=len(strategies), desc="šŸš€ Parallel Strategies", ncols=100, bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]" ) else: progress_bar = None # Collect results as they complete completed_count = 0 for future 
in as_completed(future_to_job): job = future_to_job[future] try: result = future.result(timeout=300) # 5 minute timeout per strategy results.append(result) # Process and save result immediately self._process_worker_result(result, job) completed_count += 1 # Update progress if progress_bar: success_status = "āœ…" if result['success'] else "āŒ" progress_bar.set_postfix_str(f"{success_status} {result['strategy_name'][:25]}") progress_bar.update(1) logger.info(f"Completed strategy {completed_count}/{len(strategies)}: {result['strategy_name']}") except Exception as e: logger.error(f"Strategy execution failed: {e}") error_result = { "success": False, "error": str(e), "strategy_name": job['strategy_config'].get('name', 'Unknown'), "strategy_type": job['strategy_config'].get('type', 'Unknown'), "strategy_params": job['strategy_config'].get('params', {}), "trader_params": job['strategy_config'].get('trader_params', {}), "traceback": traceback.format_exc() } results.append(error_result) completed_count += 1 if progress_bar: progress_bar.set_postfix_str(f"āŒ {error_result['strategy_name'][:25]}") progress_bar.update(1) if progress_bar: progress_bar.close() # Log final cache statistics cache_stats = self.data_cache.get_cache_stats() logger.info(f"\nšŸ“Š Final data cache statistics:") logger.info(f" Total requests: {cache_stats['total_requests']}") logger.info(f" Cache hits: {cache_stats['hits']}") logger.info(f" Cache misses: {cache_stats['misses']}") logger.info(f" Hit ratio: {cache_stats['hit_ratio']:.1%}") logger.info(f" Memory usage: {cache_stats['total_memory_mb']:.1f}MB") # Log parallel execution summary successful_results = [r for r in results if r['success']] logger.info(f"\nšŸš€ Parallel execution completed:") logger.info(f" Successful strategies: {len(successful_results)}/{len(results)}") logger.info(f" Workers used: {max_workers}") logger.info(f" Total execution time: {(datetime.now() - self.session_start_time).total_seconds():.1f}s") self.results = results return results def _prepare_shared_data_for_worker(self, data: pd.DataFrame) -> Dict[str, Any]: """ Prepare shared data information for worker processes. For now, we'll serialize the data. In Phase 2, we'll use shared memory. Args: data: Market data DataFrame Returns: Dictionary with data information for workers """ return { 'data_serialized': data.to_json(orient='split', date_format='iso'), 'data_shape': data.shape, 'data_columns': list(data.columns), 'index_name': data.index.name } def _process_worker_result(self, result: Dict[str, Any], job: Dict[str, Any]) -> None: """ Process and save individual worker result. Args: result: Strategy execution result job: Original job configuration """ if result['success']: # Save individual strategy results immediately self.save_individual_strategy_results( result, job['run_folder_name'], job['strategy_index'] ) logger.info(f"āœ“ Strategy {job['strategy_index']} saved: {result['strategy_name']}") else: logger.error(f"āœ— Strategy {job['strategy_index']} failed: {result['strategy_name']}") def _run_strategy_worker_function(self, job: Dict[str, Any]) -> Dict[str, Any]: """ Worker function to run a single strategy (for single-threaded fallback). 
Args: job: Job configuration dictionary Returns: Strategy execution results """ return self.run_single_backtest_with_shared_data( job['strategy_config'], job['backtest_settings'], self.market_data, # Use cached data job['strategy_index'], job['total_strategies'] ) def run_single_backtest_with_shared_data(self, strategy_config: Dict[str, Any], backtest_settings: Dict[str, Any], shared_data: pd.DataFrame, strategy_index: int, total_strategies: int) -> Dict[str, Any]: """ Run a single backtest with pre-loaded shared data for optimization. Args: strategy_config: Strategy configuration backtest_settings: Backtest settings shared_data: Pre-loaded market data strategy_index: Index of the strategy (1-based) total_strategies: Total number of strategies Returns: Dictionary with backtest results """ try: start_time = time.time() # Create strategy strategy = self.create_strategy(strategy_config) strategy_name = strategy_config['name'] # Extract backtest settings initial_usd = backtest_settings.get('initial_usd', 10000) start_date = backtest_settings['start_date'] end_date = backtest_settings['end_date'] # Extract trader parameters trader_params = strategy_config.get('trader_params', {}) # Create trader directly (bypassing backtester for shared data processing) final_trader_params = { "stop_loss_pct": trader_params.get('stop_loss_pct', 0.0), "take_profit_pct": trader_params.get('take_profit_pct', 0.0), "portfolio_percent_per_trade": trader_params.get('portfolio_percent_per_trade', 1.0) } trader = IncTrader( strategy=strategy, initial_usd=initial_usd, params=final_trader_params ) logger.info(f"Running optimized backtest for strategy: {strategy_name}") # Process data frame-by-frame (SAME as real-time processing) data_processed = 0 if TQDM_AVAILABLE: logger.info(f"⚔ Running Strategy {strategy_index}/{total_strategies}: {strategy_name}") for timestamp, row in shared_data.iterrows(): ohlcv_data = { 'open': row['open'], 'high': row['high'], 'low': row['low'], 'close': row['close'], 'volume': row['volume'] } trader.process_data_point(timestamp, ohlcv_data) data_processed += 1 # Finalize and get results trader.finalize() results = trader.get_results() # Calculate additional metrics end_time = time.time() backtest_duration = end_time - start_time # Format results formatted_results = { "success": True, "strategy_name": strategy_name, "strategy_type": strategy_config['type'], "strategy_params": strategy_config.get('params', {}), "trader_params": trader_params, "initial_usd": results["initial_usd"], "final_usd": results["final_usd"], "profit_ratio": results["profit_ratio"], "profit_usd": results["final_usd"] - results["initial_usd"], "n_trades": results["n_trades"], "win_rate": results["win_rate"], "max_drawdown": results["max_drawdown"], "avg_trade": results["avg_trade"], "total_fees_usd": results["total_fees_usd"], "backtest_duration_seconds": backtest_duration, "data_points_processed": data_processed, "warmup_complete": results.get("warmup_complete", False), "trades": results.get("trades", []), "backtest_period": f"{start_date} to {end_date}" } logger.info(f"Optimized backtest completed for {strategy_name}: " f"Profit: {formatted_results['profit_ratio']:.1%} " f"(${formatted_results['profit_usd']:.2f}), " f"Trades: {formatted_results['n_trades']}, " f"Win Rate: {formatted_results['win_rate']:.1%}") return formatted_results except Exception as e: logger.error(f"Error in optimized backtest for {strategy_config.get('name', 'Unknown')}: {e}") return { "success": False, "error": str(e), "strategy_name": 
strategy_config.get('name', 'Unknown'), "strategy_type": strategy_config.get('type', 'Unknown'), "strategy_params": strategy_config.get('params', {}), "trader_params": strategy_config.get('trader_params', {}), "traceback": traceback.format_exc() } def run_single_backtest(self, strategy_config: Dict[str, Any], backtest_settings: Dict[str, Any], strategy_index: int, total_strategies: int) -> Dict[str, Any]: """ Run a single backtest with given strategy and settings. Args: strategy_config: Strategy configuration backtest_settings: Backtest settings strategy_index: Index of the strategy (1-based) total_strategies: Total number of strategies Returns: Dictionary with backtest results """ try: start_time = time.time() # Create strategy strategy = self.create_strategy(strategy_config) strategy_name = strategy_config['name'] # Extract backtest settings data_file = backtest_settings['data_file'] start_date = backtest_settings['start_date'] end_date = backtest_settings['end_date'] initial_usd = backtest_settings.get('initial_usd', 10000) data_dir = backtest_settings.get('data_dir', 'data') # Extract trader parameters trader_params = strategy_config.get('trader_params', {}) # Create backtest config config = BacktestConfig( data_file=data_file, start_date=start_date, end_date=end_date, initial_usd=initial_usd, data_dir=data_dir, stop_loss_pct=trader_params.get('stop_loss_pct', 0.0) ) # Create backtester backtester = IncBacktester(config) logger.info(f"Running backtest for strategy: {strategy_name}") # Create a custom backtester wrapper with progress tracking if TQDM_AVAILABLE: # Simple progress indication without threading logger.info(f"⚔ Running Strategy {strategy_index}/{total_strategies}: {strategy_name}") results = backtester.run_single_strategy(strategy, trader_params) else: # Run without progress tracking results = backtester.run_single_strategy(strategy, trader_params) # Calculate additional metrics end_time = time.time() backtest_duration = end_time - start_time # Format results formatted_results = { "success": True, "strategy_name": strategy_name, "strategy_type": strategy_config['type'], "strategy_params": strategy_config.get('params', {}), "trader_params": trader_params, "initial_usd": results["initial_usd"], "final_usd": results["final_usd"], "profit_ratio": results["profit_ratio"], "profit_usd": results["final_usd"] - results["initial_usd"], "n_trades": results["n_trades"], "win_rate": results["win_rate"], "max_drawdown": results["max_drawdown"], "avg_trade": results["avg_trade"], "total_fees_usd": results["total_fees_usd"], "backtest_duration_seconds": backtest_duration, "data_points_processed": results.get("data_points", 0), "warmup_complete": results.get("warmup_complete", False), "trades": results.get("trades", []), "backtest_period": f"{start_date} to {end_date}" } logger.info(f"Backtest completed for {strategy_name}: " f"Profit: {formatted_results['profit_ratio']:.1%} " f"(${formatted_results['profit_usd']:.2f}), " f"Trades: {formatted_results['n_trades']}, " f"Win Rate: {formatted_results['win_rate']:.1%}") return formatted_results except Exception as e: # Close progress bar on error if TQDM_AVAILABLE and 'strategy_progress' in locals(): strategy_progress.close() logger.error(f"Error in backtest for {strategy_config.get('name', 'Unknown')}: {e}") return { "success": False, "error": str(e), "strategy_name": strategy_config.get('name', 'Unknown'), "strategy_type": strategy_config.get('type', 'Unknown'), "strategy_params": strategy_config.get('params', {}), "trader_params": 
strategy_config.get('trader_params', {}), "traceback": traceback.format_exc() } def save_results(self, results: List[Dict[str, Any]], config_name: str = "strategy_run") -> None: """ Save backtest results to files. Args: results: List of backtest results config_name: Base name for output files """ base_filename = "summary" # Use ResultsSaver for comprehensive results saver = ResultsSaver(self.results_dir) saver.save_comprehensive_results( results=results, base_filename=base_filename, session_start_time=self.session_start_time ) # Create summary CSV successful_results = [r for r in results if r['success']] if successful_results: summary_df = pd.DataFrame([ { 'Strategy Name': r['strategy_name'], 'Strategy Type': r['strategy_type'], 'Initial USD': r['initial_usd'], 'Final USD': r['final_usd'], 'Profit USD': r['profit_usd'], 'Profit Ratio': r['profit_ratio'], 'Number of Trades': r['n_trades'], 'Win Rate': r['win_rate'], 'Max Drawdown': r['max_drawdown'], 'Avg Trade': r['avg_trade'], 'Total Fees': r['total_fees_usd'], 'Duration (s)': r['backtest_duration_seconds'] } for r in successful_results ]) summary_path = os.path.join(self.results_dir, f"{base_filename}.csv") summary_df.to_csv(summary_path, index=False) logger.info(f"Summary saved to: {summary_path}") # Create summary comparison plot if PLOTTING_AVAILABLE and len(successful_results) > 0: summary_plot_path = os.path.join(self.results_dir, f"{base_filename}_plot.png") self.create_summary_plot(results, summary_plot_path) logger.info(f"All results saved to: {self.results_dir}/") # Print file summary logger.info(f"\nšŸ“Š Files generated in: {os.path.basename(self.results_dir)}/") logger.info(f" šŸ“‹ Summary data and plots for final comparison") logger.info(f" šŸ“ˆ Individual strategy files saved during execution") logger.info(f" šŸŽØ Strategy plots: {len(successful_results)} individual + {len(successful_results)} detailed + 1 summary") logger.info(f" šŸ“Š Trade files: {len(successful_results)} trade CSVs + {len(successful_results)} signal CSVs") def print_summary(self, results: List[Dict[str, Any]]) -> None: """ Print a summary of backtest results. Args: results: List of backtest results """ successful_results = [r for r in results if r['success']] failed_results = [r for r in results if not r['success']] print(f"\n{'='*60}") print(f"BACKTEST SUMMARY") print(f"{'='*60}") print(f"Total Strategies: {len(results)}") print(f"Successful: {len(successful_results)}") print(f"Failed: {len(failed_results)}") print(f"Session Duration: {(datetime.now() - self.session_start_time).total_seconds():.1f} seconds") if successful_results: print(f"\nSTRATEGY RESULTS:") print(f"{'-'*60}") # Sort by profit ratio sorted_results = sorted(successful_results, key=lambda x: x['profit_ratio'], reverse=True) for i, result in enumerate(sorted_results, 1): print(f"{i}. {result['strategy_name']} ({result['strategy_type']})") print(f" Profit: {result['profit_ratio']:.1%} (${result['profit_usd']:.2f})") print(f" Trades: {result['n_trades']} | Win Rate: {result['win_rate']:.1%}") print(f" Max Drawdown: {result['max_drawdown']:.1%} | Avg Trade: ${result['avg_trade']:.2f}") print() if failed_results: print(f"\nFAILED STRATEGIES:") print(f"{'-'*60}") for result in failed_results: print(f"- {result['strategy_name']}: {result['error']}") print(f"{'='*60}") def run_strategies(self, config: Dict[str, Any], config_name: str = "strategy_run") -> List[Dict[str, Any]]: """ Run all strategies using the optimal execution method (parallel or sequential). 
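
        Typical programmatic use mirrors ``main()`` (illustrative config path):

            runner = StrategyRunner(results_dir="results", enable_parallel=True)
            config = runner.load_config("configs/example_strategies.json")
            results = runner.run_strategies(config, config_name="example_strategies")
            runner.save_results(results, "example_strategies")
            runner.print_summary(results)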
Args: config: Configuration dictionary config_name: Base name for output files Returns: List of backtest results """ if self.enable_parallel and len(config['strategies']) > 1: # Use parallel execution for multiple strategies logger.info("Using parallel execution for multiple strategies") return self.run_strategies_parallel(config, config_name) else: # Use sequential execution for single strategy or when parallel is disabled logger.info("Using sequential execution") return self.run_strategies_sequential(config, config_name) def run_strategies_sequential(self, config: Dict[str, Any], config_name: str = "strategy_run") -> List[Dict[str, Any]]: """ Run all strategies sequentially (original method, kept for compatibility). Args: config: Configuration dictionary config_name: Base name for output files Returns: List of backtest results """ backtest_settings = config['backtest_settings'] strategies = config['strategies'] # Create organized results folder: [config_name]_[timestamp] timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") run_folder_name = f"{config_name}_{timestamp}" self.results_dir = os.path.join(self.base_results_dir, run_folder_name) os.makedirs(self.results_dir, exist_ok=True) logger.info(f"Created run folder: {self.results_dir}") # Load market data for plotting and strategy execution (load once, use many times) logger.info("Loading shared market data...") self.market_data = self.load_data_once(backtest_settings) if self.market_data.empty: logger.error("Failed to load market data - aborting strategy execution") return [] logger.info(f"Starting sequential backtest run with {len(strategies)} strategies") logger.info(f"Data file: {backtest_settings['data_file']}") logger.info(f"Period: {backtest_settings['start_date']} to {backtest_settings['end_date']}") logger.info(f"Using cached data: {len(self.market_data)} rows") results = [] # Create progress bar for strategies if TQDM_AVAILABLE: strategy_iterator = tqdm(enumerate(strategies, 1), total=len(strategies), desc="šŸš€ Strategies", ncols=100, bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]") else: strategy_iterator = enumerate(strategies, 1) for i, strategy_config in strategy_iterator: if TQDM_AVAILABLE: strategy_iterator.set_postfix_str(f"{strategy_config['name'][:30]}") logger.info(f"\n--- Running Strategy {i}/{len(strategies)}: {strategy_config['name']} ---") # Use shared data method for optimized execution result = self.run_single_backtest_with_shared_data( strategy_config, backtest_settings, self.market_data, # Use cached data i, len(strategies) ) results.append(result) # Save individual strategy results immediately self.save_individual_strategy_results(result, run_folder_name, i) # Show progress if result['success']: logger.info(f"āœ“ Strategy {i} completed successfully") if TQDM_AVAILABLE: strategy_iterator.set_postfix_str(f"āœ“ {strategy_config['name'][:30]}") else: logger.error(f"āœ— Strategy {i} failed: {result['error']}") if TQDM_AVAILABLE: strategy_iterator.set_postfix_str(f"āœ— {strategy_config['name'][:30]}") # Log final cache statistics cache_stats = self.data_cache.get_cache_stats() logger.info(f"\nšŸ“Š Final data cache statistics:") logger.info(f" Total requests: {cache_stats['total_requests']}") logger.info(f" Cache hits: {cache_stats['hits']}") logger.info(f" Cache misses: {cache_stats['misses']}") logger.info(f" Hit ratio: {cache_stats['hit_ratio']:.1%}") logger.info(f" Memory usage: {cache_stats['total_memory_mb']:.1f}MB") self.results = results return results def 
create_example_config(output_path: str) -> None: """ Create an example configuration file. Args: output_path: Path where to save the example config """ example_config = { "backtest_settings": { "data_file": "btcusd_1-min_data.csv", "data_dir": "data", "start_date": "2023-01-01", "end_date": "2023-01-31", "initial_usd": 10000 }, "strategies": [ { "name": "MetaTrend_Conservative", "type": "metatrend", "params": { "supertrend_periods": [12, 10, 11], "supertrend_multipliers": [3.0, 1.0, 2.0], "min_trend_agreement": 0.8, "timeframe": "15min" }, "trader_params": { "stop_loss_pct": 0.02, "portfolio_percent_per_trade": 0.5 } }, { "name": "MetaTrend_Aggressive", "type": "metatrend", "params": { "supertrend_periods": [10, 8, 9], "supertrend_multipliers": [2.0, 1.0, 1.5], "min_trend_agreement": 0.5, "timeframe": "5min" }, "trader_params": { "stop_loss_pct": 0.03, "portfolio_percent_per_trade": 0.8 } }, { "name": "BBRS_Default", "type": "bbrs", "params": { "bb_length": 20, "bb_std": 2.0, "rsi_length": 14, "rsi_overbought": 70, "rsi_oversold": 30, "timeframe": "15min" }, "trader_params": { "stop_loss_pct": 0.025, "portfolio_percent_per_trade": 0.6 } }, { "name": "Random_Baseline", "type": "random", "params": { "signal_probability": 0.001, "timeframe": "15min" }, "trader_params": { "stop_loss_pct": 0.02, "portfolio_percent_per_trade": 0.5 } } ] } os.makedirs(os.path.dirname(output_path), exist_ok=True) with open(output_path, 'w') as f: json.dump(example_config, f, indent=2) print(f"Example configuration saved to: {output_path}") def main(): """Main function for running strategy backtests.""" parser = argparse.ArgumentParser(description="Strategy Backtest Runner") parser.add_argument("--config", type=str, default=None, help="Path to JSON configuration file") parser.add_argument("--results-dir", type=str, default="results", help="Directory for saving results") parser.add_argument("--create-example", type=str, default=None, help="Create example config file at specified path") parser.add_argument("--verbose", action="store_true", help="Enable verbose logging") parser.add_argument("--no-parallel", action="store_true", help="Disable parallel execution (use sequential mode)") args = parser.parse_args() # Set logging level if args.verbose: logging.getLogger().setLevel(logging.DEBUG) logging.getLogger('IncrementalTrader.strategies').setLevel(logging.INFO) logging.getLogger('IncrementalTrader.trader').setLevel(logging.INFO) # Create example config if requested if args.create_example: create_example_config(args.create_example) return # Require config for normal operation if not args.config: parser.error("--config is required unless using --create-example") try: # Create runner with parallel execution setting enable_parallel = not args.no_parallel runner = StrategyRunner(results_dir=args.results_dir, enable_parallel=enable_parallel) # Load configuration config = runner.load_config(args.config) # Check if data file exists data_path = os.path.join( config['backtest_settings'].get('data_dir', 'data'), config['backtest_settings']['data_file'] ) if not os.path.exists(data_path): logger.error(f"Data file not found: {data_path}") return # Run strategies config_name = os.path.splitext(os.path.basename(args.config))[0] results = runner.run_strategies(config, config_name) # Save results runner.save_results(results, config_name) # Print summary runner.print_summary(results) except FileNotFoundError as e: logger.error(f"File not found: {e}") except json.JSONDecodeError as e: logger.error(f"JSON error: {e}") except ValueError 
as e: logger.error(f"Configuration error: {e}") except KeyboardInterrupt: logger.info("Backtest interrupted by user") except Exception as e: logger.error(f"Backtest failed: {e}") traceback.print_exc() if __name__ == "__main__": main()