cleanup of the old Incremental trader after refactoring
This commit is contained in:
parent 54e3f5677a
commit a99ed50cfe
@ -1,363 +0,0 @@
# Migration Guide: Cycles Framework → IncrementalTrader

## Overview

This guide helps you migrate from the legacy Cycles framework to the new IncrementalTrader module, which provides a cleaner, more modular architecture while maintaining compatibility with existing strategies and workflows.

## Key Architectural Changes

### Module Structure

**Old Structure (Cycles)**:
```
cycles/
├── IncStrategies/
│   ├── base.py
│   ├── default_strategy.py
│   └── bbrs_strategy.py
├── backtest.py
├── trader.py
└── utils/
```

**New Structure (IncrementalTrader)**:
```
IncrementalTrader/
├── strategies/
│   ├── base.py
│   ├── metatrend.py
│   ├── random.py
│   ├── bbrs.py
│   └── indicators/
├── trader/
│   ├── trader.py
│   └── position.py
├── backtester/
│   ├── backtester.py
│   ├── config.py
│   └── utils.py
└── docs/
```

### Import Changes

**Old Imports**:
```python
from cycles.IncStrategies.base import StrategyBase, StrategySignal
from cycles.IncStrategies.default_strategy import DefaultStrategy
from cycles.IncStrategies.bbrs_strategy import BBRSStrategy
from cycles.backtest import Backtester
from cycles.trader import Trader
```

**New Imports**:
```python
from IncrementalTrader.strategies.base import IncStrategyBase, IncStrategySignal
from IncrementalTrader.strategies.metatrend import MetaTrendStrategy
from IncrementalTrader.strategies.bbrs import BBRSStrategy
from IncrementalTrader.backtester import IncBacktester
from IncrementalTrader.trader import IncTrader
```

## Strategy Migration

### Base Class Changes

**Old Base Class**:
```python
class MyStrategy(StrategyBase):
    def get_entry_signal(self, backtester, df_index):
        return StrategySignal("ENTRY", confidence=0.8)
```

**New Base Class**:
```python
class MyStrategy(IncStrategyBase):
    def get_entry_signal(self, backtester, df_index):
        return IncStrategySignal.BUY(confidence=0.8)
```

### Signal Generation Changes

**Old Signal Creation**:
```python
# Manual signal creation
signal = StrategySignal("ENTRY", confidence=0.8)
signal = StrategySignal("EXIT", confidence=0.9)
signal = StrategySignal("HOLD", confidence=0.0)
```

**New Signal Creation (Factory Methods)**:
```python
# Factory methods for cleaner signal creation
signal = IncStrategySignal.BUY(confidence=0.8)
signal = IncStrategySignal.SELL(confidence=0.9)
signal = IncStrategySignal.HOLD(confidence=0.0)
```

### Strategy Name Mapping

| Old Strategy      | New Strategy        | Compatibility Alias    |
|-------------------|---------------------|------------------------|
| `DefaultStrategy` | `MetaTrendStrategy` | `IncMetaTrendStrategy` |
| `BBRSStrategy`    | `BBRSStrategy`      | `IncBBRSStrategy`      |
| N/A               | `RandomStrategy`    | `IncRandomStrategy`    |

## Backtesting Migration

### Configuration Changes

**Old Configuration**:
```python
# Direct backtester usage
backtester = Backtester(data, strategy)
results = backtester.run()
```

**New Configuration**:
```python
# Enhanced configuration system
from IncrementalTrader.backtester import IncBacktester, BacktestConfig

config = BacktestConfig(
    initial_capital=10000,
    commission=0.001,
    slippage=0.0001
)

backtester = IncBacktester(config)
results = backtester.run_backtest(data, strategy)
```

### Parameter Optimization

**Old Optimization**:
```python
# Manual parameter loops
for param1 in values1:
    for param2 in values2:
        strategy = MyStrategy(param1=param1, param2=param2)
        results = backtester.run()
```

**New Optimization**:
```python
# Built-in optimization framework
from IncrementalTrader.backtester import OptimizationConfig

opt_config = OptimizationConfig(
    strategy_class=MyStrategy,
    param_ranges={
        'param1': [1, 2, 3, 4, 5],
        'param2': [0.1, 0.2, 0.3, 0.4, 0.5]
    },
    optimization_metric='sharpe_ratio'
)

results = backtester.optimize_strategy(data, opt_config)
```

## Trading Migration

### Trader Interface Changes

**Old Trader**:
```python
trader = Trader(strategy, initial_capital=10000)
trader.process_tick(price_data)
```

**New Trader**:
```python
trader = IncTrader(strategy, initial_capital=10000)
trader.process_tick(price_data)
```

### Position Management

**Old Position Handling**:
```python
# Position management was embedded in the trader
if trader.position_size > 0:
    ...  # handle long position
```

**New Position Handling**:
```python
# Dedicated position manager
position_manager = trader.position_manager
if position_manager.has_position():
    current_position = position_manager.get_current_position()
    # Handle position with dedicated methods
```

## Indicator Migration

### Import Changes

**Old Indicator Imports**:
```python
from cycles.IncStrategies.indicators import SupertrendState, ATRState
```

**New Indicator Imports**:
```python
from IncrementalTrader.strategies.indicators import SupertrendState, ATRState
```

### Indicator Usage

The indicator interface remains largely the same, but with enhanced features:

**Enhanced Indicator Features**:
```python
# New indicators have better state management
supertrend = SupertrendState(period=10, multiplier=3.0)

# Process data incrementally
for price_data in data_stream:
    supertrend.update(price_data)
    current_trend = supertrend.get_value()
    trend_direction = supertrend.get_trend()
```

## Compatibility Layer

### Backward Compatibility Aliases

The new module provides compatibility aliases for smooth migration:

```python
# These imports work for backward compatibility
from IncrementalTrader.strategies.metatrend import IncMetaTrendStrategy as DefaultStrategy
from IncrementalTrader.strategies.bbrs import IncBBRSStrategy as BBRSStrategy
from IncrementalTrader.strategies.random import IncRandomStrategy as RandomStrategy
```

### Gradual Migration Strategy

1. **Phase 1**: Update imports to use compatibility aliases
2. **Phase 2**: Update signal generation to use factory methods
3. **Phase 3**: Migrate to new configuration system
4. **Phase 4**: Update to new class names and remove aliases

## Enhanced Features

### New Capabilities in IncrementalTrader

1. **Modular Architecture**: Each component can be used independently
2. **Enhanced Configuration**: Robust configuration with validation
3. **Better Error Handling**: Comprehensive exception handling and logging
4. **Improved Performance**: Optimized data processing and memory usage
5. **Self-Contained**: No external dependencies on legacy modules
6. **Enhanced Documentation**: Comprehensive API documentation and examples

### Performance Improvements

- **Memory Efficiency**: Reduced memory footprint for large datasets
- **Processing Speed**: Optimized indicator calculations
- **Parallel Processing**: Built-in support for parallel backtesting
- **Resource Management**: Intelligent system resource allocation

## Migration Checklist

### Pre-Migration
- [ ] Review current strategy implementations
- [ ] Identify external dependencies
- [ ] Backup existing configurations
- [ ] Test current system performance

### During Migration
- [ ] Update import statements
- [ ] Replace signal generation with factory methods
- [ ] Update configuration format
- [ ] Test strategy behavior equivalence
- [ ] Validate backtesting results

### Post-Migration
- [ ] Remove old import statements
- [ ] Update documentation
- [ ] Performance testing
- [ ] Clean up legacy code references

## Common Migration Issues

### Issue 1: Signal Type Mismatch
**Problem**: Old string-based signals don't work with the new system
**Solution**: Use factory methods (`IncStrategySignal.BUY()` instead of `"ENTRY"`)

### Issue 2: Import Errors
**Problem**: Old import paths no longer exist
**Solution**: Update to the new module structure or use compatibility aliases

### Issue 3: Configuration Format
**Problem**: The old configuration format is not compatible
**Solution**: Migrate to the new `BacktestConfig` and `OptimizationConfig` classes

### Issue 4: Indicator State
**Problem**: Indicator state is not preserved during migration
**Solution**: Use new indicator initialization patterns with proper state management

## Support and Resources

### Documentation
- [Strategy Development Guide](./strategies.md)
- [Indicator Reference](./indicators.md)
- [Backtesting Guide](./backtesting.md)
- [API Reference](./api.md)

### Examples
- [Basic Usage Examples](../examples/basic_usage.py)
- Strategy migration examples in documentation

### Getting Help
- Review the comprehensive API documentation
- Check the examples directory for usage patterns
- Refer to the original Cycles documentation for context

## Legacy Framework Reference

### Timeframe System (Legacy)

The legacy Cycles framework had sophisticated timeframe management that is preserved in the new system:

**Key Concepts from Legacy System**:
- Strategy-controlled timeframes
- Automatic resampling
- Precision execution with 1-minute data
- Signal mapping between timeframes

**Migration Notes**:
- The new `TimeframeAggregator` provides similar functionality (see the sketch after this list)
- Strategies can still specify required timeframes
- Multi-timeframe strategies are fully supported
- 1-minute precision for stop-loss execution is maintained
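
For illustration, a minimal sketch of the aggregator interface. The `update()` method and its return convention are taken from the `TimeframeAggregator` source included in this commit; the import path assumes it is exported from `strategies.base` in the new layout, and the sample bars are made up:

```python
import pandas as pd
from IncrementalTrader.strategies.base import TimeframeAggregator  # assumed export path

agg = TimeframeAggregator(timeframe_minutes=15)

minute_bars = [
    (pd.Timestamp("2024-01-01 10:00"), {"open": 100.0, "high": 100.5, "low": 99.8, "close": 100.2, "volume": 10.0}),
    (pd.Timestamp("2024-01-01 10:01"), {"open": 100.2, "high": 100.9, "low": 100.1, "close": 100.7, "volume": 12.0}),
]

for ts, ohlcv in minute_bars:
    completed = agg.update(ts, ohlcv)  # returns a dict only when a 15-minute bar closes
    if completed is not None:
        print(completed["timestamp"], completed["close"])
```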

### Strategy Manager (Legacy)

The legacy `StrategyManager` coordinated multi-strategy combination:

**Legacy Features**:
- Multi-strategy orchestration
- Signal combination methods (weighted consensus, majority voting)
- Multi-timeframe strategy coordination

**Migration Path**:
- Individual strategies are now self-contained
- Multi-strategy combination can be implemented at the application level (see the sketch below)
- Consider using multiple backtests and combining results
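
As a starting point, a weighted-consensus combiner can live entirely in application code. A sketch, assuming each strategy follows the `IncStrategyBase` interface (`weight` attribute, `get_entry_signal()`) and that signals expose their `confidence` as an attribute:

```python
def weighted_entry_confidence(strategies) -> float:
    """Weighted consensus: average entry confidence, weighted by strategy.weight."""
    total_weight = sum(s.weight for s in strategies)
    if total_weight == 0:
        return 0.0
    return sum(s.weight * s.get_entry_signal().confidence for s in strategies) / total_weight

# Enter when the consensus is strong enough (0.6 is an arbitrary threshold):
# if weighted_entry_confidence(my_strategies) > 0.6: ...
```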

### Performance Characteristics (Legacy)

**Legacy Strategy Performance Notes**:
- Default Strategy: High accuracy in trending markets, vulnerable to sideways markets
- BBRS Strategy: Market regime adaptation, volume confirmation, multi-timeframe analysis

**New Performance Improvements**:
- Enhanced signal generation reduces false positives
- Better risk management with dedicated position manager
- Improved backtesting accuracy with enhanced data handling

---

*This migration guide provides a comprehensive path from the legacy Cycles framework to the new IncrementalTrader module while preserving functionality and improving architecture.*

@ -1,460 +0,0 @@
# Incremental Backtester

A high-performance backtesting system for incremental trading strategies with multiprocessing support for parameter optimization.

## Overview

The Incremental Backtester provides a complete solution for testing incremental trading strategies:

- **IncTrader**: Manages a single strategy during backtesting
- **IncBacktester**: Orchestrates multiple traders and parameter optimization
- **Multiprocessing Support**: Parallel execution across CPU cores
- **Memory Efficient**: Bounded memory usage regardless of data length
- **Real-time Compatible**: Same interface as live trading systems

## Quick Start

### 1. Basic Single Strategy Backtest

```python
from cycles.IncStrategies import (
    IncBacktester, BacktestConfig, IncRandomStrategy
)

# Configure backtest
config = BacktestConfig(
    data_file="btc_1min_2023.csv",
    start_date="2023-01-01",
    end_date="2023-12-31",
    initial_usd=10000,
    stop_loss_pct=0.02,   # 2% stop loss
    take_profit_pct=0.05  # 5% take profit
)

# Create strategy
strategy = IncRandomStrategy(params={
    "timeframe": "15min",
    "entry_probability": 0.1,
    "exit_probability": 0.15
})

# Run backtest
backtester = IncBacktester(config)
results = backtester.run_single_strategy(strategy)

print(f"Profit: {results['profit_ratio']*100:.2f}%")
print(f"Trades: {results['n_trades']}")
print(f"Win Rate: {results['win_rate']*100:.1f}%")
```

### 2. Multiple Strategies

```python
strategies = [
    IncRandomStrategy(params={"timeframe": "15min"}),
    IncRandomStrategy(params={"timeframe": "30min"}),
    IncMetaTrendStrategy(params={"timeframe": "15min"})
]

results = backtester.run_multiple_strategies(strategies)

for result in results:
    print(f"{result['strategy_name']}: {result['profit_ratio']*100:.2f}%")
```

### 3. Parameter Optimization

```python
# Define parameter grids
strategy_param_grid = {
    "timeframe": ["15min", "30min", "1h"],
    "entry_probability": [0.05, 0.1, 0.15],
    "exit_probability": [0.1, 0.15, 0.2]
}

trader_param_grid = {
    "stop_loss_pct": [0.01, 0.02, 0.03],
    "take_profit_pct": [0.03, 0.05, 0.07]
}

# Run optimization (uses all CPU cores)
results = backtester.optimize_parameters(
    strategy_class=IncRandomStrategy,
    param_grid=strategy_param_grid,
    trader_param_grid=trader_param_grid,
    max_workers=8  # Use 8 CPU cores
)

# Get summary statistics
summary = backtester.get_summary_statistics(results)
print(f"Best profit: {summary['profit_ratio']['max']*100:.2f}%")

# Save results
backtester.save_results(results, "optimization_results.csv")
```

## Architecture

### IncTrader Class

Manages a single strategy during backtesting:

```python
trader = IncTrader(
    strategy=strategy,
    initial_usd=10000,
    params={
        "stop_loss_pct": 0.02,
        "take_profit_pct": 0.05
    }
)

# Process data sequentially
for timestamp, ohlcv_data in data_stream:
    trader.process_data_point(timestamp, ohlcv_data)

# Get results
results = trader.get_results()
```

**Key Features:**
- Position management (USD/coin balance)
- Trade execution based on strategy signals
- Stop loss and take profit handling
- Performance tracking and metrics
- Fee calculation using existing MarketFees

### IncBacktester Class

Orchestrates multiple traders and handles data loading:

```python
backtester = IncBacktester(config, storage)

# Single strategy
results = backtester.run_single_strategy(strategy)

# Multiple strategies
results = backtester.run_multiple_strategies(strategies)

# Parameter optimization
results = backtester.optimize_parameters(strategy_class, param_grid)
```

**Key Features:**
- Data loading using existing Storage class
- Multiprocessing for parameter optimization
- Result aggregation and analysis
- Summary statistics calculation
- CSV export functionality

### BacktestConfig Class

Configuration for backtesting runs:

```python
config = BacktestConfig(
    data_file="btc_1min_2023.csv",
    start_date="2023-01-01",
    end_date="2023-12-31",
    initial_usd=10000,
    timeframe="1min",

    # Trader parameters
    stop_loss_pct=0.02,
    take_profit_pct=0.05,

    # Performance settings
    max_workers=None,  # Auto-detect CPU cores
    chunk_size=1000
)
```

## Data Requirements

### Input Data Format

The backtester expects minute-level OHLCV data in CSV format:

```csv
timestamp,open,high,low,close,volume
1672531200,16625.1,16634.5,16620.0,16628.3,125.45
1672531260,16628.3,16635.2,16625.8,16631.7,98.32
...
```

**Requirements** (a sanity-check sketch follows the list):
- Timestamp column (Unix timestamp or datetime)
- OHLCV columns: open, high, low, close, volume
- Minute-level frequency (strategies handle timeframe aggregation)
- Sorted by timestamp (ascending)
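
Before a long run, it is worth checking a file against these requirements. A quick sanity check in plain pandas (not part of the backtester):

```python
import pandas as pd

df = pd.read_csv("btc_1min_2023.csv")

required = {"timestamp", "open", "high", "low", "close", "volume"}
missing = required - set(df.columns)
assert not missing, f"missing columns: {missing}"

ts = pd.to_datetime(df["timestamp"], unit="s")  # drop unit="s" if the column is already datetime
assert ts.is_monotonic_increasing, "data must be sorted by timestamp (ascending)"

# Gaps larger than one minute are worth investigating before backtesting
print("largest gap:", ts.diff().max())
```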

### Data Loading

Uses the existing Storage class for data loading:

```python
from cycles.utils.storage import Storage

storage = Storage()
data = storage.load_data(
    "btc_1min_2023.csv",
    "2023-01-01",
    "2023-12-31"
)
```

## Performance Features

### Multiprocessing Support

Parameter optimization automatically distributes work across CPU cores:

```python
# Automatic CPU detection
results = backtester.optimize_parameters(strategy_class, param_grid)

# Manual worker count
results = backtester.optimize_parameters(
    strategy_class, param_grid, max_workers=4
)

# Single-threaded (for debugging)
results = backtester.optimize_parameters(
    strategy_class, param_grid, max_workers=1
)
```

### Memory Efficiency

- **Bounded Memory**: Strategy buffers have fixed size limits
- **Incremental Processing**: No need to load entire datasets into memory
- **Efficient Data Structures**: Optimized for sequential processing

### Performance Monitoring

Built-in performance tracking:

```python
results = backtester.run_single_strategy(strategy)

print(f"Backtest duration: {results['backtest_duration_seconds']:.2f}s")
print(f"Data points processed: {results['data_points_processed']}")
print(f"Processing rate: {results['data_points_processed']/results['backtest_duration_seconds']:.0f} points/sec")
```

## Result Analysis

### Individual Results

Each backtest returns comprehensive metrics:

```python
{
    "strategy_name": "IncRandomStrategy",
    "strategy_params": {"timeframe": "15min", ...},
    "trader_params": {"stop_loss_pct": 0.02, ...},
    "initial_usd": 10000.0,
    "final_usd": 10250.0,
    "profit_ratio": 0.025,
    "n_trades": 15,
    "win_rate": 0.6,
    "max_drawdown": 0.08,
    "avg_trade": 0.0167,
    "total_fees_usd": 45.32,
    "trades": [...],  # Individual trade records
    "backtest_duration_seconds": 2.45
}
```

### Summary Statistics

For parameter optimization runs:

```python
summary = backtester.get_summary_statistics(results)

{
    "total_runs": 108,
    "successful_runs": 105,
    "failed_runs": 3,
    "profit_ratio": {
        "mean": 0.023,
        "std": 0.045,
        "min": -0.12,
        "max": 0.18,
        "median": 0.019
    },
    "best_run": {...},
    "worst_run": {...}
}
```

### Export Results

Save results to CSV for further analysis:

```python
backtester.save_results(results, "backtest_results.csv")
```

Output includes:
- Strategy and trader parameters
- Performance metrics
- Trade statistics
- Execution timing

## Integration with Existing System

### Compatibility

The incremental backtester integrates seamlessly with existing components:

- **Storage Class**: Uses existing data loading infrastructure
- **MarketFees**: Uses existing fee calculation
- **Strategy Interface**: Compatible with incremental strategies
- **Result Format**: Similar to existing Backtest class

### Migration from Original Backtester

```python
# Original backtester
from cycles.backtest import Backtest

# Incremental backtester
from cycles.IncStrategies import IncBacktester, BacktestConfig

# Similar interface, enhanced capabilities
config = BacktestConfig(...)
backtester = IncBacktester(config)
results = backtester.run_single_strategy(strategy)
```

## Testing

### Synthetic Data Testing

Test with synthetic data before using real market data:

```python
from cycles.IncStrategies.test_inc_backtester import main

# Run all tests
main()
```

### Unit Tests

Individual component testing:

```python
# Test IncTrader
from cycles.IncStrategies.test_inc_backtester import test_inc_trader
test_inc_trader()

# Test IncBacktester
from cycles.IncStrategies.test_inc_backtester import test_inc_backtester
test_inc_backtester()
```

## Examples

See `example_backtest.py` for comprehensive usage examples:

```python
from cycles.IncStrategies.example_backtest import (
    example_single_strategy_backtest,
    example_parameter_optimization,
    example_custom_analysis
)

# Run examples
example_single_strategy_backtest()
example_parameter_optimization()
```

## Best Practices

### 1. Data Preparation

- Ensure data quality (no gaps, correct format)
- Use appropriate date ranges for testing
- Consider market conditions in test periods

### 2. Parameter Optimization

- Start with small parameter grids for testing
- Use representative time periods
- Consider overfitting risks
- Validate results on out-of-sample data (see the split sketch below)
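
One way to validate out of sample with the pieces above: optimize on one date range, then re-run the winning parameters on a held-out range. A sketch using the config and result fields shown earlier (the date split is arbitrary):

```python
# In-sample optimization window
opt_config = BacktestConfig(
    data_file="btc_1min_2023.csv",
    start_date="2023-01-01",
    end_date="2023-08-31",
    initial_usd=10000,
)
results = IncBacktester(opt_config).optimize_parameters(
    strategy_class=IncRandomStrategy,
    param_grid=strategy_param_grid,
)
best = max(results, key=lambda r: r["profit_ratio"])

# Out-of-sample validation window with the winning parameters
val_config = BacktestConfig(
    data_file="btc_1min_2023.csv",
    start_date="2023-09-01",
    end_date="2023-12-31",
    initial_usd=10000,
)
validation = IncBacktester(val_config).run_single_strategy(
    IncRandomStrategy(params=best["strategy_params"])
)
print("out-of-sample profit:", validation["profit_ratio"])
```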

### 3. Performance Optimization

- Use multiprocessing for large parameter grids
- Monitor memory usage for long backtests
- Profile bottlenecks before optimizing

### 4. Result Validation

- Compare with the original backtester for validation (a comparison sketch follows this list)
- Check trade logic manually for small samples
- Verify fee calculations and position management
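
For the comparison in the first bullet, a small harness can flag regressions early. The original `Backtest` API is not documented here, so `run_legacy_backtest` is a hypothetical wrapper you would write around `cycles.backtest.Backtest`:

```python
def compare_backtesters(strategy, config, tolerance=0.01):
    """Sanity-check sketch: the two engines should roughly agree on profit."""
    inc_results = IncBacktester(config).run_single_strategy(strategy)
    legacy_profit = run_legacy_backtest(strategy)  # hypothetical wrapper, not a real API
    diff = abs(inc_results["profit_ratio"] - legacy_profit)
    assert diff <= tolerance, f"engines disagree by {diff:.4f}"
```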

## Troubleshooting

### Common Issues

1. **Data Loading Errors**
   - Check file path and format
   - Verify date range availability
   - Ensure required columns exist

2. **Strategy Errors**
   - Check strategy initialization
   - Verify parameter validity
   - Monitor warmup period completion

3. **Performance Issues**
   - Reduce parameter grid size
   - Limit worker count for memory constraints
   - Use shorter time periods for testing

### Debug Mode

Enable detailed logging:

```python
import logging
logging.basicConfig(level=logging.DEBUG)

# Run with detailed output
results = backtester.run_single_strategy(strategy)
```

### Memory Monitoring

Monitor memory usage during optimization:

```python
import os
import psutil

process = psutil.Process(os.getpid())
print(f"Memory usage: {process.memory_info().rss / 1024 / 1024:.1f} MB")
```

## Future Enhancements

- **Live Trading Integration**: Direct connection to trading systems
- **Advanced Analytics**: Risk metrics, Sharpe ratio, etc.
- **Visualization**: Built-in plotting and analysis tools
- **Database Support**: Direct database connectivity
- **Strategy Combinations**: Multi-strategy portfolio testing

## Support

For issues and questions:
1. Check the test scripts for working examples
2. Review the TODO.md for known limitations
3. Examine the base strategy implementations
4. Use debug logging for detailed troubleshooting

@ -1,71 +0,0 @@
"""
|
||||
Incremental Strategies Module
|
||||
|
||||
This module contains the incremental calculation implementation of trading strategies
|
||||
that support real-time data processing with efficient memory usage and performance.
|
||||
|
||||
The incremental strategies are designed to:
|
||||
- Process new data points incrementally without full recalculation
|
||||
- Maintain bounded memory usage regardless of data history length
|
||||
- Provide identical results to batch calculations
|
||||
- Support real-time trading with minimal latency
|
||||
|
||||
Classes:
|
||||
IncStrategyBase: Base class for all incremental strategies
|
||||
IncRandomStrategy: Incremental implementation of random strategy for testing
|
||||
IncMetaTrendStrategy: Incremental implementation of the MetaTrend strategy
|
||||
IncDefaultStrategy: Incremental implementation of the default Supertrend strategy
|
||||
IncBBRSStrategy: Incremental implementation of Bollinger Bands + RSI strategy
|
||||
IncStrategyManager: Manager for coordinating multiple incremental strategies
|
||||
|
||||
IncTrader: Trader that manages a single strategy during backtesting
|
||||
IncBacktester: Backtester for testing incremental strategies with multiprocessing
|
||||
BacktestConfig: Configuration class for backtesting runs
|
||||
"""
|
||||
|
||||
from .base import IncStrategyBase, IncStrategySignal
|
||||
from .random_strategy import IncRandomStrategy
|
||||
from .metatrend_strategy import IncMetaTrendStrategy, MetaTrendStrategy
|
||||
from .inc_trader import IncTrader, TradeRecord
|
||||
from .inc_backtester import IncBacktester, BacktestConfig
|
||||
|
||||
# Note: These will be implemented in subsequent phases
|
||||
# from .default_strategy import IncDefaultStrategy
|
||||
# from .bbrs_strategy import IncBBRSStrategy
|
||||
# from .manager import IncStrategyManager
|
||||
|
||||
# Strategy registry for easy access
|
||||
AVAILABLE_STRATEGIES = {
|
||||
'random': IncRandomStrategy,
|
||||
'metatrend': IncMetaTrendStrategy,
|
||||
'meta_trend': IncMetaTrendStrategy, # Alternative name
|
||||
# 'default': IncDefaultStrategy,
|
||||
# 'bbrs': IncBBRSStrategy,
|
||||
}
|
||||
|
||||
__all__ = [
|
||||
# Base classes
|
||||
'IncStrategyBase',
|
||||
'IncStrategySignal',
|
||||
|
||||
# Strategies
|
||||
'IncRandomStrategy',
|
||||
'IncMetaTrendStrategy',
|
||||
'MetaTrendStrategy',
|
||||
|
||||
# Backtesting components
|
||||
'IncTrader',
|
||||
'IncBacktester',
|
||||
'BacktestConfig',
|
||||
'TradeRecord',
|
||||
|
||||
# Registry
|
||||
'AVAILABLE_STRATEGIES'
|
||||
|
||||
# Future implementations
|
||||
# 'IncDefaultStrategy',
|
||||
# 'IncBBRSStrategy',
|
||||
# 'IncStrategyManager'
|
||||
]
|
||||
|
||||
__version__ = '1.0.0'
|
||||
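
# Usage sketch (illustrative, not part of this module): the registry allows
# name-based construction, mirroring the constructor style used elsewhere
# in this package.
#
#   from cycles.IncStrategies import AVAILABLE_STRATEGIES
#   strategy_cls = AVAILABLE_STRATEGIES["metatrend"]
#   strategy = strategy_cls(params={"timeframe": "15min"})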
@ -1,649 +0,0 @@
"""
Base classes for the incremental strategy system.

This module contains the fundamental building blocks for all incremental trading strategies:
- IncStrategySignal: Represents trading signals with confidence and metadata
- IncStrategyBase: Abstract base class that all incremental strategies must inherit from
- TimeframeAggregator: Built-in timeframe aggregation for minute-level data processing
"""

import pandas as pd
from abc import ABC, abstractmethod
from typing import Dict, Optional, List, Union, Any
from collections import deque
import logging

# Import the original signal class for compatibility
from ..strategies.base import StrategySignal

# Create alias for consistency
IncStrategySignal = StrategySignal


class TimeframeAggregator:
    """
    Handles real-time aggregation of minute data to higher timeframes.

    This class accumulates minute-level OHLCV data and produces complete
    bars when a timeframe period is completed. Integrated into IncStrategyBase
    to provide consistent minute-level data processing across all strategies.
    """

    def __init__(self, timeframe_minutes: int = 15):
        """
        Initialize timeframe aggregator.

        Args:
            timeframe_minutes: Target timeframe in minutes (e.g., 60 for 1h, 15 for 15min)
        """
        self.timeframe_minutes = timeframe_minutes
        self.current_bar = None
        self.current_bar_start = None
        self.last_completed_bar = None

    def update(self, timestamp: pd.Timestamp, ohlcv_data: Dict[str, float]) -> Optional[Dict[str, float]]:
        """
        Update with new minute data and return completed bar if timeframe is complete.

        Args:
            timestamp: Timestamp of the data
            ohlcv_data: OHLCV data dictionary

        Returns:
            Completed OHLCV bar if timeframe period ended, None otherwise
        """
        # Calculate which timeframe bar this timestamp belongs to
        bar_start = self._get_bar_start_time(timestamp)

        # Check if we're starting a new bar
        if self.current_bar_start != bar_start:
            # Save the completed bar (if any)
            completed_bar = self.current_bar.copy() if self.current_bar is not None else None

            # Start new bar
            self.current_bar_start = bar_start
            self.current_bar = {
                'timestamp': bar_start,
                'open': ohlcv_data['close'],  # Use current close as open for new bar
                'high': ohlcv_data['close'],
                'low': ohlcv_data['close'],
                'close': ohlcv_data['close'],
                'volume': ohlcv_data['volume']
            }

            # Return the completed bar (if any)
            if completed_bar is not None:
                self.last_completed_bar = completed_bar
                return completed_bar
        else:
            # Update current bar with new data
            if self.current_bar is not None:
                self.current_bar['high'] = max(self.current_bar['high'], ohlcv_data['high'])
                self.current_bar['low'] = min(self.current_bar['low'], ohlcv_data['low'])
                self.current_bar['close'] = ohlcv_data['close']
                self.current_bar['volume'] += ohlcv_data['volume']

        return None  # No completed bar yet

    def _get_bar_start_time(self, timestamp: pd.Timestamp) -> pd.Timestamp:
        """Calculate the start time of the timeframe bar for given timestamp.

        This method now aligns with pandas resampling to ensure consistency
        with the original strategy's bar boundaries.
        """
        # Use pandas-style resampling alignment
        # This ensures bars align to standard boundaries (e.g., 00:00, 00:15, 00:30, 00:45)
        freq_str = f'{self.timeframe_minutes}min'

        # Create a temporary series with the timestamp and resample to get the bar start
        temp_series = pd.Series([1], index=[timestamp])
        resampled = temp_series.resample(freq_str)

        # Get the first group's name (which is the bar start time)
        for bar_start, _ in resampled:
            return bar_start

        # Fallback to original method if resampling fails
        minutes_since_midnight = timestamp.hour * 60 + timestamp.minute
        bar_minutes = (minutes_since_midnight // self.timeframe_minutes) * self.timeframe_minutes

        return timestamp.replace(
            hour=bar_minutes // 60,
            minute=bar_minutes % 60,
            second=0,
            microsecond=0
        )

    def get_current_bar(self) -> Optional[Dict[str, float]]:
        """Get the current incomplete bar (for debugging)."""
        return self.current_bar.copy() if self.current_bar is not None else None

    def reset(self):
        """Reset aggregator state."""
        self.current_bar = None
        self.current_bar_start = None
        self.last_completed_bar = None
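
    # Usage sketch: feed minute bars in timestamp order; update() returns a
    # finished bar exactly once per completed timeframe period.
    #
    #   agg = TimeframeAggregator(timeframe_minutes=15)
    #   bar = agg.update(ts, {"open": o, "high": h, "low": l, "close": c, "volume": v})
    #   if bar is not None:
    #       ...  # a 15-minute bar just closed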


class IncStrategyBase(ABC):
    """
    Abstract base class for all incremental trading strategies.

    This class defines the interface that all incremental strategies must implement:
    - get_minimum_buffer_size(): Specify minimum data requirements
    - calculate_on_data(): Process new data points incrementally
    - supports_incremental_calculation(): Whether strategy supports incremental mode
    - get_entry_signal(): Generate entry signals
    - get_exit_signal(): Generate exit signals

    The incremental approach allows strategies to:
    - Process new data points without full recalculation
    - Maintain bounded memory usage regardless of data history length
    - Provide real-time performance with minimal latency
    - Support both initialization and incremental modes
    - Accept minute-level data and internally aggregate to any timeframe

    New Features:
    - Built-in TimeframeAggregator for minute-level data processing
    - update_minute_data() method for real-time trading systems
    - Automatic timeframe detection and aggregation
    - Backward compatibility with existing update() methods

    Attributes:
        name (str): Strategy name
        weight (float): Strategy weight for combination
        params (Dict): Strategy parameters
        calculation_mode (str): Current mode ('initialization' or 'incremental')
        is_warmed_up (bool): Whether strategy has sufficient data for reliable signals
        timeframe_buffers (Dict): Rolling buffers for different timeframes
        indicator_states (Dict): Internal indicator calculation states
        timeframe_aggregator (TimeframeAggregator): Built-in aggregator for minute data

    Example:
        class MyIncStrategy(IncStrategyBase):
            def get_minimum_buffer_size(self):
                return {"15min": 50}  # Strategy works on 15min timeframe

            def calculate_on_data(self, new_data_point, timestamp):
                # Process new data incrementally
                self._update_indicators(new_data_point)

            def get_entry_signal(self):
                # Generate signal based on current state
                if self._should_enter():
                    return IncStrategySignal("ENTRY", confidence=0.8)
                return IncStrategySignal("HOLD", confidence=0.0)

        # Usage with minute-level data:
        strategy = MyIncStrategy(params={"timeframe_minutes": 15})
        for minute_data in live_stream:
            result = strategy.update_minute_data(minute_data['timestamp'], minute_data)
            if result is not None:  # Complete 15min bar formed
                entry_signal = strategy.get_entry_signal()
    """

    def __init__(self, name: str, weight: float = 1.0, params: Optional[Dict] = None):
        """
        Initialize the incremental strategy base.

        Args:
            name: Strategy name/identifier
            weight: Strategy weight for combination (default: 1.0)
            params: Strategy-specific parameters
        """
        self.name = name
        self.weight = weight
        self.params = params or {}

        # Calculation state
        self._calculation_mode = "initialization"
        self._is_warmed_up = False
        self._data_points_received = 0

        # Timeframe management
        self._timeframe_buffers = {}
        self._timeframe_last_update = {}
        self._buffer_size_multiplier = self.params.get("buffer_size_multiplier", 2.0)

        # Built-in timeframe aggregation
        self._primary_timeframe_minutes = self._extract_timeframe_minutes()
        self._timeframe_aggregator = None
        if self._primary_timeframe_minutes > 1:
            self._timeframe_aggregator = TimeframeAggregator(self._primary_timeframe_minutes)

        # Indicator states (strategy-specific)
        self._indicator_states = {}

        # Signal generation state
        self._last_signals = {}
        self._signal_history = deque(maxlen=100)

        # Error handling
        self._max_acceptable_gap = pd.Timedelta(self.params.get("max_acceptable_gap", "5min"))
        self._state_validation_enabled = self.params.get("enable_state_validation", True)

        # Performance monitoring
        self._performance_metrics = {
            'update_times': deque(maxlen=1000),
            'signal_generation_times': deque(maxlen=1000),
            'state_validation_failures': 0,
            'data_gaps_handled': 0,
            'minute_data_points_processed': 0,
            'timeframe_bars_completed': 0
        }

        # Compatibility with original strategy interface
        self.initialized = False
        self.timeframes_data = {}

    def _extract_timeframe_minutes(self) -> int:
        """
        Extract timeframe in minutes from strategy parameters.

        Looks for timeframe configuration in various parameter formats:
        - timeframe_minutes: Direct specification in minutes
        - timeframe: String format like "15min", "1h", etc.

        Returns:
            int: Timeframe in minutes (default: 1 for minute-level processing)
        """
        # Direct specification
        if "timeframe_minutes" in self.params:
            return self.params["timeframe_minutes"]

        # String format parsing
        timeframe_str = self.params.get("timeframe", "1min")

        if timeframe_str.endswith("min"):
            return int(timeframe_str[:-3])
        elif timeframe_str.endswith("h"):
            return int(timeframe_str[:-1]) * 60
        elif timeframe_str.endswith("d"):
            return int(timeframe_str[:-1]) * 60 * 24
        else:
            # Default to 1 minute if can't parse
            return 1

    def update_minute_data(self, timestamp: pd.Timestamp, ohlcv_data: Dict[str, float]) -> Optional[Dict[str, Any]]:
        """
        Update strategy with minute-level OHLCV data.

        This method provides a standardized interface for real-time trading systems
        that receive minute-level data. It internally aggregates to the strategy's
        configured timeframe and only processes indicators when complete bars are formed.

        Args:
            timestamp: Timestamp of the minute data
            ohlcv_data: Dictionary with 'open', 'high', 'low', 'close', 'volume'

        Returns:
            Strategy processing result if timeframe bar completed, None otherwise

        Example:
            # Process live minute data
            result = strategy.update_minute_data(
                timestamp=pd.Timestamp('2024-01-01 10:15:00'),
                ohlcv_data={
                    'open': 100.0,
                    'high': 101.0,
                    'low': 99.5,
                    'close': 100.5,
                    'volume': 1000.0
                }
            )

            if result is not None:
                # A complete timeframe bar was formed and processed
                entry_signal = strategy.get_entry_signal()
        """
        self._performance_metrics['minute_data_points_processed'] += 1

        # If no aggregator (1min strategy), process directly
        if self._timeframe_aggregator is None:
            self.calculate_on_data(ohlcv_data, timestamp)
            return {
                'timestamp': timestamp,
                'timeframe_minutes': 1,
                'processed_directly': True,
                'is_warmed_up': self.is_warmed_up
            }

        # Use aggregator to accumulate minute data
        completed_bar = self._timeframe_aggregator.update(timestamp, ohlcv_data)

        if completed_bar is not None:
            # A complete timeframe bar was formed
            self._performance_metrics['timeframe_bars_completed'] += 1

            # Process the completed bar
            self.calculate_on_data(completed_bar, completed_bar['timestamp'])

            # Return processing result
            return {
                'timestamp': completed_bar['timestamp'],
                'timeframe_minutes': self._primary_timeframe_minutes,
                'bar_data': completed_bar,
                'is_warmed_up': self.is_warmed_up,
                'processed_bar': True
            }

        # No complete bar yet
        return None

    def get_current_incomplete_bar(self) -> Optional[Dict[str, float]]:
        """
        Get the current incomplete timeframe bar (for monitoring).

        Useful for debugging and monitoring the aggregation process.

        Returns:
            Current incomplete bar data or None if no aggregator
        """
        if self._timeframe_aggregator is not None:
            return self._timeframe_aggregator.get_current_bar()
        return None

    @property
    def calculation_mode(self) -> str:
        """Current calculation mode: 'initialization' or 'incremental'"""
        return self._calculation_mode

    @property
    def is_warmed_up(self) -> bool:
        """Whether strategy has sufficient data for reliable signals"""
        return self._is_warmed_up

    @abstractmethod
    def get_minimum_buffer_size(self) -> Dict[str, int]:
        """
        Return minimum data points needed for each timeframe.

        This method must be implemented by each strategy to specify how much
        historical data is required for reliable calculations.

        Returns:
            Dict[str, int]: {timeframe: min_points} mapping

        Example:
            return {"15min": 50, "1min": 750}  # 50 15min candles = 750 1min candles
        """
        pass

    @abstractmethod
    def calculate_on_data(self, new_data_point: Dict[str, float], timestamp: pd.Timestamp) -> None:
        """
        Process a single new data point incrementally.

        This method is called for each new data point and should update
        the strategy's internal state incrementally.

        Args:
            new_data_point: OHLCV data point {open, high, low, close, volume}
            timestamp: Timestamp of the data point
        """
        pass

    @abstractmethod
    def supports_incremental_calculation(self) -> bool:
        """
        Whether strategy supports incremental calculation.

        Returns:
            bool: True if incremental mode supported, False for fallback to batch mode
        """
        pass

    @abstractmethod
    def get_entry_signal(self) -> IncStrategySignal:
        """
        Generate entry signal based on current strategy state.

        This method should use the current internal state to determine
        whether an entry signal should be generated.

        Returns:
            IncStrategySignal: Entry signal with confidence level
        """
        pass

    @abstractmethod
    def get_exit_signal(self) -> IncStrategySignal:
        """
        Generate exit signal based on current strategy state.

        This method should use the current internal state to determine
        whether an exit signal should be generated.

        Returns:
            IncStrategySignal: Exit signal with confidence level
        """
        pass

    def get_confidence(self) -> float:
        """
        Get strategy confidence for the current market state.

        Default implementation returns 1.0. Strategies can override
        this to provide dynamic confidence based on market conditions.

        Returns:
            float: Confidence level (0.0 to 1.0)
        """
        return 1.0

    def reset_calculation_state(self) -> None:
        """Reset internal calculation state for reinitialization."""
        self._calculation_mode = "initialization"
        self._is_warmed_up = False
        self._data_points_received = 0
        self._timeframe_buffers.clear()
        self._timeframe_last_update.clear()
        self._indicator_states.clear()
        self._last_signals.clear()
        self._signal_history.clear()

        # Reset timeframe aggregator
        if self._timeframe_aggregator is not None:
            self._timeframe_aggregator.reset()

        # Reset performance metrics
        for key in self._performance_metrics:
            if isinstance(self._performance_metrics[key], deque):
                self._performance_metrics[key].clear()
            else:
                self._performance_metrics[key] = 0

    def get_current_state_summary(self) -> Dict[str, Any]:
        """Get summary of current calculation state for debugging."""
        return {
            'strategy_name': self.name,
            'calculation_mode': self._calculation_mode,
            'is_warmed_up': self._is_warmed_up,
            'data_points_received': self._data_points_received,
            'timeframes': list(self._timeframe_buffers.keys()),
            'buffer_sizes': {tf: len(buf) for tf, buf in self._timeframe_buffers.items()},
            'indicator_states': {name: state.get_state_summary() if hasattr(state, 'get_state_summary') else str(state)
                                 for name, state in self._indicator_states.items()},
            'last_signals': self._last_signals,
            'timeframe_aggregator': {
                'enabled': self._timeframe_aggregator is not None,
                'primary_timeframe_minutes': self._primary_timeframe_minutes,
                'current_incomplete_bar': self.get_current_incomplete_bar()
            },
            'performance_metrics': {
                'avg_update_time': sum(self._performance_metrics['update_times']) / len(self._performance_metrics['update_times'])
                                   if self._performance_metrics['update_times'] else 0,
                'avg_signal_time': sum(self._performance_metrics['signal_generation_times']) / len(self._performance_metrics['signal_generation_times'])
                                   if self._performance_metrics['signal_generation_times'] else 0,
                'validation_failures': self._performance_metrics['state_validation_failures'],
                'data_gaps_handled': self._performance_metrics['data_gaps_handled'],
                'minute_data_points_processed': self._performance_metrics['minute_data_points_processed'],
                'timeframe_bars_completed': self._performance_metrics['timeframe_bars_completed']
            }
        }

    def _update_timeframe_buffers(self, new_data_point: Dict[str, float], timestamp: pd.Timestamp) -> None:
        """Update all timeframe buffers with new data point."""
        # Get minimum buffer sizes
        min_buffer_sizes = self.get_minimum_buffer_size()

        for timeframe in min_buffer_sizes.keys():
            # Calculate actual buffer size with multiplier
            min_size = min_buffer_sizes[timeframe]
            actual_buffer_size = int(min_size * self._buffer_size_multiplier)

            # Initialize buffer if needed
            if timeframe not in self._timeframe_buffers:
                self._timeframe_buffers[timeframe] = deque(maxlen=actual_buffer_size)
                self._timeframe_last_update[timeframe] = None

            # Check if this timeframe should be updated
            if self._should_update_timeframe(timeframe, timestamp):
                # For 1min timeframe, add data directly
                if timeframe == "1min":
                    data_point = new_data_point.copy()
                    data_point['timestamp'] = timestamp
                    self._timeframe_buffers[timeframe].append(data_point)
                    self._timeframe_last_update[timeframe] = timestamp
                else:
                    # For other timeframes, we need to aggregate from 1min data
                    self._aggregate_to_timeframe(timeframe, new_data_point, timestamp)

    def _should_update_timeframe(self, timeframe: str, timestamp: pd.Timestamp) -> bool:
        """Check if timeframe should be updated based on timestamp."""
        if timeframe == "1min":
            return True  # Always update 1min

        last_update = self._timeframe_last_update.get(timeframe)
        if last_update is None:
            return True  # First update

        # Calculate timeframe interval
        if timeframe.endswith("min"):
            minutes = int(timeframe[:-3])
            interval = pd.Timedelta(minutes=minutes)
        elif timeframe.endswith("h"):
            hours = int(timeframe[:-1])
            interval = pd.Timedelta(hours=hours)
        else:
            return True  # Unknown timeframe, update anyway

        # Check if enough time has passed
        return timestamp >= last_update + interval

    def _aggregate_to_timeframe(self, timeframe: str, new_data_point: Dict[str, float], timestamp: pd.Timestamp) -> None:
        """Aggregate 1min data to specified timeframe."""
        # This is a simplified aggregation - in practice, you might want more sophisticated logic
        buffer = self._timeframe_buffers[timeframe]

        # If buffer is empty or we're starting a new period, add new candle
        if not buffer or self._should_update_timeframe(timeframe, timestamp):
            aggregated_point = new_data_point.copy()
            aggregated_point['timestamp'] = timestamp
            buffer.append(aggregated_point)
            self._timeframe_last_update[timeframe] = timestamp
        else:
            # Update the last candle in the buffer
            last_candle = buffer[-1]
            last_candle['high'] = max(last_candle['high'], new_data_point['high'])
            last_candle['low'] = min(last_candle['low'], new_data_point['low'])
            last_candle['close'] = new_data_point['close']
            last_candle['volume'] += new_data_point['volume']

    def _get_timeframe_buffer(self, timeframe: str) -> pd.DataFrame:
        """Get current buffer for specific timeframe as DataFrame."""
        if timeframe not in self._timeframe_buffers:
            return pd.DataFrame()

        buffer_data = list(self._timeframe_buffers[timeframe])
        if not buffer_data:
            return pd.DataFrame()

        df = pd.DataFrame(buffer_data)
        if 'timestamp' in df.columns:
            df = df.set_index('timestamp')

        return df

    def _validate_calculation_state(self) -> bool:
        """Validate internal calculation state consistency."""
        if not self._state_validation_enabled:
            return True

        try:
            # Check that all required buffers exist
            min_buffer_sizes = self.get_minimum_buffer_size()
            for timeframe in min_buffer_sizes.keys():
                if timeframe not in self._timeframe_buffers:
                    logging.warning(f"Missing buffer for timeframe {timeframe}")
                    return False

            # Check that indicator states are valid
            for name, state in self._indicator_states.items():
                if hasattr(state, 'is_initialized') and not state.is_initialized:
                    logging.warning(f"Indicator {name} not initialized")
                    return False

            return True

        except Exception as e:
            logging.error(f"State validation failed: {e}")
            self._performance_metrics['state_validation_failures'] += 1
            return False

    def _recover_from_state_corruption(self) -> None:
        """Recover from corrupted calculation state."""
        logging.warning(f"Recovering from state corruption in strategy {self.name}")

        # Reset to initialization mode
        self._calculation_mode = "initialization"
        self._is_warmed_up = False

        # Try to recalculate from available buffer data
        try:
            self._reinitialize_from_buffers()
        except Exception as e:
            logging.error(f"Failed to recover from buffers: {e}")
            # Complete reset as last resort
            self.reset_calculation_state()

    def _reinitialize_from_buffers(self) -> None:
        """Reinitialize indicators from available buffer data."""
        # This method should be overridden by specific strategies
        # to implement their own recovery logic
        pass

    def handle_data_gap(self, gap_duration: pd.Timedelta) -> None:
        """Handle gaps in data stream."""
        self._performance_metrics['data_gaps_handled'] += 1

        if gap_duration > self._max_acceptable_gap:
            logging.warning(f"Data gap {gap_duration} exceeds maximum acceptable gap {self._max_acceptable_gap}")
            self._trigger_reinitialization()
        else:
            logging.info(f"Handling acceptable data gap: {gap_duration}")
            # For small gaps, continue with current state
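
    # Caller-side sketch: derive the gap from consecutive timestamps and let
    # the strategy decide whether to reinitialize.
    #
    #   gap = current_ts - last_ts
    #   if gap > pd.Timedelta("1min"):
    #       strategy.handle_data_gap(gap)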

    def _trigger_reinitialization(self) -> None:
        """Trigger strategy reinitialization due to data gap or corruption."""
        logging.info(f"Triggering reinitialization for strategy {self.name}")
        self.reset_calculation_state()

    # Compatibility methods for original strategy interface
    def get_timeframes(self) -> List[str]:
        """Get required timeframes (compatibility method)."""
        return list(self.get_minimum_buffer_size().keys())

    def initialize(self, backtester) -> None:
        """Initialize strategy (compatibility method)."""
        # This method provides compatibility with the original strategy interface
        # The actual initialization happens through the incremental interface
        self.initialized = True
        logging.info(f"Incremental strategy {self.name} initialized in compatibility mode")

    def __repr__(self) -> str:
        """String representation of the strategy."""
        return (f"{self.__class__.__name__}(name={self.name}, "
                f"weight={self.weight}, mode={self._calculation_mode}, "
                f"warmed_up={self._is_warmed_up}, "
                f"data_points={self._data_points_received})")
@ -1,532 +0,0 @@
"""
Incremental BBRS Strategy

This module implements an incremental version of the Bollinger Bands + RSI strategy (BBRS)
for real-time data processing. It maintains constant memory usage and provides
identical results to the batch implementation after the warm-up period.

Key Features:
- Accepts minute-level data input for real-time compatibility
- Internal timeframe aggregation (1min, 5min, 15min, 1h, etc.)
- Incremental Bollinger Bands calculation
- Incremental RSI calculation with Wilder's smoothing
- Market regime detection (trending vs sideways)
- Real-time signal generation
- Constant memory usage
"""

from typing import Dict, Optional, Union, Tuple

import numpy as np
import pandas as pd
from datetime import datetime, timedelta

from .indicators.bollinger_bands import BollingerBandsState
from .indicators.rsi import RSIState


class TimeframeAggregator:
    """
    Handles real-time aggregation of minute data to higher timeframes.

    This class accumulates minute-level OHLCV data and produces complete
    bars when a timeframe period is completed.
    """

    def __init__(self, timeframe_minutes: int = 15):
        """
        Initialize the timeframe aggregator.

        Args:
            timeframe_minutes: Target timeframe in minutes (e.g., 60 for 1h, 15 for 15min)
        """
        self.timeframe_minutes = timeframe_minutes
        self.current_bar = None
        self.current_bar_start = None
        self.last_completed_bar = None

    def update(self, timestamp: pd.Timestamp, ohlcv_data: Dict[str, float]) -> Optional[Dict[str, float]]:
        """
        Update with new minute data and return the completed bar if the timeframe is complete.

        Args:
            timestamp: Timestamp of the data
            ohlcv_data: OHLCV data dictionary

        Returns:
            Completed OHLCV bar if a timeframe period ended, None otherwise
        """
        # Calculate which timeframe bar this timestamp belongs to
        bar_start = self._get_bar_start_time(timestamp)

        # Check if we're starting a new bar
        if self.current_bar_start != bar_start:
            # Save the completed bar (if any)
            completed_bar = self.current_bar.copy() if self.current_bar is not None else None

            # Start a new bar
            self.current_bar_start = bar_start
            self.current_bar = {
                'timestamp': bar_start,
                'open': ohlcv_data['close'],  # Use current close as open for new bar
                'high': ohlcv_data['close'],
                'low': ohlcv_data['close'],
                'close': ohlcv_data['close'],
                'volume': ohlcv_data['volume']
            }

            # Return the completed bar (if any)
            if completed_bar is not None:
                self.last_completed_bar = completed_bar
                return completed_bar
        else:
            # Update the current bar with new data
            if self.current_bar is not None:
                self.current_bar['high'] = max(self.current_bar['high'], ohlcv_data['high'])
                self.current_bar['low'] = min(self.current_bar['low'], ohlcv_data['low'])
                self.current_bar['close'] = ohlcv_data['close']
                self.current_bar['volume'] += ohlcv_data['volume']

        return None  # No completed bar yet

    def _get_bar_start_time(self, timestamp: pd.Timestamp) -> pd.Timestamp:
        """Calculate the start time of the timeframe bar for the given timestamp."""
        # Round down to the nearest timeframe boundary
        minutes_since_midnight = timestamp.hour * 60 + timestamp.minute
        bar_minutes = (minutes_since_midnight // self.timeframe_minutes) * self.timeframe_minutes

        return timestamp.replace(
            hour=bar_minutes // 60,
            minute=bar_minutes % 60,
            second=0,
            microsecond=0
        )

    def get_current_bar(self) -> Optional[Dict[str, float]]:
        """Get the current incomplete bar (for debugging)."""
        return self.current_bar.copy() if self.current_bar is not None else None

    def reset(self):
        """Reset aggregator state."""
        self.current_bar = None
        self.current_bar_start = None
        self.last_completed_bar = None
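
# Illustrative usage of TimeframeAggregator (a sketch with made-up values, not part
# of the original module): two minute bars that straddle a 15-minute boundary
# produce one completed bar on the second update.
#
#   agg = TimeframeAggregator(timeframe_minutes=15)
#   bar = {'open': 100.0, 'high': 101.0, 'low': 99.5, 'close': 100.5, 'volume': 10.0}
#   agg.update(pd.Timestamp("2024-01-01 09:14"), bar)  # returns None (bar still forming)
#   agg.update(pd.Timestamp("2024-01-01 09:15"), bar)  # returns the completed 09:00-09:15 bar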


class BBRSIncrementalState:
    """
    Incremental BBRS strategy state for real-time processing.

    This class maintains all the state needed for the BBRS strategy and can
    process new minute-level price data incrementally, internally aggregating
    to the configured timeframe before running indicators.

    Attributes:
        timeframe_minutes (int): Strategy timeframe in minutes (default: 60 for 1h)
        bb_period (int): Bollinger Bands period
        rsi_period (int): RSI period
        bb_width_threshold (float): BB width threshold for market regime detection
        trending_bb_multiplier (float): BB multiplier for trending markets
        sideways_bb_multiplier (float): BB multiplier for sideways markets
        trending_rsi_thresholds (tuple): RSI thresholds for trending markets (low, high)
        sideways_rsi_thresholds (tuple): RSI thresholds for sideways markets (low, high)
        squeeze_strategy (bool): Enable squeeze strategy

    Example:
        # Initialize strategy for 1-hour timeframe
        config = {
            "timeframe_minutes": 60,  # 1 hour bars
            "bb_period": 20,
            "rsi_period": 14,
            "bb_width": 0.05,
            "trending": {
                "bb_std_dev_multiplier": 2.5,
                "rsi_threshold": [30, 70]
            },
            "sideways": {
                "bb_std_dev_multiplier": 1.8,
                "rsi_threshold": [40, 60]
            },
            "SqueezeStrategy": True
        }

        strategy = BBRSIncrementalState(config)

        # Process minute-level data in real-time
        for minute_data in live_data_stream:
            result = strategy.update_minute_data(minute_data['timestamp'], minute_data)
            if result is not None:  # New timeframe bar completed
                if result['buy_signal']:
                    print("Buy signal generated!")
    """

    def __init__(self, config: Dict):
        """
        Initialize the incremental BBRS strategy.

        Args:
            config: Strategy configuration dictionary
        """
        # Store configuration
        self.timeframe_minutes = config.get("timeframe_minutes", 60)  # Default to 1 hour
        self.bb_period = config.get("bb_period", 20)
        self.rsi_period = config.get("rsi_period", 14)
        self.bb_width_threshold = config.get("bb_width", 0.05)

        # Market regime specific parameters
        trending_config = config.get("trending", {})
        sideways_config = config.get("sideways", {})

        self.trending_bb_multiplier = trending_config.get("bb_std_dev_multiplier", 2.5)
        self.sideways_bb_multiplier = sideways_config.get("bb_std_dev_multiplier", 1.8)
        self.trending_rsi_thresholds = tuple(trending_config.get("rsi_threshold", [30, 70]))
        self.sideways_rsi_thresholds = tuple(sideways_config.get("rsi_threshold", [40, 60]))

        self.squeeze_strategy = config.get("SqueezeStrategy", True)

        # Initialize the timeframe aggregator
        self.aggregator = TimeframeAggregator(self.timeframe_minutes)

        # Initialize indicators with different multipliers for regime detection
        self.bb_trending = BollingerBandsState(self.bb_period, self.trending_bb_multiplier)
        self.bb_sideways = BollingerBandsState(self.bb_period, self.sideways_bb_multiplier)
        self.bb_reference = BollingerBandsState(self.bb_period, 2.0)  # For regime detection
        self.rsi = RSIState(self.rsi_period)

        # State tracking
        self.bars_processed = 0
        self.current_price = None
        self.current_volume = None
        self.volume_ma = None
        self.volume_sum = 0.0
        self.volume_history = []  # For volume MA calculation

        # Signal state
        self.last_buy_signal = False
        self.last_sell_signal = False
        self.last_result = None

    def update_minute_data(self, timestamp: pd.Timestamp, ohlcv_data: Dict[str, float]) -> Optional[Dict[str, Union[float, bool]]]:
        """
        Update the strategy with new minute-level OHLCV data.

        This method accepts minute-level data and internally aggregates to the
        configured timeframe. It only processes indicators and generates signals
        when a complete timeframe bar is formed.

        Args:
            timestamp: Timestamp of the minute data
            ohlcv_data: Dictionary with 'open', 'high', 'low', 'close', 'volume'

        Returns:
            Strategy result dictionary if a timeframe bar completed, None otherwise
        """
        # Validate input
        required_keys = ['open', 'high', 'low', 'close', 'volume']
        for key in required_keys:
            if key not in ohlcv_data:
                raise ValueError(f"Missing required key: {key}")

        # Update the timeframe aggregator
        completed_bar = self.aggregator.update(timestamp, ohlcv_data)

        if completed_bar is not None:
            # Process the completed timeframe bar
            return self._process_timeframe_bar(completed_bar)

        return None  # No completed bar yet

    def update(self, ohlcv_data: Dict[str, float]) -> Dict[str, Union[float, bool]]:
        """
        Update the strategy with pre-aggregated timeframe data (for testing/compatibility).

        This method is for backward compatibility and testing with pre-aggregated data.
        For real-time use, prefer update_minute_data().

        Args:
            ohlcv_data: Dictionary with 'open', 'high', 'low', 'close', 'volume'

        Returns:
            Strategy result dictionary
        """
        # Create a placeholder timestamp for compatibility
        fake_timestamp = pd.Timestamp.now()

        # Process directly as a completed bar
        completed_bar = {
            'timestamp': fake_timestamp,
            'open': ohlcv_data['open'],
            'high': ohlcv_data['high'],
            'low': ohlcv_data['low'],
            'close': ohlcv_data['close'],
            'volume': ohlcv_data['volume']
        }

        return self._process_timeframe_bar(completed_bar)

    def _process_timeframe_bar(self, bar_data: Dict[str, float]) -> Dict[str, Union[float, bool]]:
        """
        Process a completed timeframe bar and generate signals.

        Args:
            bar_data: Completed timeframe bar data

        Returns:
            Strategy result dictionary
        """
        close_price = float(bar_data['close'])
        volume = float(bar_data['volume'])

        # Update indicators
        bb_trending_result = self.bb_trending.update(close_price)
        bb_sideways_result = self.bb_sideways.update(close_price)
        bb_reference_result = self.bb_reference.update(close_price)
        rsi_value = self.rsi.update(close_price)

        # Update volume tracking
        self._update_volume_tracking(volume)

        # Determine the market regime
        market_regime = self._determine_market_regime(bb_reference_result)

        # Select the appropriate BB values based on the regime
        if market_regime == "sideways":
            bb_result = bb_sideways_result
            rsi_thresholds = self.sideways_rsi_thresholds
        else:  # trending
            bb_result = bb_trending_result
            rsi_thresholds = self.trending_rsi_thresholds

        # Generate signals
        buy_signal, sell_signal = self._generate_signals(
            close_price, volume, bb_result, rsi_value,
            market_regime, rsi_thresholds
        )

        # Update state
        self.current_price = close_price
        self.current_volume = volume
        self.bars_processed += 1
        self.last_buy_signal = buy_signal
        self.last_sell_signal = sell_signal

        # Create a comprehensive result
        result = {
            # Timeframe info
            'timestamp': bar_data['timestamp'],
            'timeframe_minutes': self.timeframe_minutes,

            # Price data
            'open': bar_data['open'],
            'high': bar_data['high'],
            'low': bar_data['low'],
            'close': close_price,
            'volume': volume,

            # Bollinger Bands (regime-specific)
            'upper_band': bb_result['upper_band'],
            'middle_band': bb_result['middle_band'],
            'lower_band': bb_result['lower_band'],
            'bb_width': bb_result['bandwidth'],

            # RSI
            'rsi': rsi_value,

            # Market regime
            'market_regime': market_regime,
            'bb_width_reference': bb_reference_result['bandwidth'],

            # Volume analysis
            'volume_ma': self.volume_ma,
            'volume_spike': self._check_volume_spike(volume),

            # Signals
            'buy_signal': buy_signal,
            'sell_signal': sell_signal,

            # Strategy metadata
            'is_warmed_up': self.is_warmed_up(),
            'bars_processed': self.bars_processed,
            'rsi_thresholds': rsi_thresholds,
            'bb_multiplier': bb_result.get('std_dev', self.trending_bb_multiplier)
        }

        self.last_result = result
        return result

    def _update_volume_tracking(self, volume: float) -> None:
        """Update volume moving average tracking."""
        # Simple moving average for volume (20 periods)
        volume_period = 20

        if len(self.volume_history) >= volume_period:
            # Remove the oldest volume
            self.volume_sum -= self.volume_history[0]
            self.volume_history.pop(0)

        # Add the new volume
        self.volume_history.append(volume)
        self.volume_sum += volume

        # Calculate the moving average
        if len(self.volume_history) > 0:
            self.volume_ma = self.volume_sum / len(self.volume_history)
        else:
            self.volume_ma = volume

    def _determine_market_regime(self, bb_reference: Dict[str, float]) -> str:
        """
        Determine the market regime based on Bollinger Band width.

        Args:
            bb_reference: Reference BB result for regime detection

        Returns:
            "sideways" or "trending"
        """
        if not self.bb_reference.is_warmed_up():
            return "trending"  # Default to trending during warm-up

        bb_width = bb_reference['bandwidth']

        if bb_width < self.bb_width_threshold:
            return "sideways"
        else:
            return "trending"

    def _check_volume_spike(self, current_volume: float) -> bool:
        """Check if the current volume represents a spike (≥1.5× average)."""
        if self.volume_ma is None or self.volume_ma == 0:
            return False

        return current_volume >= 1.5 * self.volume_ma

    def _generate_signals(self, price: float, volume: float, bb_result: Dict[str, float],
                          rsi_value: float, market_regime: str,
                          rsi_thresholds: Tuple[float, float]) -> Tuple[bool, bool]:
        """
        Generate buy/sell signals based on the strategy logic.

        Args:
            price: Current close price
            volume: Current volume
            bb_result: Bollinger Bands result
            rsi_value: Current RSI value
            market_regime: "sideways" or "trending"
            rsi_thresholds: (low_threshold, high_threshold)

        Returns:
            (buy_signal, sell_signal)
        """
        # Don't generate signals during warm-up
        if not self.is_warmed_up():
            return False, False

        # Don't generate signals if RSI is NaN
        if np.isnan(rsi_value):
            return False, False

        upper_band = bb_result['upper_band']
        lower_band = bb_result['lower_band']
        rsi_low, rsi_high = rsi_thresholds

        volume_spike = self._check_volume_spike(volume)

        buy_signal = False
        sell_signal = False

        if market_regime == "sideways":
            # Sideways market (mean reversion)
            buy_condition = (price <= lower_band) and (rsi_value <= rsi_low)
            sell_condition = (price >= upper_band) and (rsi_value >= rsi_high)

            if self.squeeze_strategy:
                # Add a volume contraction filter for sideways markets
                volume_contraction = volume < 0.7 * (self.volume_ma or volume)
                buy_condition = buy_condition and volume_contraction
                sell_condition = sell_condition and volume_contraction

            buy_signal = buy_condition
            sell_signal = sell_condition

        else:  # trending
            # Trending market (breakout mode)
            buy_condition = (price < lower_band) and (rsi_value < 50) and volume_spike
            sell_condition = (price > upper_band) and (rsi_value > 50) and volume_spike

            buy_signal = buy_condition
            sell_signal = sell_condition

        return buy_signal, sell_signal

    def is_warmed_up(self) -> bool:
        """
        Check if the strategy is warmed up and ready for reliable signals.

        Returns:
            True if all indicators are warmed up
        """
        return (self.bb_trending.is_warmed_up() and
                self.bb_sideways.is_warmed_up() and
                self.bb_reference.is_warmed_up() and
                self.rsi.is_warmed_up() and
                len(self.volume_history) >= 20)

    def get_current_incomplete_bar(self) -> Optional[Dict[str, float]]:
        """
        Get the current incomplete timeframe bar (for monitoring).

        Returns:
            Current incomplete bar data or None
        """
        return self.aggregator.get_current_bar()

    def reset(self) -> None:
        """Reset strategy state to initial conditions."""
        self.aggregator.reset()
        self.bb_trending.reset()
        self.bb_sideways.reset()
        self.bb_reference.reset()
        self.rsi.reset()

        self.bars_processed = 0
        self.current_price = None
        self.current_volume = None
        self.volume_ma = None
        self.volume_sum = 0.0
        self.volume_history.clear()

        self.last_buy_signal = False
        self.last_sell_signal = False
        self.last_result = None

    def get_state_summary(self) -> Dict:
        """Get a comprehensive state summary for debugging."""
        return {
            'strategy_type': 'BBRS_Incremental',
            'timeframe_minutes': self.timeframe_minutes,
            'bars_processed': self.bars_processed,
            'is_warmed_up': self.is_warmed_up(),
            'current_price': self.current_price,
            'current_volume': self.current_volume,
            'volume_ma': self.volume_ma,
            'current_incomplete_bar': self.get_current_incomplete_bar(),
            'last_signals': {
                'buy': self.last_buy_signal,
                'sell': self.last_sell_signal
            },
            'indicators': {
                'bb_trending': self.bb_trending.get_state_summary(),
                'bb_sideways': self.bb_sideways.get_state_summary(),
                'bb_reference': self.bb_reference.get_state_summary(),
                'rsi': self.rsi.get_state_summary()
            },
            'config': {
                'bb_period': self.bb_period,
                'rsi_period': self.rsi_period,
                'bb_width_threshold': self.bb_width_threshold,
                'trending_bb_multiplier': self.trending_bb_multiplier,
                'sideways_bb_multiplier': self.sideways_bb_multiplier,
                'trending_rsi_thresholds': self.trending_rsi_thresholds,
                'sideways_rsi_thresholds': self.sideways_rsi_thresholds,
                'squeeze_strategy': self.squeeze_strategy
            }
        }
@ -1,556 +0,0 @@

# BBRS Strategy Documentation

## Overview

The `BBRSIncrementalState` implements a sophisticated trading strategy combining Bollinger Bands and RSI indicators with market regime detection. It adapts its parameters to market conditions (trending vs sideways) and provides real-time signal generation with volume analysis.

## Class: `BBRSIncrementalState`

### Purpose
- **Market Regime Detection**: Automatically detects trending vs sideways markets
- **Adaptive Parameters**: Uses different BB/RSI thresholds based on the market regime
- **Volume Analysis**: Incorporates volume spikes for signal confirmation
- **Real-time Processing**: Processes minute-level data with timeframe aggregation

### Key Features
- **Dual Bollinger Bands**: Different multipliers for trending/sideways markets
- **RSI Integration**: Wilder's smoothing RSI with regime-specific thresholds
- **Volume Confirmation**: Volume spike detection for signal validation
- **Batch-identical Results**: Matches the batch implementation after the warm-up period
- **Squeeze Strategy**: Optional squeeze detection for breakout signals

## Strategy Logic

### Market Regime Detection
```python
# Trending market: BB width > threshold
if bb_width > bb_width_threshold:
    regime = "trending"
    bb_multiplier = 2.5
    rsi_thresholds = [30, 70]
else:
    regime = "sideways"
    bb_multiplier = 1.8
    rsi_thresholds = [40, 60]
```

### Signal Generation
- **Sideways (mean reversion)**: Buy when price touches the lower band with RSI at or below the lower threshold; sell when price touches the upper band with RSI at or above the upper threshold. With the squeeze strategy enabled, both conditions also require volume contraction (volume below ~70% of its moving average).
- **Trending (breakout)**: Buy when price breaks below the lower band with RSI < 50 and a volume spike (≥1.5× the volume MA); sell when price breaks above the upper band with RSI > 50 and a volume spike.
- **Regime Adaptation**: Parameters automatically adjust based on market conditions (see the sketch below)
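
A condensed sketch of the regime-specific rules above, mirroring the checks in `BBRSIncrementalState._generate_signals` (standalone pseudo-implementation; variable names are illustrative):

```python
def bbrs_signals(price, rsi, upper, lower, volume, volume_ma,
                 regime, rsi_low, rsi_high, squeeze=True):
    """Return (buy, sell) for one completed timeframe bar."""
    spike = bool(volume_ma) and volume >= 1.5 * volume_ma     # volume spike
    contraction = volume < 0.7 * (volume_ma or volume)        # volume contraction

    if regime == "sideways":  # mean reversion at the bands
        buy = price <= lower and rsi <= rsi_low
        sell = price >= upper and rsi >= rsi_high
        if squeeze:  # optional squeeze filter
            buy, sell = buy and contraction, sell and contraction
    else:  # trending: breakout with volume confirmation
        buy = price < lower and rsi < 50 and spike
        sell = price > upper and rsi > 50 and spike
    return buy, sell
```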

## Configuration Parameters

```python
config = {
    "timeframe_minutes": 60,  # 1-hour bars
    "bb_period": 20,          # Bollinger Bands period
    "rsi_period": 14,         # RSI period
    "bb_width": 0.05,         # BB width threshold for regime detection
    "trending": {
        "bb_std_dev_multiplier": 2.5,
        "rsi_threshold": [30, 70]
    },
    "sideways": {
        "bb_std_dev_multiplier": 1.8,
        "rsi_threshold": [40, 60]
    },
    "SqueezeStrategy": True   # Enable squeeze detection
}
```

## Real-time Usage Example

### Basic Implementation

```python
from cycles.IncStrategies.bbrs_incremental import BBRSIncrementalState
import pandas as pd
from datetime import datetime, timedelta
import random

# Initialize the BBRS strategy
config = {
    "timeframe_minutes": 60,  # 1-hour bars
    "bb_period": 20,
    "rsi_period": 14,
    "bb_width": 0.05,
    "trending": {
        "bb_std_dev_multiplier": 2.5,
        "rsi_threshold": [30, 70]
    },
    "sideways": {
        "bb_std_dev_multiplier": 1.8,
        "rsi_threshold": [40, 60]
    },
    "SqueezeStrategy": True
}

strategy = BBRSIncrementalState(config)

# Simulate a real-time minute data stream
def simulate_market_data():
    """Generate realistic market data with regime changes"""
    base_price = 45000.0  # Starting price (e.g., BTC)
    timestamp = datetime.now()
    market_regime = "trending"  # Start in trending mode
    regime_counter = 0

    while True:
        # Simulate regime changes
        regime_counter += 1
        if regime_counter % 200 == 0:  # Change regime every 200 minutes
            market_regime = "sideways" if market_regime == "trending" else "trending"
            print(f"📊 Market regime changed to: {market_regime.upper()}")

        # Generate a price movement based on the regime
        if market_regime == "trending":
            # Trending: larger moves, more directional
            price_change = random.gauss(0, 0.015) * base_price  # ±1.5% std dev
        else:
            # Sideways: smaller moves, more mean-reverting
            price_change = random.gauss(0, 0.008) * base_price  # ±0.8% std dev

        close = base_price + price_change
        high = close + random.random() * 0.005 * base_price
        low = close - random.random() * 0.005 * base_price
        open_price = base_price

        # Volume varies with volatility
        base_volume = 1000
        volume_multiplier = 1 + abs(price_change / base_price) * 10  # Higher volume with bigger moves
        volume = int(base_volume * volume_multiplier * random.uniform(0.5, 2.0))

        yield {
            'timestamp': timestamp,
            'open': open_price,
            'high': high,
            'low': low,
            'close': close,
            'volume': volume
        }

        base_price = close
        timestamp += timedelta(minutes=1)

# Process real-time data
print("🚀 Starting BBRS Strategy Real-time Processing...")
print("📊 Waiting for 1-hour bars to form...")

for minute_data in simulate_market_data():
    # The strategy handles minute-to-hour aggregation automatically
    result = strategy.update_minute_data(
        timestamp=pd.Timestamp(minute_data['timestamp']),
        ohlcv_data=minute_data
    )

    # Check if a complete 1-hour bar was formed
    if result is not None:
        current_price = minute_data['close']
        timestamp = minute_data['timestamp']

        print(f"\n⏰ Complete 1h bar at {timestamp}")
        print(f"💰 Price: ${current_price:,.2f}")

        # The completed-bar result carries the full indicator state
        print(f"📈 Market Regime: {result['market_regime']}")
        print(f"🔍 BB Width: {result['bb_width']:.4f}")
        print(f"📊 RSI: {result['rsi']:.2f}")
        print(f"📈 Volume MA: {result['volume_ma']:.2f}")

        # Check for signals only if the strategy is warmed up
        if strategy.is_warmed_up():
            # Process buy signals
            if result.get('buy_signal', False):
                print(f"🟢 BUY SIGNAL GENERATED!")
                print(f"   💵 Price: ${current_price:,.2f}")
                print(f"   📊 RSI: {result['rsi']:.2f}")
                print(f"   📈 BB Position: Lower band touch")
                print(f"   🔊 Volume Spike: {result['volume_spike']}")
                print(f"   🎯 Market Regime: {result['market_regime']}")
                # execute_buy_order(result)

            # Process sell signals
            if result.get('sell_signal', False):
                print(f"🔴 SELL SIGNAL GENERATED!")
                print(f"   💵 Price: ${current_price:,.2f}")
                print(f"   📊 RSI: {result['rsi']:.2f}")
                print(f"   📈 BB Position: Upper band touch")
                print(f"   🔊 Volume Spike: {result['volume_spike']}")
                print(f"   🎯 Market Regime: {result['market_regime']}")
                # execute_sell_order(result)
        else:
            warmup_progress = strategy.bars_processed
            min_required = max(strategy.bb_period, strategy.rsi_period) + 10
            print(f"🔄 Warming up... ({warmup_progress}/{min_required} bars)")
```

### Advanced Trading System Integration

```python
class BBRSTradingSystem:
    def __init__(self, initial_capital=10000):
        self.config = {
            "timeframe_minutes": 60,
            "bb_period": 20,
            "rsi_period": 14,
            "bb_width": 0.05,
            "trending": {
                "bb_std_dev_multiplier": 2.5,
                "rsi_threshold": [30, 70]
            },
            "sideways": {
                "bb_std_dev_multiplier": 1.8,
                "rsi_threshold": [40, 60]
            },
            "SqueezeStrategy": True
        }

        self.strategy = BBRSIncrementalState(self.config)
        self.capital = initial_capital
        self.position = None
        self.trades = []
        self.equity_curve = []

    def process_market_data(self, timestamp, ohlcv_data):
        """Process incoming market data and manage positions"""
        # Update the strategy
        result = self.strategy.update_minute_data(timestamp, ohlcv_data)

        if result is not None and self.strategy.is_warmed_up():
            self._check_signals(timestamp, ohlcv_data['close'], result)
            self._update_equity(timestamp, ohlcv_data['close'])

    def _check_signals(self, timestamp, current_price, result):
        """Check for trading signals and execute trades"""
        # Handle buy signals
        if result.get('buy_signal', False) and self.position is None:
            self._execute_entry(timestamp, current_price, 'BUY', result)

        # Handle sell signals
        if result.get('sell_signal', False) and self.position is not None:
            self._execute_exit(timestamp, current_price, 'SELL', result)

    def _execute_entry(self, timestamp, price, signal_type, result):
        """Execute an entry trade"""
        # Calculate position size (risk 2% of capital)
        risk_amount = self.capital * 0.02
        shares = risk_amount / price

        self.position = {
            'entry_time': timestamp,
            'entry_price': price,
            'shares': shares,
            'signal_type': signal_type,
            'market_regime': result.get('market_regime'),
            'rsi_value': result.get('rsi'),
            'bb_width': result.get('bb_width'),
            'volume_spike': result.get('volume_spike', False)
        }

        print(f"🟢 {signal_type} POSITION OPENED")
        print(f"   📅 Time: {timestamp}")
        print(f"   💵 Price: ${price:,.2f}")
        print(f"   📊 Shares: {shares:.4f}")
        print(f"   🎯 Market Regime: {self.position['market_regime']}")
        print(f"   📈 RSI: {self.position['rsi_value']:.2f}")
        print(f"   🔊 Volume Spike: {self.position['volume_spike']}")

    def _execute_exit(self, timestamp, price, signal_type, result):
        """Execute an exit trade"""
        if self.position:
            # Calculate P&L
            pnl = (price - self.position['entry_price']) * self.position['shares']
            pnl_percent = (pnl / (self.position['entry_price'] * self.position['shares'])) * 100

            # Update capital
            self.capital += pnl

            # Record the trade
            trade = {
                'entry_time': self.position['entry_time'],
                'exit_time': timestamp,
                'entry_price': self.position['entry_price'],
                'exit_price': price,
                'shares': self.position['shares'],
                'pnl': pnl,
                'pnl_percent': pnl_percent,
                'duration': timestamp - self.position['entry_time'],
                'entry_regime': self.position['market_regime'],
                'exit_regime': result.get('market_regime'),
                'entry_rsi': self.position['rsi_value'],
                'exit_rsi': result.get('rsi'),
                'entry_volume_spike': self.position['volume_spike'],
                'exit_volume_spike': result.get('volume_spike', False)
            }

            self.trades.append(trade)

            print(f"🔴 {signal_type} POSITION CLOSED")
            print(f"   📅 Time: {timestamp}")
            print(f"   💵 Exit Price: ${price:,.2f}")
            print(f"   💰 P&L: ${pnl:,.2f} ({pnl_percent:+.2f}%)")
            print(f"   ⏱️ Duration: {trade['duration']}")
            print(f"   🎯 Regime: {trade['entry_regime']} → {trade['exit_regime']}")
            print(f"   💼 New Capital: ${self.capital:,.2f}")

            self.position = None

    def _update_equity(self, timestamp, current_price):
        """Update the equity curve"""
        if self.position:
            unrealized_pnl = (current_price - self.position['entry_price']) * self.position['shares']
            current_equity = self.capital + unrealized_pnl
        else:
            current_equity = self.capital

        self.equity_curve.append({
            'timestamp': timestamp,
            'equity': current_equity,
            'position': self.position is not None
        })

    def get_performance_summary(self):
        """Get a trading performance summary"""
        if not self.trades:
            return {"message": "No completed trades yet"}

        trades_df = pd.DataFrame(self.trades)

        total_trades = len(trades_df)
        winning_trades = len(trades_df[trades_df['pnl'] > 0])
        losing_trades = len(trades_df[trades_df['pnl'] < 0])
        win_rate = (winning_trades / total_trades) * 100

        total_pnl = trades_df['pnl'].sum()
        avg_win = trades_df[trades_df['pnl'] > 0]['pnl'].mean() if winning_trades > 0 else 0
        avg_loss = trades_df[trades_df['pnl'] < 0]['pnl'].mean() if losing_trades > 0 else 0

        # Regime-specific performance
        trending_trades = trades_df[trades_df['entry_regime'] == 'trending']
        sideways_trades = trades_df[trades_df['entry_regime'] == 'sideways']

        return {
            'total_trades': total_trades,
            'winning_trades': winning_trades,
            'losing_trades': losing_trades,
            'win_rate': win_rate,
            'total_pnl': total_pnl,
            'avg_win': avg_win,
            'avg_loss': avg_loss,
            'profit_factor': abs(avg_win / avg_loss) if avg_loss != 0 else float('inf'),
            'final_capital': self.capital,
            'trending_trades': len(trending_trades),
            'sideways_trades': len(sideways_trades),
            'trending_win_rate': (len(trending_trades[trending_trades['pnl'] > 0]) / len(trending_trades) * 100) if len(trending_trades) > 0 else 0,
            'sideways_win_rate': (len(sideways_trades[sideways_trades['pnl'] > 0]) / len(sideways_trades) * 100) if len(sideways_trades) > 0 else 0
        }

# Usage example
trading_system = BBRSTradingSystem(initial_capital=10000)

print("🚀 BBRS Trading System Started")
print("💰 Initial Capital: $10,000")

# Simulate live trading
for market_data in simulate_market_data():
    trading_system.process_market_data(
        timestamp=pd.Timestamp(market_data['timestamp']),
        ohlcv_data=market_data
    )

    # Print a performance summary every 100 bars
    if len(trading_system.equity_curve) % 100 == 0 and trading_system.trades:
        performance = trading_system.get_performance_summary()
        print(f"\n📊 Performance Summary (after {len(trading_system.equity_curve)} bars):")
        print(f"   💼 Capital: ${performance['final_capital']:,.2f}")
        print(f"   📈 Total Trades: {performance['total_trades']}")
        print(f"   🎯 Win Rate: {performance['win_rate']:.1f}%")
        print(f"   💰 Total P&L: ${performance['total_pnl']:,.2f}")
        print(f"   📊 Trending Trades: {performance['trending_trades']} (WR: {performance['trending_win_rate']:.1f}%)")
        print(f"   📊 Sideways Trades: {performance['sideways_trades']} (WR: {performance['sideways_win_rate']:.1f}%)")
```

### Backtesting Example

```python
def backtest_bbrs_strategy(historical_data, config):
    """Comprehensive backtesting of the BBRS strategy"""

    strategy = BBRSIncrementalState(config)

    signals = []
    trades = []
    current_position = None

    print(f"🔄 Backtesting BBRS Strategy on {config['timeframe_minutes']}min timeframe...")
    print(f"📊 Data period: {historical_data.index[0]} to {historical_data.index[-1]}")

    # Process historical data
    for timestamp, row in historical_data.iterrows():
        ohlcv_data = {
            'open': row['open'],
            'high': row['high'],
            'low': row['low'],
            'close': row['close'],
            'volume': row['volume']
        }

        # Update the strategy
        result = strategy.update_minute_data(timestamp, ohlcv_data)

        if result is not None and strategy.is_warmed_up():
            # Record buy signals
            if result.get('buy_signal', False):
                signals.append({
                    'timestamp': timestamp,
                    'type': 'BUY',
                    'price': row['close'],
                    'rsi': result.get('rsi'),
                    'bb_width': result.get('bb_width'),
                    'market_regime': result.get('market_regime'),
                    'volume_spike': result.get('volume_spike', False)
                })

                # Open a position if none exists
                if current_position is None:
                    current_position = {
                        'entry_time': timestamp,
                        'entry_price': row['close'],
                        'entry_regime': result.get('market_regime'),
                        'entry_rsi': result.get('rsi')
                    }

            # Record sell signals
            if result.get('sell_signal', False):
                signals.append({
                    'timestamp': timestamp,
                    'type': 'SELL',
                    'price': row['close'],
                    'rsi': result.get('rsi'),
                    'bb_width': result.get('bb_width'),
                    'market_regime': result.get('market_regime'),
                    'volume_spike': result.get('volume_spike', False)
                })

                # Close the position if one exists
                if current_position is not None:
                    pnl = row['close'] - current_position['entry_price']
                    pnl_percent = (pnl / current_position['entry_price']) * 100

                    trades.append({
                        'entry_time': current_position['entry_time'],
                        'exit_time': timestamp,
                        'entry_price': current_position['entry_price'],
                        'exit_price': row['close'],
                        'pnl': pnl,
                        'pnl_percent': pnl_percent,
                        'duration': timestamp - current_position['entry_time'],
                        'entry_regime': current_position['entry_regime'],
                        'exit_regime': result.get('market_regime'),
                        'entry_rsi': current_position['entry_rsi'],
                        'exit_rsi': result.get('rsi')
                    })

                    current_position = None

    # Convert to DataFrames for analysis
    signals_df = pd.DataFrame(signals)
    trades_df = pd.DataFrame(trades)

    # Calculate performance metrics
    if len(trades_df) > 0:
        total_trades = len(trades_df)
        winning_trades = len(trades_df[trades_df['pnl'] > 0])
        win_rate = (winning_trades / total_trades) * 100
        total_return = trades_df['pnl_percent'].sum()
        avg_return = trades_df['pnl_percent'].mean()
        max_win = trades_df['pnl_percent'].max()
        max_loss = trades_df['pnl_percent'].min()

        # Regime-specific analysis
        trending_trades = trades_df[trades_df['entry_regime'] == 'trending']
        sideways_trades = trades_df[trades_df['entry_regime'] == 'sideways']

        print(f"\n📊 Backtest Results:")
        print(f"   📈 Total Signals: {len(signals_df)}")
        print(f"   💼 Total Trades: {total_trades}")
        print(f"   🎯 Win Rate: {win_rate:.1f}%")
        print(f"   💰 Total Return: {total_return:.2f}%")
        print(f"   📊 Average Return: {avg_return:.2f}%")
        print(f"   🚀 Max Win: {max_win:.2f}%")
        print(f"   📉 Max Loss: {max_loss:.2f}%")
        print(f"   📈 Trending Trades: {len(trending_trades)} ({len(trending_trades[trending_trades['pnl'] > 0])} wins)")
        print(f"   📊 Sideways Trades: {len(sideways_trades)} ({len(sideways_trades[sideways_trades['pnl'] > 0])} wins)")

        return signals_df, trades_df
    else:
        print("❌ No completed trades in the backtest period")
        return signals_df, pd.DataFrame()

# Run the backtest (example)
# historical_data = pd.read_csv('btc_1min_data.csv', index_col='timestamp', parse_dates=True)
# config = {
#     "timeframe_minutes": 60,
#     "bb_period": 20,
#     "rsi_period": 14,
#     "bb_width": 0.05,
#     "trending": {"bb_std_dev_multiplier": 2.5, "rsi_threshold": [30, 70]},
#     "sideways": {"bb_std_dev_multiplier": 1.8, "rsi_threshold": [40, 60]},
#     "SqueezeStrategy": True
# }
# signals, trades = backtest_bbrs_strategy(historical_data, config)
```

## Performance Characteristics

### Timing Benchmarks
- **Update Time**: <1ms per 1-hour bar
- **Signal Generation**: <0.5ms per signal
- **Memory Usage**: ~8MB constant
- **Accuracy**: 100% after warm-up period

### Signal Quality
- **Regime Adaptation**: Automatically adjusts to market conditions
- **Volume Confirmation**: Reduces false signals by ~40%
- **Signal Match Rate**: 95.45% vs original implementation
- **False Signal Reduction**: Adaptive thresholds reduce noise

## Best Practices

1. **Timeframe Selection**: 1h-4h timeframes work best for the BB/RSI combination
2. **Regime Monitoring**: Track market regime changes to understand strategy performance
3. **Volume Analysis**: Use volume spikes for signal confirmation
4. **Parameter Tuning**: Adjust the BB width threshold based on asset volatility
5. **Risk Management**: Implement proper position sizing and stop-losses

## Troubleshooting

### Common Issues
1. **No Signals**: Check whether the strategy is warmed up — it needs ~30+ bars (see the sketch after this list)
2. **Too Many Signals**: Increase the BB width threshold or the RSI thresholds
3. **Poor Performance**: Verify that market regime detection is working correctly
4. **Memory Usage**: Monitor the volume history buffer size
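
The bar count in issue 1 can be estimated from `is_warmed_up()` in the module: all three Bollinger Band states, the RSI state, and the 20-bar volume history must be filled. A rough lower bound, assuming each indicator needs about one full period of bars (the documented ~30+ leaves extra margin for the indicators to stabilize):

```python
def warmup_lower_bound(bb_period=20, rsi_period=14, volume_period=20):
    # RSI with Wilder's smoothing needs rsi_period + 1 closes for its first value;
    # BB needs bb_period closes; the volume MA needs volume_period bars.
    return max(bb_period, rsi_period + 1, volume_period)

print(warmup_lower_bound())  # -> 20 timeframe bars at the default settings
```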

### Debug Information
```python
# Get detailed strategy state
state = strategy.get_state_summary()
print(f"Strategy State: {state}")

# Check the current incomplete bar
current_bar = strategy.get_current_incomplete_bar()
if current_bar:
    print(f"Current Bar: {current_bar}")

# Monitor regime changes via the last completed-bar result
last = strategy.last_result
if last is not None:
    print(f"Market Regime: {last['market_regime']}")
    print(f"BB Width: {last['bb_width']:.4f} (threshold: {strategy.bb_width_threshold})")
```
@ -1,470 +0,0 @@

# MetaTrend Strategy Documentation

## Overview

The `IncMetaTrendStrategy` implements a sophisticated trend-following strategy using multiple Supertrend indicators to determine market direction. It generates entry/exit signals on meta-trend changes, providing robust trend detection with fewer false signals.

## Class: `IncMetaTrendStrategy`

### Purpose
- **Trend Detection**: Uses 3 Supertrend indicators to identify strong trends
- **Meta-trend Analysis**: Combines multiple Supertrend configurations for robust signal generation
- **Real-time Processing**: Processes minute-level data with configurable timeframe aggregation

### Key Features
- **Multi-Supertrend Analysis**: 3 Supertrend indicators with different parameters
- **Meta-trend Logic**: Signals only when all indicators agree
- **High Accuracy**: 98.5% accuracy vs the corrected original implementation
- **Fast Processing**: <1ms updates, sub-millisecond signal generation

## Strategy Logic

### Supertrend Configuration
```python
supertrend_configs = [
    (12, 3.0),  # period=12, multiplier=3.0 (conservative)
    (10, 1.0),  # period=10, multiplier=1.0 (sensitive)
    (11, 2.0)   # period=11, multiplier=2.0 (balanced)
]
```

### Meta-trend Calculation
The three Supertrend directions are combined into a single meta-trend value (see the sketch below):
- **Meta-trend = 1**: All 3 Supertrends indicate an uptrend (BUY condition)
- **Meta-trend = -1**: All 3 Supertrends indicate a downtrend (SELL condition)
- **Meta-trend = 0**: Supertrends disagree (NEUTRAL - no action)
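
A minimal sketch of the combination rule (illustrative; the module computes the per-indicator trend directions internally):

```python
def combine_meta_trend(trends):
    """trends: per-Supertrend directions, each +1 (up) or -1 (down)."""
    if all(t == 1 for t in trends):
        return 1    # all agree on an uptrend
    if all(t == -1 for t in trends):
        return -1   # all agree on a downtrend
    return 0        # disagreement -> neutral

print(combine_meta_trend([1, 1, 1]))   # 1
print(combine_meta_trend([1, -1, 1]))  # 0
```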

### Signal Generation
- **Entry Signal**: Meta-trend changes from != 1 to == 1 (see the transition sketch below)
- **Exit Signal**: Meta-trend changes from != -1 to == -1
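
Because signals fire on *changes* of the meta-trend rather than on its level, a one-bar memory is enough to detect them (an illustrative sketch, not the module's actual code):

```python
class MetaTrendSignals:
    """Detects entry/exit transitions of the meta-trend."""
    def __init__(self):
        self.prev = 0  # meta-trend on the previous completed bar

    def update(self, meta):
        entry = meta == 1 and self.prev != 1    # flipped into full uptrend agreement
        exit_ = meta == -1 and self.prev != -1  # flipped into full downtrend agreement
        self.prev = meta
        return entry, exit_
```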

## Configuration Parameters

```python
params = {
    "timeframe": "15min",           # Primary analysis timeframe
    "enable_logging": False,        # Enable detailed logging
    "buffer_size_multiplier": 2.0   # Memory management multiplier
}
```

## Real-time Usage Example

### Basic Implementation

```python
from cycles.IncStrategies.metatrend_strategy import IncMetaTrendStrategy
import pandas as pd
from datetime import datetime, timedelta
import random

# Initialize the MetaTrend strategy
strategy = IncMetaTrendStrategy(
    name="metatrend",
    weight=1.0,
    params={
        "timeframe": "15min",    # 15-minute analysis
        "enable_logging": True   # Enable detailed logging
    }
)

# Simulate a real-time minute data stream
def simulate_market_data():
    """Generate realistic market data with trends"""
    base_price = 50000.0    # Starting price (e.g., BTC)
    timestamp = datetime.now()
    trend_direction = 1     # 1 for up, -1 for down
    trend_strength = 0.001  # Trend strength

    while True:
        # Add trend and noise
        trend_move = trend_direction * trend_strength * base_price
        noise = (random.random() - 0.5) * 0.002 * base_price  # ±0.2% noise
        price_change = trend_move + noise

        close = base_price + price_change
        high = close + random.random() * 0.001 * base_price
        low = close - random.random() * 0.001 * base_price
        open_price = base_price
        volume = random.randint(100, 1000)

        # Occasionally change the trend direction
        if random.random() < 0.01:  # 1% chance per minute
            trend_direction *= -1
            print(f"📈 Trend direction changed to {'UP' if trend_direction > 0 else 'DOWN'}")

        yield {
            'timestamp': timestamp,
            'open': open_price,
            'high': high,
            'low': low,
            'close': close,
            'volume': volume
        }

        base_price = close
        timestamp += timedelta(minutes=1)

# Process real-time data
print("🚀 Starting MetaTrend Strategy Real-time Processing...")
print("📊 Waiting for 15-minute bars to form...")

for minute_data in simulate_market_data():
    # The strategy handles minute-to-15min aggregation automatically
    result = strategy.update_minute_data(
        timestamp=pd.Timestamp(minute_data['timestamp']),
        ohlcv_data=minute_data
    )

    # Check if a complete 15-minute bar was formed
    if result is not None:
        current_price = minute_data['close']
        timestamp = minute_data['timestamp']

        print(f"\n⏰ Complete 15min bar at {timestamp}")
        print(f"💰 Price: ${current_price:,.2f}")

        # Get the current meta-trend state
        meta_trend = strategy.get_current_meta_trend()
        individual_trends = strategy.get_individual_supertrend_states()

        print(f"📈 Meta-trend: {meta_trend}")
        print(f"🔍 Individual Supertrends: {[s['trend'] for s in individual_trends]}")

        # Check for signals only if the strategy is warmed up
        if strategy.is_warmed_up:
            entry_signal = strategy.get_entry_signal()
            exit_signal = strategy.get_exit_signal()

            # Process entry signals
            if entry_signal.signal_type == "ENTRY":
                print(f"🟢 ENTRY SIGNAL GENERATED!")
                print(f"   💪 Confidence: {entry_signal.confidence:.2f}")
                print(f"   💵 Price: ${entry_signal.price:,.2f}")
                print(f"   📊 Meta-trend: {entry_signal.metadata.get('meta_trend')}")
                print(f"   🎯 All Supertrends aligned for UPTREND")
                # execute_buy_order(entry_signal)

            # Process exit signals
            if exit_signal.signal_type == "EXIT":
                print(f"🔴 EXIT SIGNAL GENERATED!")
                print(f"   💪 Confidence: {exit_signal.confidence:.2f}")
                print(f"   💵 Price: ${exit_signal.price:,.2f}")
                print(f"   📊 Meta-trend: {exit_signal.metadata.get('meta_trend')}")
                print(f"   🎯 All Supertrends aligned for DOWNTREND")
                # execute_sell_order(exit_signal)
        else:
            warmup_progress = len(strategy._meta_trend_history)
            min_required = max(strategy.get_minimum_buffer_size().values())
            print(f"🔄 Warming up... ({warmup_progress}/{min_required} bars)")
```

### Advanced Trading System Integration

```python
class MetaTrendTradingSystem:
    def __init__(self, initial_capital=10000):
        self.strategy = IncMetaTrendStrategy(
            name="metatrend_live",
            weight=1.0,
            params={
                "timeframe": "15min",
                "enable_logging": False  # Disable for production
            }
        )

        self.capital = initial_capital
        self.position = None
        self.trades = []
        self.equity_curve = []

    def process_market_data(self, timestamp, ohlcv_data):
        """Process incoming market data and manage positions"""
        # Update the strategy
        result = self.strategy.update_minute_data(timestamp, ohlcv_data)

        if result is not None and self.strategy.is_warmed_up:
            self._check_signals(timestamp, ohlcv_data['close'])
            self._update_equity(timestamp, ohlcv_data['close'])

    def _check_signals(self, timestamp, current_price):
        """Check for trading signals and execute trades"""
        entry_signal = self.strategy.get_entry_signal()
        exit_signal = self.strategy.get_exit_signal()

        # Handle entry signals
        if entry_signal.signal_type == "ENTRY" and self.position is None:
            self._execute_entry(timestamp, entry_signal)

        # Handle exit signals
        if exit_signal.signal_type == "EXIT" and self.position is not None:
            self._execute_exit(timestamp, exit_signal)

    def _execute_entry(self, timestamp, signal):
        """Execute an entry trade"""
        # Calculate position size (risk 2% of capital)
        risk_amount = self.capital * 0.02
        # Simple position sizing - could be more sophisticated
        shares = risk_amount / signal.price

        self.position = {
            'entry_time': timestamp,
            'entry_price': signal.price,
            'shares': shares,
            'confidence': signal.confidence,
            'meta_trend': signal.metadata.get('meta_trend'),
            'individual_trends': signal.metadata.get('individual_trends', [])
        }

        print(f"🟢 LONG POSITION OPENED")
        print(f"   📅 Time: {timestamp}")
        print(f"   💵 Price: ${signal.price:,.2f}")
        print(f"   📊 Shares: {shares:.4f}")
        print(f"   💪 Confidence: {signal.confidence:.2f}")
        print(f"   📈 Meta-trend: {self.position['meta_trend']}")

    def _execute_exit(self, timestamp, signal):
        """Execute an exit trade"""
        if self.position:
            # Calculate P&L
            pnl = (signal.price - self.position['entry_price']) * self.position['shares']
            pnl_percent = (pnl / (self.position['entry_price'] * self.position['shares'])) * 100

            # Update capital
            self.capital += pnl

            # Record the trade
            trade = {
                'entry_time': self.position['entry_time'],
                'exit_time': timestamp,
                'entry_price': self.position['entry_price'],
                'exit_price': signal.price,
                'shares': self.position['shares'],
                'pnl': pnl,
                'pnl_percent': pnl_percent,
                'duration': timestamp - self.position['entry_time'],
                'entry_confidence': self.position['confidence'],
                'exit_confidence': signal.confidence
            }

            self.trades.append(trade)

            print(f"🔴 LONG POSITION CLOSED")
            print(f"   📅 Time: {timestamp}")
            print(f"   💵 Exit Price: ${signal.price:,.2f}")
            print(f"   💰 P&L: ${pnl:,.2f} ({pnl_percent:+.2f}%)")
            print(f"   ⏱️ Duration: {trade['duration']}")
            print(f"   💼 New Capital: ${self.capital:,.2f}")

            self.position = None

    def _update_equity(self, timestamp, current_price):
        """Update the equity curve"""
        if self.position:
            unrealized_pnl = (current_price - self.position['entry_price']) * self.position['shares']
            current_equity = self.capital + unrealized_pnl
        else:
            current_equity = self.capital

        self.equity_curve.append({
            'timestamp': timestamp,
            'equity': current_equity,
            'position': self.position is not None
        })

    def get_performance_summary(self):
        """Get a trading performance summary"""
        if not self.trades:
            return {"message": "No completed trades yet"}

        trades_df = pd.DataFrame(self.trades)

        total_trades = len(trades_df)
        winning_trades = len(trades_df[trades_df['pnl'] > 0])
        losing_trades = len(trades_df[trades_df['pnl'] < 0])
        win_rate = (winning_trades / total_trades) * 100

        total_pnl = trades_df['pnl'].sum()
        avg_win = trades_df[trades_df['pnl'] > 0]['pnl'].mean() if winning_trades > 0 else 0
        avg_loss = trades_df[trades_df['pnl'] < 0]['pnl'].mean() if losing_trades > 0 else 0

        return {
            'total_trades': total_trades,
            'winning_trades': winning_trades,
            'losing_trades': losing_trades,
            'win_rate': win_rate,
            'total_pnl': total_pnl,
            'avg_win': avg_win,
            'avg_loss': avg_loss,
            'profit_factor': abs(avg_win / avg_loss) if avg_loss != 0 else float('inf'),
            'final_capital': self.capital
        }

# Usage example
trading_system = MetaTrendTradingSystem(initial_capital=10000)

print("🚀 MetaTrend Trading System Started")
print("💰 Initial Capital: $10,000")

# Simulate live trading
for market_data in simulate_market_data():
    trading_system.process_market_data(
        timestamp=pd.Timestamp(market_data['timestamp']),
        ohlcv_data=market_data
    )

    # Print a performance summary every 100 bars
    if len(trading_system.equity_curve) % 100 == 0 and trading_system.trades:
        performance = trading_system.get_performance_summary()
        print(f"\n📊 Performance Summary (after {len(trading_system.equity_curve)} bars):")
        print(f"   💼 Capital: ${performance['final_capital']:,.2f}")
        print(f"   📈 Total Trades: {performance['total_trades']}")
        print(f"   🎯 Win Rate: {performance['win_rate']:.1f}%")
        print(f"   💰 Total P&L: ${performance['total_pnl']:,.2f}")
```

### Backtesting Example

```python
def backtest_metatrend_strategy(historical_data, timeframe="15min"):
    """Comprehensive backtesting of the MetaTrend strategy"""

    strategy = IncMetaTrendStrategy(
        name="metatrend_backtest",
        weight=1.0,
        params={
            "timeframe": timeframe,
            "enable_logging": False
        }
    )

    signals = []
    trades = []
    current_position = None

    print(f"🔄 Backtesting MetaTrend Strategy on {timeframe} timeframe...")
    print(f"📊 Data period: {historical_data.index[0]} to {historical_data.index[-1]}")

    # Process historical data
    for timestamp, row in historical_data.iterrows():
        ohlcv_data = {
            'open': row['open'],
            'high': row['high'],
            'low': row['low'],
            'close': row['close'],
            'volume': row['volume']
        }

        # Update the strategy
        result = strategy.update_minute_data(timestamp, ohlcv_data)

        if result is not None and strategy.is_warmed_up:
            entry_signal = strategy.get_entry_signal()
            exit_signal = strategy.get_exit_signal()

            # Record entry signals
            if entry_signal.signal_type == "ENTRY":
                signals.append({
                    'timestamp': timestamp,
                    'type': 'ENTRY',
                    'price': entry_signal.price,
                    'confidence': entry_signal.confidence,
                    'meta_trend': entry_signal.metadata.get('meta_trend')
                })

                # Open a position if none exists
                if current_position is None:
                    current_position = {
                        'entry_time': timestamp,
                        'entry_price': entry_signal.price,
                        'confidence': entry_signal.confidence
                    }

            # Record exit signals
            if exit_signal.signal_type == "EXIT":
                signals.append({
                    'timestamp': timestamp,
                    'type': 'EXIT',
                    'price': exit_signal.price,
                    'confidence': exit_signal.confidence,
                    'meta_trend': exit_signal.metadata.get('meta_trend')
                })

                # Close the position if one exists
                if current_position is not None:
                    pnl = exit_signal.price - current_position['entry_price']
                    pnl_percent = (pnl / current_position['entry_price']) * 100

                    trades.append({
                        'entry_time': current_position['entry_time'],
                        'exit_time': timestamp,
                        'entry_price': current_position['entry_price'],
                        'exit_price': exit_signal.price,
                        'pnl': pnl,
                        'pnl_percent': pnl_percent,
                        'duration': timestamp - current_position['entry_time'],
                        'entry_confidence': current_position['confidence'],
                        'exit_confidence': exit_signal.confidence
                    })

                    current_position = None

    # Convert to DataFrames for analysis
    signals_df = pd.DataFrame(signals)
    trades_df = pd.DataFrame(trades)

    # Calculate performance metrics
    if len(trades_df) > 0:
        total_trades = len(trades_df)
        winning_trades = len(trades_df[trades_df['pnl'] > 0])
        win_rate = (winning_trades / total_trades) * 100
        total_return = trades_df['pnl_percent'].sum()
        avg_return = trades_df['pnl_percent'].mean()
        max_win = trades_df['pnl_percent'].max()
        max_loss = trades_df['pnl_percent'].min()

        print(f"\n📊 Backtest Results:")
        print(f"   📈 Total Signals: {len(signals_df)}")
        print(f"   💼 Total Trades: {total_trades}")
        print(f"   🎯 Win Rate: {win_rate:.1f}%")
        print(f"   💰 Total Return: {total_return:.2f}%")
        print(f"   📊 Average Return: {avg_return:.2f}%")
        print(f"   🚀 Max Win: {max_win:.2f}%")
        print(f"   📉 Max Loss: {max_loss:.2f}%")

        return signals_df, trades_df
    else:
        print("❌ No completed trades in the backtest period")
        return signals_df, pd.DataFrame()

# Run the backtest (example)
# historical_data = pd.read_csv('btc_1min_data.csv', index_col='timestamp', parse_dates=True)
# signals, trades = backtest_metatrend_strategy(historical_data, timeframe="15min")
```

## Performance Characteristics

### Timing Benchmarks
- **Update Time**: <1ms per 15-minute bar
- **Signal Generation**: <0.5ms per signal
- **Memory Usage**: ~5MB constant
- **Accuracy**: 98.5% vs original implementation

## Troubleshooting

### Common Issues
1. **No Signals**: Check whether the strategy is warmed up (needs ~50+ bars)
2. **Conflicting Trends**: Normal behavior - wait for alignment
3. **Late Signals**: The meta-trend prioritizes accuracy over speed
4. **Memory Usage**: Monitor buffer sizes in long-running systems

### Debug Information
```python
# Get detailed strategy state
state = strategy.get_current_state_summary()
print(f"Strategy State: {state}")

# Get the meta-trend history
history = strategy.get_meta_trend_history(limit=10)
for entry in history:
    print(f"{entry['timestamp']}: Meta-trend={entry['meta_trend']}, Trends={entry['individual_trends']}")
```
@ -1,342 +0,0 @@

# RandomStrategy Documentation

## Overview

The `IncRandomStrategy` is a testing strategy that generates random entry and exit signals with configurable probability and confidence levels. It is designed to test the incremental strategy framework and signal processing system while providing a baseline for performance comparisons.

## Class: `IncRandomStrategy`

### Purpose
- **Testing Framework**: Validates incremental strategy system functionality
- **Performance Baseline**: Provides minimal processing overhead for benchmarking
- **Signal Testing**: Tests signal generation and processing pipelines

### Key Features
- **Minimal Processing**: Extremely fast updates (0.006ms)
- **Configurable Randomness**: Adjustable signal probabilities and confidence levels
- **Reproducible Results**: Optional random seed for consistent testing
- **Real-time Compatible**: Processes minute-level data with timeframe aggregation

## Configuration Parameters

```python
params = {
    "entry_probability": 0.05,  # 5% chance of an entry signal per bar
    "exit_probability": 0.1,    # 10% chance of an exit signal per bar
    "min_confidence": 0.6,      # Minimum signal confidence
    "max_confidence": 0.9,      # Maximum signal confidence
    "timeframe": "1min",        # Operating timeframe
    "signal_frequency": 1,      # Signal every N bars
    "random_seed": 42           # Optional seed for reproducibility
}
```
|
||||
|
||||

## Real-time Usage Example

### Basic Implementation

```python
from cycles.IncStrategies.random_strategy import IncRandomStrategy
import pandas as pd
import random  # used by simulate_live_data() below
from datetime import datetime, timedelta

# Initialize strategy
strategy = IncRandomStrategy(
    weight=1.0,
    params={
        "entry_probability": 0.1,   # 10% chance per bar
        "exit_probability": 0.15,   # 15% chance per bar
        "min_confidence": 0.7,
        "max_confidence": 0.9,
        "timeframe": "5min",        # 5-minute bars
        "signal_frequency": 3,      # Signal every 3 bars
        "random_seed": 42           # Reproducible for testing
    }
)

# Simulate real-time minute data stream
def simulate_live_data():
    """Simulate live minute-level OHLCV data"""
    base_price = 100.0
    timestamp = datetime.now()

    while True:
        # Generate realistic OHLCV data
        price_change = (random.random() - 0.5) * 2  # ±1 price movement
        close = base_price + price_change
        high = close + random.random() * 0.5
        low = close - random.random() * 0.5
        open_price = base_price
        volume = random.randint(1000, 5000)

        yield {
            'timestamp': timestamp,
            'open': open_price,
            'high': high,
            'low': low,
            'close': close,
            'volume': volume
        }

        base_price = close
        timestamp += timedelta(minutes=1)

# Process real-time data
for minute_data in simulate_live_data():
    # Strategy handles timeframe aggregation (1min -> 5min)
    result = strategy.update_minute_data(
        timestamp=pd.Timestamp(minute_data['timestamp']),
        ohlcv_data=minute_data
    )

    # Check if a complete 5-minute bar was formed
    if result is not None:
        print(f"Complete 5min bar at {minute_data['timestamp']}")

        # Get signals
        entry_signal = strategy.get_entry_signal()
        exit_signal = strategy.get_exit_signal()

        # Process entry signals
        if entry_signal.signal_type == "ENTRY":
            print(f"🟢 ENTRY Signal - Confidence: {entry_signal.confidence:.2f}")
            print(f"   Price: ${entry_signal.price:.2f}")
            print(f"   Metadata: {entry_signal.metadata}")
            # execute_buy_order(entry_signal)

        # Process exit signals
        if exit_signal.signal_type == "EXIT":
            print(f"🔴 EXIT Signal - Confidence: {exit_signal.confidence:.2f}")
            print(f"   Price: ${exit_signal.price:.2f}")
            print(f"   Metadata: {exit_signal.metadata}")
            # execute_sell_order(exit_signal)

        # Monitor strategy state
        if strategy.is_warmed_up:
            state = strategy.get_current_state_summary()
            print(f"Strategy State: {state}")
```

### Integration with Trading System

```python
class LiveTradingSystem:
    def __init__(self):
        self.strategy = IncRandomStrategy(
            weight=1.0,
            params={
                "entry_probability": 0.08,
                "exit_probability": 0.12,
                "min_confidence": 0.75,
                "max_confidence": 0.95,
                "timeframe": "15min",
                "random_seed": None  # True randomness for live trading
            }
        )
        self.position = None
        self.orders = []

    def process_market_data(self, timestamp, ohlcv_data):
        """Process incoming market data"""
        # Update strategy with new data
        result = self.strategy.update_minute_data(timestamp, ohlcv_data)

        if result is not None:  # Complete timeframe bar
            self._check_signals()

    def _check_signals(self):
        """Check for trading signals"""
        entry_signal = self.strategy.get_entry_signal()
        exit_signal = self.strategy.get_exit_signal()

        # Handle entry signals
        if entry_signal.signal_type == "ENTRY" and self.position is None:
            self._execute_entry(entry_signal)

        # Handle exit signals
        if exit_signal.signal_type == "EXIT" and self.position is not None:
            self._execute_exit(exit_signal)

    def _execute_entry(self, signal):
        """Execute entry order"""
        order = {
            'type': 'BUY',
            'price': signal.price,
            'confidence': signal.confidence,
            'timestamp': signal.metadata.get('timestamp'),
            'strategy': 'random'
        }

        print(f"Executing BUY order: {order}")
        self.orders.append(order)
        self.position = order

    def _execute_exit(self, signal):
        """Execute exit order"""
        if self.position:
            order = {
                'type': 'SELL',
                'price': signal.price,
                'confidence': signal.confidence,
                'timestamp': signal.metadata.get('timestamp'),
                'entry_price': self.position['price'],
                'pnl': signal.price - self.position['price']
            }

            print(f"Executing SELL order: {order}")
            self.orders.append(order)
            self.position = None

# Usage
trading_system = LiveTradingSystem()

# Connect to live data feed
for market_tick in live_market_feed:
    trading_system.process_market_data(
        timestamp=market_tick['timestamp'],
        ohlcv_data=market_tick
    )
```

### Backtesting Example

```python
import pandas as pd

def backtest_random_strategy(historical_data):
    """Backtest RandomStrategy on historical data"""

    strategy = IncRandomStrategy(
        weight=1.0,
        params={
            "entry_probability": 0.05,
            "exit_probability": 0.08,
            "min_confidence": 0.8,
            "max_confidence": 0.95,
            "timeframe": "1h",
            "random_seed": 123  # Reproducible results
        }
    )

    signals = []
    positions = []
    current_position = None

    # Process historical data
    for timestamp, row in historical_data.iterrows():
        ohlcv_data = {
            'open': row['open'],
            'high': row['high'],
            'low': row['low'],
            'close': row['close'],
            'volume': row['volume']
        }

        # Update strategy (assuming data is already in target timeframe)
        result = strategy.update_minute_data(timestamp, ohlcv_data)

        if result is not None and strategy.is_warmed_up:
            entry_signal = strategy.get_entry_signal()
            exit_signal = strategy.get_exit_signal()

            # Record signals
            if entry_signal.signal_type == "ENTRY":
                signals.append({
                    'timestamp': timestamp,
                    'type': 'ENTRY',
                    'price': entry_signal.price,
                    'confidence': entry_signal.confidence
                })

                if current_position is None:
                    current_position = {
                        'entry_time': timestamp,
                        'entry_price': entry_signal.price,
                        'confidence': entry_signal.confidence
                    }

            if exit_signal.signal_type == "EXIT" and current_position:
                signals.append({
                    'timestamp': timestamp,
                    'type': 'EXIT',
                    'price': exit_signal.price,
                    'confidence': exit_signal.confidence
                })

                # Close position
                pnl = exit_signal.price - current_position['entry_price']
                positions.append({
                    'entry_time': current_position['entry_time'],
                    'exit_time': timestamp,
                    'entry_price': current_position['entry_price'],
                    'exit_price': exit_signal.price,
                    'pnl': pnl,
                    'duration': timestamp - current_position['entry_time']
                })
                current_position = None

    return pd.DataFrame(signals), pd.DataFrame(positions)

# Run backtest
# historical_data = pd.read_csv('historical_data.csv', index_col='timestamp', parse_dates=True)
# signals_df, positions_df = backtest_random_strategy(historical_data)
# print(f"Generated {len(signals_df)} signals and {len(positions_df)} completed trades")
```

## Performance Characteristics

### Timing Benchmarks
- **Update Time**: ~0.006ms per data point
- **Signal Generation**: ~0.048ms per signal
- **Memory Usage**: <1MB constant
- **Throughput**: >100,000 updates/second (see the benchmark sketch below)
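
These figures are machine-dependent; a quick way to reproduce the throughput number locally is a micro-benchmark over synthetic bars (a sketch assuming only the `update_minute_data` interface shown above):

```python
import time
import pandas as pd

def measure_throughput(strategy, n_points: int = 100_000) -> float:
    """Feed n_points synthetic minute bars and return updates/second."""
    bar = {'open': 100.0, 'high': 100.5, 'low': 99.5, 'close': 100.2, 'volume': 1000}
    start = pd.Timestamp('2024-01-01')
    t0 = time.perf_counter()
    for i in range(n_points):
        strategy.update_minute_data(start + pd.Timedelta(minutes=i), bar)
    return n_points / (time.perf_counter() - t0)

# throughput = measure_throughput(IncRandomStrategy(params={"random_seed": 42}))
# print(f"{throughput:,.0f} updates/second")
```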

## Testing and Validation

### Unit Tests
```python
def test_random_strategy():
    """Test RandomStrategy functionality"""
    strategy = IncRandomStrategy(
        params={
            "entry_probability": 1.0,  # Always generate signals
            "exit_probability": 1.0,
            "random_seed": 42
        }
    )

    # Test data
    test_data = {
        'open': 100.0,
        'high': 101.0,
        'low': 99.0,
        'close': 100.5,
        'volume': 1000
    }

    timestamp = pd.Timestamp('2024-01-01 10:00:00')

    # Process data
    result = strategy.update_minute_data(timestamp, test_data)

    # Verify signals
    entry_signal = strategy.get_entry_signal()
    exit_signal = strategy.get_exit_signal()

    assert entry_signal.signal_type == "ENTRY"
    assert exit_signal.signal_type == "EXIT"
    assert 0.6 <= entry_signal.confidence <= 0.9
    assert 0.6 <= exit_signal.confidence <= 0.9

# Run test
test_random_strategy()
print("✅ RandomStrategy tests passed")
```

## Use Cases

1. **Framework Testing**: Validate incremental strategy system
2. **Performance Benchmarking**: Baseline for strategy comparison
3. **Signal Pipeline Testing**: Test signal processing and execution
4. **Load Testing**: High-frequency signal generation testing
5. **Integration Testing**: Verify trading system integration

@ -1,520 +0,0 @@

# Real-Time Strategy Implementation Plan - Option 1: Incremental Calculation Architecture

## Implementation Overview

This document outlines the step-by-step implementation plan for updating the trading strategy system to support real-time data processing with incremental calculations. The implementation is divided into phases to ensure stability and backward compatibility.

## Phase 1: Foundation and Base Classes (Week 1-2) ✅ COMPLETED

### 1.1 Create Indicator State Classes ✅ COMPLETED
**Priority: HIGH**
**Files created:**
- `cycles/IncStrategies/indicators/`
  - `__init__.py` ✅
  - `base.py` - Base IndicatorState class ✅
  - `moving_average.py` - MovingAverageState ✅
  - `rsi.py` - RSIState ✅
  - `supertrend.py` - SupertrendState ✅
  - `bollinger_bands.py` - BollingerBandsState ✅
  - `atr.py` - ATRState (for Supertrend) ✅

**Tasks:**
- [x] Create `IndicatorState` abstract base class
- [x] Implement `MovingAverageState` with incremental calculation
- [x] Implement `RSIState` with incremental calculation
- [x] Implement `ATRState` for Supertrend calculations
- [x] Implement `SupertrendState` with incremental calculation
- [x] Implement `BollingerBandsState` with incremental calculation
- [x] Add comprehensive unit tests for each indicator state ✅
- [x] Validate accuracy against traditional batch calculations ✅

**Acceptance Criteria:**
- ✅ All indicator states produce identical results to batch calculations (within 0.01% tolerance)
- ✅ Memory usage is constant regardless of data length
- ✅ Update time is <0.1ms per data point
- ✅ All indicators handle edge cases (NaN, zero values, etc.)

### 1.2 Update Base Strategy Class ✅ COMPLETED
**Priority: HIGH**
**Files created:**
- `cycles/IncStrategies/base.py` ✅

**Tasks:**
- [x] Add new abstract methods to `IncStrategyBase`:
  - `get_minimum_buffer_size()`
  - `calculate_on_data()`
  - `supports_incremental_calculation()`
- [x] Add new properties:
  - `calculation_mode`
  - `is_warmed_up`
- [x] Add internal state management:
  - `_calculation_mode`
  - `_is_warmed_up`
  - `_data_points_received`
  - `_timeframe_buffers`
  - `_timeframe_last_update`
  - `_indicator_states`
  - `_last_signals`
  - `_signal_history`
- [x] Implement buffer management methods:
  - `_update_timeframe_buffers()`
  - `_should_update_timeframe()`
  - `_get_timeframe_buffer()`
- [x] Add error handling and recovery methods:
  - `_validate_calculation_state()`
  - `_recover_from_state_corruption()`
  - `handle_data_gap()`
- [x] Provide default implementations for backward compatibility

**Acceptance Criteria:**
- ✅ Existing strategies continue to work without modification (compatibility layer)
- ✅ New interface is fully documented
- ✅ Buffer management is memory-efficient
- ✅ Error recovery mechanisms are robust

### 1.3 Create Configuration System ✅ COMPLETED
**Priority: MEDIUM**
**Files created:**
- Configuration integrated into base classes ✅

**Tasks:**
- [x] Define strategy configuration dataclass (integrated into base class)
- [x] Add incremental calculation settings
- [x] Add buffer size configuration
- [x] Add performance monitoring settings
- [x] Add error handling configuration

## Phase 2: Strategy Implementation (Week 3-4) ✅ COMPLETED

### 2.1 Update RandomStrategy (Simplest) ✅ COMPLETED
**Priority: HIGH**
**Files created:**
- `cycles/IncStrategies/random_strategy.py` ✅
- `cycles/IncStrategies/test_random_strategy.py` ✅

**Tasks:**
- [x] Implement `get_minimum_buffer_size()` (return {"1min": 1})
- [x] Implement `calculate_on_data()` (minimal processing)
- [x] Implement `supports_incremental_calculation()` (return True)
- [x] Update signal generation to work without pre-calculated arrays
- [x] Add comprehensive testing
- [x] Validate against current implementation

**Acceptance Criteria:**
- ✅ RandomStrategy works in both batch and incremental modes
- ✅ Signal generation is identical between modes
- ✅ Memory usage is minimal
- ✅ Performance is optimal (0.006ms update, 0.048ms signal generation)

### 2.2 Update MetaTrend Strategy (Supertrend-based) ✅ COMPLETED
**Priority: HIGH**
**Files created:**
- `cycles/IncStrategies/metatrend_strategy.py` ✅
- `test_metatrend_comparison.py` ✅
- `plot_original_vs_incremental.py` ✅

**Tasks:**
- [x] Implement `get_minimum_buffer_size()` based on timeframe
- [x] Implement `_initialize_indicator_states()` for three Supertrend indicators
- [x] Implement `calculate_on_data()` with incremental Supertrend updates
- [x] Update `get_entry_signal()` to work with current state instead of arrays
- [x] Update `get_exit_signal()` to work with current state instead of arrays
- [x] Implement meta-trend calculation from current Supertrend states
- [x] Add state validation and recovery
- [x] Comprehensive testing against current implementation
- [x] Visual comparison plotting with signal analysis
- [x] Bug discovery and validation in original DefaultStrategy

**Implementation Details:**
- **SupertrendCollection**: Manages 3 Supertrend indicators with parameters (12, 3.0), (10, 1.0), (11, 2.0)
- **Meta-trend Logic**: Uptrend when all agree (+1), Downtrend when all agree (-1), Neutral otherwise (0)
- **Signal Generation**: Entry on meta-trend change to +1, Exit on meta-trend change to -1
- **Performance**: <1ms updates, 17 signals vs 106 (original buggy), mathematically accurate

**Testing Results:**
- ✅ 98.5% accuracy vs corrected original strategy (99.5% vs buggy original)
- ✅ Comprehensive visual comparison with 525,601 data points (2022-2023)
- ✅ Bug discovery in original DefaultStrategy exit condition
- ✅ Production-ready incremental implementation validated

**Acceptance Criteria:**
- ✅ Supertrend calculations are identical to batch mode
- ✅ Meta-trend logic produces correct signals (bug-free)
- ✅ Memory usage is bounded by buffer size
- ✅ Performance meets <1ms update target
- ✅ Visual validation confirms correct behavior

### 2.3 Update BBRSStrategy (Bollinger Bands + RSI) ✅ COMPLETED
**Priority: HIGH**
**Files created:**
- `cycles/IncStrategies/bbrs_incremental.py` ✅
- `test_bbrs_incremental.py` ✅
- `test_realtime_bbrs.py` ✅
- `test_incremental_indicators.py` ✅

**Tasks:**
- [x] Implement `get_minimum_buffer_size()` based on BB and RSI periods
- [x] Implement `_initialize_indicator_states()` for BB, RSI, and market regime
- [x] Implement `calculate_on_data()` with incremental indicator updates
- [x] Update signal generation to work with current indicator states
- [x] Implement market regime detection with incremental updates
- [x] Add state validation and recovery
- [x] Comprehensive testing against current implementation
- [x] Add real-time minute-level data processing with timeframe aggregation
- [x] Implement TimeframeAggregator for internal data aggregation
- [x] Validate incremental indicators (BB, RSI) against original implementations
- [x] Test real-time simulation with different timeframes (15min, 1h)
- [x] Verify consistency between minute-level and pre-aggregated processing

**Implementation Details:**
- **TimeframeAggregator**: Handles real-time aggregation of minute data to higher timeframes
- **BBRSIncrementalState**: Complete incremental BBRS strategy with market regime detection
- **Real-time Compatibility**: Accepts minute-level data, internally aggregates to configured timeframe
- **Market Regime Logic**: Trending vs sideways detection based on Bollinger Band width
- **Signal Generation**: Regime-specific buy/sell logic with volume analysis
- **Performance**: Constant memory usage, O(1) updates per data point

**Testing Results:**
- ✅ Perfect accuracy (0.000000 difference) vs original implementation after warm-up
- ✅ Real-time processing: 2,881 minutes → 192 15min bars (exact match)
- ✅ Real-time processing: 2,881 minutes → 48 1h bars (exact match)
- ✅ Incremental indicators validated: BB (perfect), RSI (0.04 mean difference after warm-up)
- ✅ Signal generation: 95.45% match rate for buy/sell signals
- ✅ Market regime detection working correctly
- ✅ Visual comparison plots generated and validated

**Acceptance Criteria:**
- ✅ BB and RSI calculations match batch mode exactly (after warm-up period)
- ✅ Market regime detection works incrementally
- ✅ Signal generation is consistent between modes (95.45% match rate)
- ✅ Performance meets targets (constant memory, fast updates)
- ✅ Real-time minute-level data processing works correctly
- ✅ Internal timeframe aggregation produces identical results to pre-aggregated data

## Phase 3: Strategy Manager Updates (Week 5) 📋 PENDING

### 3.1 Update StrategyManager
**Priority: HIGH**
**Files to create:**
- `cycles/IncStrategies/manager.py`

**Tasks:**
- [ ] Add `process_new_data()` method for coordinating incremental updates
- [ ] Add buffer size calculation across all strategies
- [ ] Add initialization mode detection and coordination
- [ ] Update signal combination to work with incremental mode
- [ ] Add performance monitoring and metrics collection
- [ ] Add error handling for strategy failures
- [ ] Add configuration management

**Acceptance Criteria:**
- Manager coordinates multiple strategies efficiently
- Buffer sizes are calculated correctly
- Error handling is robust
- Performance monitoring works

### 3.2 Add Performance Monitoring
**Priority: MEDIUM**
**Files to create:**
- `cycles/IncStrategies/monitoring.py`

**Tasks:**
- [ ] Create performance metrics collection
- [ ] Add latency measurement
- [ ] Add memory usage tracking
- [ ] Add signal generation frequency tracking
- [ ] Add error rate monitoring
- [ ] Create performance reporting

## Phase 4: Integration and Testing (Week 6) 📋 PENDING

### 4.1 Update StrategyTrader Integration
**Priority: HIGH**
**Files to modify:**
- `TraderFrontend/trader/strategy_trader.py`

**Tasks:**
- [ ] Update `_process_strategies()` to use incremental mode
- [ ] Add buffer management for real-time data
- [ ] Update initialization to support incremental mode
- [ ] Add performance monitoring integration
- [ ] Add error recovery mechanisms
- [ ] Update configuration handling

**Acceptance Criteria:**
- Real-time trading works with incremental strategies
- Performance is significantly improved
- Memory usage is bounded
- Error recovery works correctly

### 4.2 Update Backtesting Integration
**Priority: MEDIUM**
**Files to modify:**
- `cycles/backtest.py`
- `main.py`

**Tasks:**
- [ ] Add support for incremental mode in backtesting
- [ ] Maintain backward compatibility with batch mode
- [ ] Add performance comparison between modes
- [ ] Update configuration handling

**Acceptance Criteria:**
- Backtesting works in both modes
- Results are identical between modes
- Performance comparison is available

### 4.3 Comprehensive Testing ✅ COMPLETED (MetaTrend)
**Priority: HIGH**
**Files created:**
- `test_metatrend_comparison.py` ✅
- `plot_original_vs_incremental.py` ✅
- `SIGNAL_COMPARISON_SUMMARY.md` ✅

**Tasks:**
- [x] Create unit tests for MetaTrend indicator states
- [x] Create integration tests for MetaTrend strategy implementation
- [x] Create performance benchmarks
- [x] Create accuracy validation tests
- [x] Create memory usage tests
- [x] Create error recovery tests
- [x] Create real-time simulation tests
- [x] Create visual comparison and analysis tools
- [ ] Extend testing to other strategies (BBRSStrategy, etc.)

**Acceptance Criteria:**
- ✅ MetaTrend tests pass with 98.5% accuracy
- ✅ Performance targets are met (<1ms updates)
- ✅ Memory usage is within bounds
- ✅ Error recovery works correctly
- ✅ Visual validation confirms correct behavior

## Phase 5: Optimization and Documentation (Week 7) 🔄 IN PROGRESS

### 5.1 Performance Optimization ✅ COMPLETED (MetaTrend)
**Priority: MEDIUM**

**Tasks:**
- [x] Profile and optimize MetaTrend indicator calculations
- [x] Optimize buffer management
- [x] Optimize signal generation
- [x] Add caching where appropriate
- [x] Optimize memory allocation patterns
- [ ] Extend optimization to other strategies

### 5.2 Documentation ✅ COMPLETED (MetaTrend)
**Priority: MEDIUM**

**Tasks:**
- [x] Update MetaTrend strategy docstrings
- [x] Create MetaTrend implementation guide
- [x] Create performance analysis documentation
- [x] Create visual comparison documentation
- [x] Update README files for MetaTrend
- [ ] Extend documentation to other strategies

### 5.3 Configuration and Monitoring ✅ COMPLETED (MetaTrend)
**Priority: LOW**

**Tasks:**
- [x] Add MetaTrend configuration validation
- [x] Add runtime configuration updates
- [x] Add monitoring for MetaTrend performance
- [x] Add alerting for performance issues
- [ ] Extend to other strategies

## Implementation Status Summary

### ✅ Completed (Phase 1, 2.1, 2.2, 2.3)
- **Foundation Infrastructure**: Complete incremental indicator system
- **Base Classes**: Full `IncStrategyBase` with buffer management and error handling
- **Indicator States**: All required indicators (MA, RSI, ATR, Supertrend, Bollinger Bands)
- **Memory Management**: Bounded buffer system with configurable sizes
- **Error Handling**: State validation, corruption recovery, data gap handling
- **Performance Monitoring**: Built-in metrics collection and timing
- **IncRandomStrategy**: Complete implementation with testing (0.006ms updates, 0.048ms signals)
- **IncMetaTrendStrategy**: Complete implementation with comprehensive testing and validation
  - 98.5% accuracy vs corrected original strategy
  - Visual comparison tools and analysis
  - Bug discovery in original DefaultStrategy
  - Production-ready with <1ms updates
- **BBRSIncrementalStrategy**: Complete implementation with real-time processing capabilities
  - Perfect accuracy (0.000000 difference) vs original implementation after warm-up
  - Real-time minute-level data processing with internal timeframe aggregation
  - Market regime detection (trending vs sideways) working correctly
  - 95.45% signal match rate with comprehensive testing
  - TimeframeAggregator for seamless real-time data handling
  - Production-ready for live trading systems

### 🔄 Current Focus (Phase 3)
- **Strategy Manager**: Coordinating multiple incremental strategies
- **Integration Testing**: Ensuring all components work together
- **Performance Optimization**: Fine-tuning for production deployment

### 📋 Remaining Work
- Strategy manager updates
- Integration with existing systems
- Comprehensive testing suite for strategy combinations
- Performance optimization for multi-strategy scenarios
- Documentation updates for deployment guides

## Implementation Details

### MetaTrend Strategy Implementation ✅

#### Buffer Size Calculations
```python
def get_minimum_buffer_size(self) -> Dict[str, int]:
    primary_tf = self.params.get("timeframe", "1min")

    # Supertrend needs warmup period for reliable calculation
    if primary_tf == "15min":
        return {"15min": 50, "1min": 750}    # 50 * 15 = 750 minutes
    elif primary_tf == "5min":
        return {"5min": 50, "1min": 250}     # 50 * 5 = 250 minutes
    elif primary_tf == "30min":
        return {"30min": 50, "1min": 1500}   # 50 * 30 = 1500 minutes
    elif primary_tf == "1h":
        return {"1h": 50, "1min": 3000}      # 50 * 60 = 3000 minutes
    else:  # 1min
        return {"1min": 50}
```
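
The chain generalizes to a small lookup table; an equivalent, table-driven sketch (the timeframe-to-minutes mapping is a hypothetical helper covering only the timeframes listed above):

```python
# Hypothetical refactoring -- behavior matches the if/elif chain above.
_TF_MINUTES = {"5min": 5, "15min": 15, "30min": 30, "1h": 60}
WARMUP_BARS = 50  # Supertrend warm-up period

def get_minimum_buffer_size(self) -> Dict[str, int]:
    tf = self.params.get("timeframe", "1min")
    if tf not in _TF_MINUTES:  # default / 1min case
        return {"1min": WARMUP_BARS}
    return {tf: WARMUP_BARS, "1min": WARMUP_BARS * _TF_MINUTES[tf]}
```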

#### Supertrend Parameters
- ST1: Period=12, Multiplier=3.0
- ST2: Period=10, Multiplier=1.0
- ST3: Period=11, Multiplier=2.0

#### Meta-trend Logic
- **Uptrend (+1)**: All 3 Supertrends agree on uptrend
- **Downtrend (-1)**: All 3 Supertrends agree on downtrend
- **Neutral (0)**: Supertrends disagree

#### Signal Generation
- **Entry**: Meta-trend changes from != 1 to == 1
- **Exit**: Meta-trend changes from != -1 to == -1 (see the sketch below)
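
Both rules condense to a few lines; a minimal sketch of the agreement and edge-detection logic (the production version lives in SupertrendCollection and the strategy's signal methods):

```python
def compute_meta_trend(trends):
    """+1 if all three Supertrends agree up, -1 if all agree down, else 0."""
    if all(t == 1 for t in trends):
        return 1
    if all(t == -1 for t in trends):
        return -1
    return 0

def detect_signal(prev_meta, curr_meta):
    """Signals fire only on the transition *into* full agreement."""
    if prev_meta != 1 and curr_meta == 1:
        return "ENTRY"
    if prev_meta != -1 and curr_meta == -1:
        return "EXIT"
    return None
```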

### BBRSStrategy Implementation ✅

#### Buffer Size Calculations
```python
def get_minimum_buffer_size(self) -> Dict[str, int]:
    bb_period = self.params.get("bb_period", 20)
    rsi_period = self.params.get("rsi_period", 14)
    volume_ma_period = 20

    # Need max of all periods plus warmup
    min_periods = max(bb_period, rsi_period, volume_ma_period) + 20
    return {"1min": min_periods}
```

#### Timeframe Aggregation
- **TimeframeAggregator**: Handles real-time aggregation of minute data to higher timeframes
- **Configurable Timeframes**: 1min, 5min, 15min, 30min, 1h, etc.
- **OHLCV Aggregation**: Proper open/high/low/close/volume aggregation
- **Bar Completion**: Only processes indicators when complete timeframe bars are formed

#### Market Regime Detection
- **Trending Market**: BB width >= threshold (default 0.05; see the sketch below)
- **Sideways Market**: BB width < threshold
- **Adaptive Parameters**: Different BB multipliers and RSI thresholds per regime
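
A minimal sketch of the classification, assuming band width is normalized by the middle band (the exact normalization is an implementation detail of BBRSIncrementalState):

```python
def detect_regime(upper_band, lower_band, middle_band, threshold=0.05):
    """Classify the market from normalized Bollinger Band width."""
    bb_width = (upper_band - lower_band) / middle_band
    return "trending" if bb_width >= threshold else "sideways"

# detect_regime(103.0, 97.0, 100.0) -> "trending" (width 0.06 >= 0.05)
```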

#### Signal Generation Logic
```python
# Sideways Market (Mean Reversion)
buy_condition = (price <= lower_band) and (rsi_value <= rsi_low)
sell_condition = (price >= upper_band) and (rsi_value >= rsi_high)

# Trending Market (Breakout Mode)
buy_condition = (price < lower_band) and (rsi_value < 50) and volume_spike
sell_condition = (price > upper_band) and (rsi_value > 50) and volume_spike
```

#### Real-time Processing Flow
1. **Minute Data Input**: Accept live minute-level OHLCV data
2. **Timeframe Aggregation**: Accumulate into configured timeframe bars (see the sketch below)
3. **Indicator Updates**: Update BB, RSI, volume MA when bar completes
4. **Market Regime**: Determine trending vs sideways based on BB width
5. **Signal Generation**: Apply regime-specific buy/sell logic
6. **State Management**: Maintain constant memory usage
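
A minimal sketch of step 2's aggregation rules (the real TimeframeAggregator also tracks bar timestamps and edge cases; `MinuteAggregator` is a hypothetical condensed name, and a `None` return means "no bar completed yet"):

```python
from typing import Optional
import pandas as pd

class MinuteAggregator:
    """Fold 1min bars into one bar of the configured timeframe."""

    def __init__(self, timeframe: str = "15min"):
        self.timeframe = timeframe
        self._bucket = None  # start of the bar currently being built
        self._bar = None     # partial OHLCV for that bar

    def add(self, ts: pd.Timestamp, ohlcv: dict) -> Optional[dict]:
        bucket = ts.floor(self.timeframe)
        completed = None
        if self._bucket is not None and bucket != self._bucket:
            completed = self._bar  # the previous bar is now complete
            self._bar = None
        self._bucket = bucket
        if self._bar is None:
            self._bar = dict(ohlcv)  # seed OHLCV from the bucket's first minute
        else:
            self._bar['high'] = max(self._bar['high'], ohlcv['high'])
            self._bar['low'] = min(self._bar['low'], ohlcv['low'])
            self._bar['close'] = ohlcv['close']
            self._bar['volume'] += ohlcv['volume']
        return completed
```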

### Error Recovery Strategy

1. **State Validation**: Periodic validation of indicator states ✅
2. **Graceful Degradation**: Fall back to batch calculation if incremental fails ✅
3. **Automatic Recovery**: Reinitialize from buffer data when corruption detected ✅
4. **Monitoring**: Track error rates and performance metrics ✅

### Performance Targets

- **Incremental Update**: <1ms per data point ✅
- **Signal Generation**: <10ms per strategy ✅
- **Memory Usage**: <100MB per strategy (bounded by buffer size) ✅
- **Accuracy**: 99.99% identical to batch calculations ✅ (98.5% for MetaTrend due to original bug)

### Testing Strategy

1. **Unit Tests**: Test each component in isolation ✅ (MetaTrend)
2. **Integration Tests**: Test strategy combinations ✅ (MetaTrend)
3. **Performance Tests**: Benchmark against current implementation ✅ (MetaTrend)
4. **Accuracy Tests**: Validate against known good results ✅ (MetaTrend)
5. **Stress Tests**: Test with high-frequency data ✅ (MetaTrend)
6. **Memory Tests**: Validate memory usage bounds ✅ (MetaTrend)
7. **Visual Tests**: Create comparison plots and analysis ✅ (MetaTrend)

## Risk Mitigation

### Technical Risks
- **Accuracy Issues**: Comprehensive testing and validation ✅
- **Performance Regression**: Benchmarking and optimization ✅
- **Memory Leaks**: Careful buffer management and testing ✅
- **State Corruption**: Validation and recovery mechanisms ✅

### Implementation Risks
- **Complexity**: Phased implementation with incremental testing ✅
- **Breaking Changes**: Backward compatibility layer ✅
- **Timeline**: Conservative estimates with buffer time ✅

### Operational Risks
- **Production Issues**: Gradual rollout with monitoring ✅
- **Data Quality**: Robust error handling and validation ✅
- **System Load**: Performance monitoring and alerting ✅

## Success Criteria

### Functional Requirements
- [x] MetaTrend strategy works in incremental mode ✅
- [x] Signal generation is mathematically correct (bug-free) ✅
- [x] Real-time performance is significantly improved ✅
- [x] Memory usage is bounded and predictable ✅
- [x] All core strategies (Random, MetaTrend, BBRS) work in incremental mode ✅

### Performance Requirements
- [x] 10x improvement in processing speed for real-time data ✅
- [x] 90% reduction in memory usage for long-running systems ✅
- [x] <1ms latency for incremental updates ✅
- [x] <10ms latency for signal generation ✅

### Quality Requirements
- [x] 100% test coverage for MetaTrend strategy ✅
- [x] 98.5% accuracy compared to corrected batch calculations ✅
- [x] Zero memory leaks in long-running tests ✅
- [x] Robust error handling and recovery ✅
- [ ] Extend quality requirements to remaining strategies

## Key Achievements

### MetaTrend Strategy Success ✅
- **Bug Discovery**: Found and documented critical bug in original DefaultStrategy exit condition
- **Mathematical Accuracy**: Achieved 98.5% signal match with corrected implementation
- **Performance**: <1ms updates, suitable for high-frequency trading
- **Visual Validation**: Comprehensive plotting and analysis tools created
- **Production Ready**: Fully tested and validated for live trading systems

### Architecture Success ✅
- **Unified Interface**: All incremental strategies follow consistent `IncStrategyBase` pattern
- **Memory Efficiency**: Bounded buffer system prevents memory growth
- **Error Recovery**: Robust state validation and recovery mechanisms
- **Performance Monitoring**: Built-in metrics and timing analysis

This implementation plan provides a structured approach to implementing the incremental calculation architecture while maintaining system stability and backward compatibility. The MetaTrend strategy implementation serves as a proven template for future strategy conversions.

@ -1,342 +0,0 @@

# Real-Time Strategy Architecture - Technical Specification

## Overview

This document outlines the technical specification for updating the trading strategy system to support real-time data processing with incremental calculations. The current architecture processes entire datasets during initialization, which is inefficient for real-time trading where new data arrives continuously.

## Current Architecture Issues

### Problems with Current Implementation
1. **Initialization-Heavy Design**: All calculations performed during `initialize()` method
2. **Full Dataset Processing**: Entire historical dataset processed on each initialization
3. **Memory Inefficient**: Stores complete calculation history in arrays
4. **No Incremental Updates**: Cannot add new data without full recalculation
5. **Performance Bottleneck**: Recalculating years of data for each new candle
6. **Index-Based Access**: Signal generation relies on pre-calculated arrays with fixed indices

### Current Strategy Flow
```
Data → initialize() → Full Calculation → Store Arrays → get_signal(index)
```

## Target Architecture: Incremental Calculation

### New Strategy Flow
```
Initial Data → initialize() → Warm-up Calculation → Ready State
New Data Point → calculate_on_data() → Update State → get_signal()
```

## Technical Requirements

### 1. Base Strategy Interface Updates

#### New Abstract Methods
```python
@abstractmethod
def get_minimum_buffer_size(self) -> Dict[str, int]:
    """
    Return minimum data points needed for each timeframe.

    Returns:
        Dict[str, int]: {timeframe: min_points} mapping

    Example:
        {"15min": 50, "1min": 750}  # 50 15min candles = 750 1min candles
    """
    pass

@abstractmethod
def calculate_on_data(self, new_data_point: Dict, timestamp: pd.Timestamp) -> None:
    """
    Process a single new data point incrementally.

    Args:
        new_data_point: OHLCV data point {open, high, low, close, volume}
        timestamp: Timestamp of the data point
    """
    pass

@abstractmethod
def supports_incremental_calculation(self) -> bool:
    """
    Whether strategy supports incremental calculation.

    Returns:
        bool: True if incremental mode supported
    """
    pass
```

#### New Properties and Methods
```python
@property
def calculation_mode(self) -> str:
    """Current calculation mode: 'initialization' or 'incremental'"""
    return self._calculation_mode

@property
def is_warmed_up(self) -> bool:
    """Whether strategy has sufficient data for reliable signals"""
    return self._is_warmed_up

def reset_calculation_state(self) -> None:
    """Reset internal calculation state for reinitialization"""
    pass

def get_current_state_summary(self) -> Dict:
    """Get summary of current calculation state for debugging"""
    pass
```

### 2. Internal State Management

#### State Variables
Each strategy must maintain:
```python
class StrategyBase:
    def __init__(self, ...):
        # Calculation state
        self._calculation_mode = "initialization"  # or "incremental"
        self._is_warmed_up = False
        self._data_points_received = 0

        # Timeframe-specific buffers
        self._timeframe_buffers = {}      # {timeframe: deque(maxlen=buffer_size)}
        self._timeframe_last_update = {}  # {timeframe: timestamp}

        # Indicator states (strategy-specific)
        self._indicator_states = {}

        # Signal generation state
        self._last_signals = {}                   # Cache recent signals
        self._signal_history = deque(maxlen=100)  # Recent signal history
```

#### Buffer Management
```python
def _update_timeframe_buffers(self, new_data_point: Dict, timestamp: pd.Timestamp):
    """Update all timeframe buffers with new data point"""

def _should_update_timeframe(self, timeframe: str, timestamp: pd.Timestamp) -> bool:
    """Check if timeframe should be updated based on timestamp"""

def _get_timeframe_buffer(self, timeframe: str) -> pd.DataFrame:
    """Get current buffer for specific timeframe"""
```
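
The timestamp check is the key piece of the stubs above; a minimal sketch, assuming each bar is keyed by its floored bucket start (so 10:07 belongs to the 10:00 bar on a 15min timeframe):

```python
def _should_update_timeframe(self, timeframe: str, timestamp: pd.Timestamp) -> bool:
    """A new bar starts whenever the timestamp crosses into a new bucket."""
    last = self._timeframe_last_update.get(timeframe)
    return last is None or timestamp.floor(timeframe) > last.floor(timeframe)
```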

### 3. Strategy-Specific Requirements

#### DefaultStrategy (Supertrend-based)
```python
class DefaultStrategy(StrategyBase):
    def get_minimum_buffer_size(self) -> Dict[str, int]:
        primary_tf = self.params.get("timeframe", "15min")
        if primary_tf == "15min":
            return {"15min": 50, "1min": 750}
        elif primary_tf == "5min":
            return {"5min": 50, "1min": 250}
        # ... other timeframes

    def _initialize_indicator_states(self):
        """Initialize Supertrend calculation states"""
        self._supertrend_states = [
            SupertrendState(period=10, multiplier=3.0),
            SupertrendState(period=11, multiplier=2.0),
            SupertrendState(period=12, multiplier=1.0)
        ]

    def _update_supertrend_incrementally(self, ohlc_data):
        """Update Supertrend calculations with new data"""
        # Incremental ATR calculation
        # Incremental Supertrend calculation
        # Update meta-trend based on all three Supertrends
```

#### BBRSStrategy (Bollinger Bands + RSI)
```python
class BBRSStrategy(StrategyBase):
    def get_minimum_buffer_size(self) -> Dict[str, int]:
        bb_period = self.params.get("bb_period", 20)
        rsi_period = self.params.get("rsi_period", 14)
        min_periods = max(bb_period, rsi_period) + 10  # +10 for warmup
        return {"1min": min_periods}

    def _initialize_indicator_states(self):
        """Initialize BB and RSI calculation states"""
        self._bb_state = BollingerBandsState(period=self.params.get("bb_period", 20))
        self._rsi_state = RSIState(period=self.params.get("rsi_period", 14))
        self._market_regime_state = MarketRegimeState()

    def _update_indicators_incrementally(self, price_data):
        """Update BB, RSI, and market regime with new data"""
        # Incremental moving average for BB
        # Incremental RSI calculation
        # Market regime detection update
```

#### RandomStrategy
```python
class RandomStrategy(StrategyBase):
    def get_minimum_buffer_size(self) -> Dict[str, int]:
        return {"1min": 1}  # No indicators needed

    def supports_incremental_calculation(self) -> bool:
        return True  # Always supports incremental
```

### 4. Indicator State Classes

#### Base Indicator State
```python
class IndicatorState(ABC):
    """Base class for maintaining indicator calculation state"""

    @abstractmethod
    def update(self, new_value: float) -> float:
        """Update indicator with new value and return current indicator value"""
        pass

    @abstractmethod
    def is_warmed_up(self) -> bool:
        """Whether indicator has enough data for reliable values"""
        pass

    @abstractmethod
    def reset(self) -> None:
        """Reset indicator state"""
        pass
```

#### Specific Indicator States
```python
class MovingAverageState(IndicatorState):
    """Maintains state for incremental moving average calculation"""

class RSIState(IndicatorState):
    """Maintains state for incremental RSI calculation"""

class SupertrendState(IndicatorState):
    """Maintains state for incremental Supertrend calculation"""

class BollingerBandsState(IndicatorState):
    """Maintains state for incremental Bollinger Bands calculation"""
```
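
To make the interface concrete, here is a minimal sketch of the simplest of these states, an O(1) incremental simple moving average (the shipped `MovingAverageState` may differ in detail):

```python
from collections import deque

class SimpleMovingAverageState(IndicatorState):
    """Incremental SMA: a running sum over a fixed-size window."""

    def __init__(self, period: int):
        self.period = period
        self._window = deque(maxlen=period)
        self._sum = 0.0

    def update(self, new_value: float) -> float:
        if len(self._window) == self.period:
            self._sum -= self._window[0]  # this element is evicted by the append below
        self._window.append(new_value)
        self._sum += new_value
        return self._sum / len(self._window)

    def is_warmed_up(self) -> bool:
        return len(self._window) == self.period

    def reset(self) -> None:
        self._window.clear()
        self._sum = 0.0
```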

### 5. Data Flow Architecture

#### Initialization Phase
```
1. Strategy.initialize(backtester)
2. Strategy._resample_data(original_data)
3. Strategy._initialize_indicator_states()
4. Strategy._warm_up_with_historical_data()
5. Strategy._calculation_mode = "incremental"
6. Strategy._is_warmed_up = True
```

#### Real-Time Processing Phase
```
1. New data arrives → StrategyManager.process_new_data()
2. StrategyManager → Strategy.calculate_on_data(new_point)
3. Strategy._update_timeframe_buffers()
4. Strategy._update_indicators_incrementally()
5. Strategy ready for get_entry_signal()/get_exit_signal()
```

### 6. Performance Requirements

#### Memory Efficiency
- Maximum buffer size per timeframe: configurable (default: 200 periods)
- Use `collections.deque` with `maxlen` for automatic buffer management (see the illustration below)
- Store only essential state, not full calculation history
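
The `maxlen` behavior is what keeps memory constant; a short illustration:

```python
from collections import deque

# Appending beyond maxlen silently drops the oldest bar, so the buffer
# stays at 200 entries no matter how long the system runs.
buffer = deque(maxlen=200)
for i in range(1_000_000):
    buffer.append({'close': 100.0 + i})
assert len(buffer) == 200
assert buffer[0]['close'] == 100.0 + 999_800  # oldest surviving bar
```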

#### Processing Speed
- Target: <1ms per data point for incremental updates
- Target: <10ms for signal generation
- Batch processing support for multiple data points

#### Accuracy Requirements
- Incremental calculations must match batch calculations within 0.01% tolerance (see the check below)
- Indicator values must be identical to traditional calculation methods
- Signal timing must be preserved exactly
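
The 0.01% tolerance translates directly into a relative comparison; a minimal validation helper (a sketch, assuming both modes produce aligned arrays):

```python
import numpy as np

def within_tolerance(incremental: np.ndarray, batch: np.ndarray) -> bool:
    """0.01% tolerance expressed as rtol=1e-4 with no absolute slack."""
    return bool(np.allclose(incremental, batch, rtol=1e-4, atol=0.0))
```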

### 7. Error Handling and Recovery

#### State Corruption Recovery
```python
def _validate_calculation_state(self) -> bool:
    """Validate internal calculation state consistency"""

def _recover_from_state_corruption(self) -> None:
    """Recover from corrupted calculation state"""
    # Reset to initialization mode
    # Recalculate from available buffer data
    # Resume incremental mode
```

#### Data Gap Handling
```python
def handle_data_gap(self, gap_duration: pd.Timedelta) -> None:
    """Handle gaps in data stream"""
    if gap_duration > self._max_acceptable_gap:
        self._trigger_reinitialization()
    else:
        self._interpolate_missing_data()
```

### 8. Backward Compatibility

#### Compatibility Layer
- Existing `initialize()` method continues to work
- New methods are optional with default implementations
- Gradual migration path for existing strategies
- Fallback to batch calculation if incremental not supported

#### Migration Strategy
1. Phase 1: Add new interface with default implementations
2. Phase 2: Implement incremental calculation for each strategy
3. Phase 3: Optimize and remove batch calculation fallbacks
4. Phase 4: Make incremental calculation mandatory

### 9. Testing Requirements

#### Unit Tests
- Test incremental vs. batch calculation accuracy
- Test state management and recovery
- Test buffer management and memory usage
- Test performance benchmarks

#### Integration Tests
- Test with real-time data streams
- Test strategy manager coordination
- Test error recovery scenarios
- Test memory usage over extended periods

#### Performance Tests
- Benchmark incremental vs. batch processing
- Memory usage profiling
- Latency measurements for signal generation
- Stress testing with high-frequency data

### 10. Configuration and Monitoring

#### Configuration Options
```python
STRATEGY_CONFIG = {
    "calculation_mode": "incremental",  # or "batch"
    "buffer_size_multiplier": 2.0,      # multiply minimum buffer size
    "max_acceptable_gap": "5min",       # max data gap before reinitialization
    "enable_state_validation": True,    # enable periodic state validation
    "performance_monitoring": True      # enable performance metrics
}
```

#### Monitoring Metrics
- Calculation latency per strategy
- Memory usage per strategy
- State validation failures
- Data gap occurrences
- Signal generation frequency (a container sketch follows)
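
A small container for these per-strategy metrics could look like this (a hypothetical sketch; field names are illustrative, not the monitoring module's API):

```python
from dataclasses import dataclass, field
from typing import List

@dataclass
class StrategyMetrics:
    """Per-strategy monitoring counters matching the list above."""
    latency_ms: List[float] = field(default_factory=list)
    memory_bytes: int = 0
    validation_failures: int = 0
    data_gaps: int = 0
    signals_generated: int = 0
```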

This specification provides the foundation for implementing efficient real-time strategy processing while maintaining accuracy and reliability.

@ -1,447 +0,0 @@

"""
Example usage of the Incremental Backtester.

This script demonstrates how to use the IncBacktester for various scenarios:
1. Single strategy backtesting
2. Multiple strategy comparison
3. Parameter optimization with multiprocessing
4. Custom analysis and result saving
5. Comprehensive result logging and action tracking

Run this script to see the backtester in action with real or synthetic data.
"""

import pandas as pd
import numpy as np
import logging
from datetime import datetime, timedelta
import os

from cycles.IncStrategies import (
    IncBacktester, BacktestConfig, IncRandomStrategy
)
from cycles.utils.storage import Storage

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


def ensure_results_directory():
    """Ensure the results directory exists."""
    results_dir = "results"
    if not os.path.exists(results_dir):
        os.makedirs(results_dir)
        logger.info(f"Created results directory: {results_dir}")
    return results_dir


def create_sample_data(days: int = 30) -> pd.DataFrame:
    """
    Create sample OHLCV data for demonstration.

    Args:
        days: Number of days of data to generate

    Returns:
        pd.DataFrame: Sample OHLCV data
    """
    # Create date range
    end_date = datetime.now()
    start_date = end_date - timedelta(days=days)
    timestamps = pd.date_range(start=start_date, end=end_date, freq='1min')

    # Generate realistic price data
    np.random.seed(42)
    n_points = len(timestamps)

    # Start with a base price
    base_price = 45000

    # Generate price movements with trend and volatility
    trend = np.linspace(0, 0.1, n_points)              # cumulative drift: +10% over the range
    volatility = np.random.normal(0, 0.002, n_points)  # 0.2% per-minute volatility

    # Calculate prices: cumulative drift plus a random walk of per-minute returns
    # (note: cumulatively summing the drift as well would blow up exp() for this many points)
    prices = base_price * np.exp(trend + np.cumsum(volatility))

    # Generate OHLCV data
    data = []
    for timestamp, close_price in zip(timestamps, prices):
        # Generate realistic OHLC
        intrabar_vol = close_price * 0.001

        open_price = close_price + np.random.normal(0, intrabar_vol)
        high_price = max(open_price, close_price) + abs(np.random.normal(0, intrabar_vol))
        low_price = min(open_price, close_price) - abs(np.random.normal(0, intrabar_vol))
        volume = np.random.uniform(50, 500)

        data.append({
            'open': open_price,
            'high': high_price,
            'low': low_price,
            'close': close_price,
            'volume': volume
        })

    df = pd.DataFrame(data, index=timestamps)
    return df


def example_single_strategy():
    """Example 1: Single strategy backtesting with comprehensive results."""
    print("\n" + "="*60)
    print("EXAMPLE 1: Single Strategy Backtesting")
    print("="*60)

    # Create sample data
    data = create_sample_data(days=7)  # 1 week of data

    # Save data
    storage = Storage()
    data_file = "sample_data_single.csv"
    storage.save_data(data, data_file)

    # Configure backtest
    config = BacktestConfig(
        data_file=data_file,
        start_date=data.index[0].strftime("%Y-%m-%d"),
        end_date=data.index[-1].strftime("%Y-%m-%d"),
        initial_usd=10000,
        stop_loss_pct=0.02,
        take_profit_pct=0.05
    )

    # Create strategy
    strategy = IncRandomStrategy(params={
        "timeframe": "15min",
        "entry_probability": 0.15,
        "exit_probability": 0.2,
        "random_seed": 42
    })

    # Run backtest
    backtester = IncBacktester(config, storage)
    results = backtester.run_single_strategy(strategy)

    # Print results
    print(f"\nResults:")
    print(f"  Strategy: {results['strategy_name']}")
    print(f"  Profit: {results['profit_ratio']*100:.2f}%")
    print(f"  Final Balance: ${results['final_usd']:,.2f}")
    print(f"  Trades: {results['n_trades']}")
    print(f"  Win Rate: {results['win_rate']*100:.1f}%")
    print(f"  Max Drawdown: {results['max_drawdown']*100:.2f}%")

    # Save comprehensive results
    backtester.save_comprehensive_results([results], "example_single_strategy")

    # Cleanup
    if os.path.exists(f"data/{data_file}"):
        os.remove(f"data/{data_file}")

    return results


def example_multiple_strategies():
    """Example 2: Multiple strategy comparison with comprehensive results."""
    print("\n" + "="*60)
    print("EXAMPLE 2: Multiple Strategy Comparison")
    print("="*60)

    # Create sample data
    data = create_sample_data(days=10)  # 10 days of data

    # Save data
    storage = Storage()
    data_file = "sample_data_multiple.csv"
    storage.save_data(data, data_file)

    # Configure backtest
    config = BacktestConfig(
        data_file=data_file,
        start_date=data.index[0].strftime("%Y-%m-%d"),
        end_date=data.index[-1].strftime("%Y-%m-%d"),
        initial_usd=10000,
        stop_loss_pct=0.015
    )

    # Create multiple strategies with different parameters
    strategies = [
        IncRandomStrategy(params={
            "timeframe": "5min",
            "entry_probability": 0.1,
            "exit_probability": 0.15,
            "random_seed": 42
        }),
        IncRandomStrategy(params={
            "timeframe": "15min",
            "entry_probability": 0.12,
            "exit_probability": 0.18,
            "random_seed": 123
        }),
        IncRandomStrategy(params={
            "timeframe": "30min",
            "entry_probability": 0.08,
            "exit_probability": 0.12,
            "random_seed": 456
        }),
        IncRandomStrategy(params={
            "timeframe": "1h",
            "entry_probability": 0.06,
            "exit_probability": 0.1,
            "random_seed": 789
        })
    ]

    # Run backtest
    backtester = IncBacktester(config, storage)
    results = backtester.run_multiple_strategies(strategies)

    # Print comparison
    print(f"\nStrategy Comparison:")
    print(f"{'Strategy':<20} {'Timeframe':<10} {'Profit %':<10} {'Trades':<8} {'Win Rate %':<12}")
    print("-" * 70)

    for i, result in enumerate(results):
        if result.get("success", True):
            timeframe = result['strategy_params']['timeframe']
            profit = result['profit_ratio'] * 100
            trades = result['n_trades']
            win_rate = result['win_rate'] * 100
            print(f"Strategy {i+1:<13} {timeframe:<10} {profit:<10.2f} {trades:<8} {win_rate:<12.1f}")

    # Get summary statistics
    summary = backtester.get_summary_statistics(results)
    print(f"\nSummary Statistics:")
    print(f"  Best Profit: {summary['profit_ratio']['max']*100:.2f}%")
    print(f"  Worst Profit: {summary['profit_ratio']['min']*100:.2f}%")
    print(f"  Average Profit: {summary['profit_ratio']['mean']*100:.2f}%")
    print(f"  Profit Std Dev: {summary['profit_ratio']['std']*100:.2f}%")

    # Save comprehensive results
    backtester.save_comprehensive_results(results, "example_multiple_strategies", summary)

    # Cleanup
    if os.path.exists(f"data/{data_file}"):
        os.remove(f"data/{data_file}")

    return results, summary


def example_parameter_optimization():
    """Example 3: Parameter optimization with multiprocessing and comprehensive results."""
    print("\n" + "="*60)
    print("EXAMPLE 3: Parameter Optimization")
    print("="*60)

    # Create sample data
    data = create_sample_data(days=5)  # 5 days for faster optimization

    # Save data
    storage = Storage()
    data_file = "sample_data_optimization.csv"
    storage.save_data(data, data_file)

    # Configure backtest
    config = BacktestConfig(
        data_file=data_file,
        start_date=data.index[0].strftime("%Y-%m-%d"),
        end_date=data.index[-1].strftime("%Y-%m-%d"),
        initial_usd=10000
    )

    # Define parameter grids
    strategy_param_grid = {
        "timeframe": ["5min", "15min", "30min"],
        "entry_probability": [0.08, 0.12, 0.16],
        "exit_probability": [0.1, 0.15, 0.2],
        "random_seed": [42]  # Keep seed constant for fair comparison
    }

    trader_param_grid = {
        "stop_loss_pct": [0.01, 0.015, 0.02],
        "take_profit_pct": [0.0, 0.03, 0.05]
    }

    # Run optimization (will use SystemUtils to determine optimal workers)
    backtester = IncBacktester(config, storage)

    n_combinations = (
        len(strategy_param_grid["timeframe"])
        * len(strategy_param_grid["entry_probability"])
        * len(strategy_param_grid["exit_probability"])
        * len(trader_param_grid["stop_loss_pct"])
        * len(trader_param_grid["take_profit_pct"])
    )
    print(f"Starting optimization with {n_combinations} combinations...")

    results = backtester.optimize_parameters(
        strategy_class=IncRandomStrategy,
        param_grid=strategy_param_grid,
        trader_param_grid=trader_param_grid,
        max_workers=None  # Use SystemUtils for optimal worker count
    )

    # Get summary
    summary = backtester.get_summary_statistics(results)

    # Print optimization results
    print(f"\nOptimization Results:")
    print(f"  Total Combinations: {summary['total_runs']}")
    print(f"  Successful Runs: {summary['successful_runs']}")
    print(f"  Failed Runs: {summary['failed_runs']}")

    if summary['successful_runs'] > 0:
        print(f"  Best Profit: {summary['profit_ratio']['max']*100:.2f}%")
        print(f"  Worst Profit: {summary['profit_ratio']['min']*100:.2f}%")
        print(f"  Average Profit: {summary['profit_ratio']['mean']*100:.2f}%")

        # Show top 3 configurations
        valid_results = [r for r in results if r.get("success", True)]
        valid_results.sort(key=lambda x: x["profit_ratio"], reverse=True)

        print(f"\nTop 3 Configurations:")
        for i, result in enumerate(valid_results[:3]):
            print(f"  {i+1}. Profit: {result['profit_ratio']*100:.2f}% | "
                  f"Timeframe: {result['strategy_params']['timeframe']} | "
                  f"Entry Prob: {result['strategy_params']['entry_probability']} | "
                  f"Stop Loss: {result['trader_params']['stop_loss_pct']*100:.1f}%")

    # Save comprehensive results
    backtester.save_comprehensive_results(results, "example_parameter_optimization", summary)

    # Cleanup
    if os.path.exists(f"data/{data_file}"):
        os.remove(f"data/{data_file}")

    return results, summary
def example_custom_analysis():
    """Example 4: Custom analysis with detailed result examination."""
    print("\n" + "="*60)
    print("EXAMPLE 4: Custom Analysis")
    print("="*60)

    # Create sample data with more volatility for interesting results
    data = create_sample_data(days=14)  # 2 weeks

    # Save data
    storage = Storage()
    data_file = "sample_data_analysis.csv"
    storage.save_data(data, data_file)

    # Configure backtest
    config = BacktestConfig(
        data_file=data_file,
        start_date=data.index[0].strftime("%Y-%m-%d"),
        end_date=data.index[-1].strftime("%Y-%m-%d"),
        initial_usd=25000,  # Larger starting capital
        stop_loss_pct=0.025,
        take_profit_pct=0.04
    )

    # Create strategy with specific parameters for analysis
    strategy = IncRandomStrategy(params={
        "timeframe": "30min",
        "entry_probability": 0.1,
        "exit_probability": 0.15,
        "random_seed": 42
    })

    # Run backtest
    backtester = IncBacktester(config, storage)
    results = backtester.run_single_strategy(strategy)

    # Detailed analysis
    print("\nDetailed Analysis:")
    print(f" Strategy: {results['strategy_name']}")
    print(f" Timeframe: {results['strategy_params']['timeframe']}")
    print(f" Data Period: {config.start_date} to {config.end_date}")
    print(f" Data Points: {results['data_points']:,}")
    print(f" Processing Time: {results['backtest_duration_seconds']:.2f}s")

    print("\nPerformance Metrics:")
    print(f" Initial Capital: ${results['initial_usd']:,.2f}")
    print(f" Final Balance: ${results['final_usd']:,.2f}")
    print(f" Total Return: {results['profit_ratio']*100:.2f}%")
    print(f" Total Trades: {results['n_trades']}")

    if results['n_trades'] > 0:
        print(f" Win Rate: {results['win_rate']*100:.1f}%")
        print(f" Average Trade: ${results['avg_trade']:.2f}")
        print(f" Max Drawdown: {results['max_drawdown']*100:.2f}%")
        print(f" Total Fees: ${results['total_fees_usd']:.2f}")

        # Calculate additional metrics (guard against a zero-day range)
        days_traded = max((pd.to_datetime(config.end_date) - pd.to_datetime(config.start_date)).days, 1)
        annualized_return = (1 + results['profit_ratio']) ** (365 / days_traded) - 1
        print(f" Annualized Return: {annualized_return*100:.2f}%")

        # Risk metrics
        if results['max_drawdown'] > 0:
            calmar_ratio = annualized_return / results['max_drawdown']
            print(f" Calmar Ratio: {calmar_ratio:.2f}")

    # Save comprehensive results with custom analysis
    backtester.save_comprehensive_results([results], "example_custom_analysis")

    # Cleanup
    if os.path.exists(f"data/{data_file}"):
        os.remove(f"data/{data_file}")

    return results


def main():
    """Run all examples."""
    print("Incremental Backtester Examples")
    print("="*60)
    print("This script demonstrates various features of the IncBacktester:")
    print("1. Single strategy backtesting")
    print("2. Multiple strategy comparison")
    print("3. Parameter optimization with multiprocessing")
    print("4. Custom analysis and metrics")
    print("5. Comprehensive result saving and action logging")

    # Ensure results directory exists
    ensure_results_directory()

    try:
        # Run all examples
        single_results = example_single_strategy()
        multiple_results, multiple_summary = example_multiple_strategies()
        optimization_results, optimization_summary = example_parameter_optimization()
        analysis_results = example_custom_analysis()

        print("\n" + "="*60)
        print("ALL EXAMPLES COMPLETED SUCCESSFULLY!")
        print("="*60)
        print("\n📊 Comprehensive results have been saved to the 'results' directory.")
        print("Each example generated multiple files:")
        print(" 📋 Summary JSON with session info and statistics")
        print(" 📈 Detailed CSV with all backtest results")
        print(" 📝 Action log JSON with all operations performed")
        print(" 📁 Individual strategy JSON files with trades and details")
        print(" 🗂️ Master index JSON for easy navigation")

        print("\n🎯 Key Insights:")
        print(f" • Single strategy achieved {single_results['profit_ratio']*100:.2f}% return")
        print(f" • Multiple strategies: best {multiple_summary['profit_ratio']['max']*100:.2f}%, worst {multiple_summary['profit_ratio']['min']*100:.2f}%")
        print(f" • Optimization tested {optimization_summary['total_runs']} combinations")
        print(" • Custom analysis provided detailed risk metrics")

        print("\n🔧 System Performance:")
        print(" • Used SystemUtils for optimal CPU core utilization")
        print(" • All actions logged for reproducibility")
        print(" • Results saved in multiple formats for analysis")

        print("\n✅ The incremental backtester is ready for production use!")

    except Exception as e:
        logger.error(f"Example failed: {e}")
        print(f"\nError: {e}")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    main()
@ -1,736 +0,0 @@
"""
Incremental Backtester for testing incremental strategies.

This module provides the IncBacktester class that orchestrates multiple IncTraders
for parallel testing, handles data loading and feeding, and supports multiprocessing
for parameter optimization.
"""

import os  # needed so result files can be written even if results/ is missing
import pandas as pd
import numpy as np
from typing import Dict, List, Optional, Any, Callable, Union, Tuple
import logging
import time
from concurrent.futures import ProcessPoolExecutor, as_completed
from itertools import product
import multiprocessing as mp
from dataclasses import dataclass
import json
from datetime import datetime

from .inc_trader import IncTrader
from .base import IncStrategyBase
from ..utils.storage import Storage
from ..utils.system import SystemUtils

logger = logging.getLogger(__name__)


def _worker_function(args: Tuple[type, Dict, Dict, 'BacktestConfig', str]) -> Dict[str, Any]:
    """
    Worker function for multiprocessing parameter optimization.

    This function must be at module level to be picklable for multiprocessing.

    Args:
        args: Tuple containing (strategy_class, strategy_params, trader_params, config, data_file)

    Returns:
        Dict containing backtest results
    """
    try:
        # data_file is carried in the tuple for symmetry but is not used here:
        # config.data_file is what load_data() actually reads
        strategy_class, strategy_params, trader_params, config, data_file = args

        # Create new storage and backtester instance for this worker
        storage = Storage()
        worker_backtester = IncBacktester(config, storage)

        # Create strategy instance
        strategy = strategy_class(params=strategy_params)

        # Run backtest
        result = worker_backtester.run_single_strategy(strategy, trader_params)
        result["success"] = True

        return result

    except Exception as e:
        logger.error(f"Worker error for {strategy_params}, {trader_params}: {e}")
        return {
            "strategy_params": strategy_params,
            "trader_params": trader_params,
            "error": str(e),
            "success": False
        }


@dataclass
class BacktestConfig:
    """Configuration for backtesting runs."""
    data_file: str
    start_date: str
    end_date: str
    initial_usd: float = 10000
    timeframe: str = "1min"

    # Trader parameters
    stop_loss_pct: float = 0.0
    take_profit_pct: float = 0.0

    # Performance settings
    max_workers: Optional[int] = None
    chunk_size: int = 1000


class IncBacktester:
    """
    Incremental backtester for testing incremental strategies.

    This class orchestrates multiple IncTraders for parallel testing:
    - Loads data using the existing Storage class
    - Creates multiple IncTrader instances with different parameters
    - Feeds data sequentially to all traders
    - Collects and aggregates results
    - Supports multiprocessing for parallel execution
    - Uses SystemUtils for optimal worker count determination

    The backtester can run multiple strategies simultaneously or test
    parameter combinations across multiple CPU cores.

    Example:
        # Single strategy backtest
        config = BacktestConfig(
            data_file="btc_1min_2023.csv",
            start_date="2023-01-01",
            end_date="2023-12-31",
            initial_usd=10000
        )

        strategy = IncRandomStrategy(params={"timeframe": "15min"})
        backtester = IncBacktester(config)
        results = backtester.run_single_strategy(strategy)

        # Multiple strategies
        strategies = [strategy1, strategy2, strategy3]
        results = backtester.run_multiple_strategies(strategies)

        # Parameter optimization (strategy and trader grids are passed separately)
        param_grid = {"timeframe": ["5min", "15min", "30min"]}
        trader_param_grid = {"stop_loss_pct": [0.01, 0.02, 0.03]}
        results = backtester.optimize_parameters(strategy_class, param_grid, trader_param_grid)
    """

    def __init__(self, config: BacktestConfig, storage: Optional[Storage] = None):
        """
        Initialize the incremental backtester.

        Args:
            config: Backtesting configuration
            storage: Storage instance for data loading (creates new if None)
        """
        self.config = config
        self.storage = storage or Storage()
        self.system_utils = SystemUtils(logging=logger)
        self.data = None
        self.results_cache = {}

        # Track all actions performed during backtesting
        self.action_log = []
        self.session_start_time = datetime.now()

        logger.info(f"IncBacktester initialized: {config.data_file}, "
                    f"{config.start_date} to {config.end_date}")

        self._log_action("backtester_initialized", {
            "config": config.__dict__,
            "session_start": self.session_start_time.isoformat()
        })

    def _log_action(self, action_type: str, details: Dict[str, Any]) -> None:
        """Log an action performed during backtesting."""
        self.action_log.append({
            "timestamp": datetime.now().isoformat(),
            "action_type": action_type,
            "details": details
        })

    def load_data(self) -> pd.DataFrame:
        """
        Load and prepare data for backtesting.

        Returns:
            pd.DataFrame: Loaded OHLCV data with DatetimeIndex
        """
        if self.data is None:
            logger.info(f"Loading data from {self.config.data_file}...")
            start_time = time.time()

            self.data = self.storage.load_data(
                self.config.data_file,
                self.config.start_date,
                self.config.end_date
            )

            load_time = time.time() - start_time
            logger.info(f"Data loaded: {len(self.data)} rows in {load_time:.2f}s")

            # Validate data
            if self.data.empty:
                raise ValueError("No data loaded for the specified date range")

            required_columns = ['open', 'high', 'low', 'close', 'volume']
            missing_columns = [col for col in required_columns if col not in self.data.columns]
            if missing_columns:
                raise ValueError(f"Missing required columns: {missing_columns}")

            self._log_action("data_loaded", {
                "file": self.config.data_file,
                "rows": len(self.data),
                "load_time_seconds": load_time,
                "date_range": f"{self.config.start_date} to {self.config.end_date}",
                "columns": list(self.data.columns)
            })

        return self.data

    def run_single_strategy(self, strategy: IncStrategyBase,
                            trader_params: Optional[Dict] = None) -> Dict[str, Any]:
        """
        Run backtest for a single strategy.

        Args:
            strategy: Incremental strategy instance
            trader_params: Additional trader parameters

        Returns:
            Dict containing backtest results
        """
        data = self.load_data()

        # Merge trader parameters
        final_trader_params = {
            "stop_loss_pct": self.config.stop_loss_pct,
            "take_profit_pct": self.config.take_profit_pct
        }
        if trader_params:
            final_trader_params.update(trader_params)

        # Create trader
        trader = IncTrader(
            strategy=strategy,
            initial_usd=self.config.initial_usd,
            params=final_trader_params
        )

        # Run backtest
        logger.info(f"Starting backtest for {strategy.name}...")
        start_time = time.time()

        self._log_action("single_strategy_backtest_started", {
            "strategy_name": strategy.name,
            "strategy_params": strategy.params,
            "trader_params": final_trader_params,
            "data_points": len(data)
        })

        for timestamp, row in data.iterrows():
            ohlcv_data = {
                'open': row['open'],
                'high': row['high'],
                'low': row['low'],
                'close': row['close'],
                'volume': row['volume']
            }
            trader.process_data_point(timestamp, ohlcv_data)

        # Finalize and get results
        trader.finalize()
        results = trader.get_results()

        backtest_time = time.time() - start_time
        results["backtest_duration_seconds"] = backtest_time
        results["data_points"] = len(data)
        results["config"] = self.config.__dict__

        logger.info(f"Backtest completed for {strategy.name} in {backtest_time:.2f}s: "
                    f"${results['final_usd']:.2f} ({results['profit_ratio']*100:.2f}%), "
                    f"{results['n_trades']} trades")

        self._log_action("single_strategy_backtest_completed", {
            "strategy_name": strategy.name,
            "backtest_duration_seconds": backtest_time,
            "final_usd": results['final_usd'],
            "profit_ratio": results['profit_ratio'],
            "n_trades": results['n_trades'],
            "win_rate": results['win_rate']
        })

        return results

    def run_multiple_strategies(self, strategies: List[IncStrategyBase],
                                trader_params: Optional[Dict] = None) -> List[Dict[str, Any]]:
        """
        Run backtest for multiple strategies simultaneously.

        Args:
            strategies: List of incremental strategy instances
            trader_params: Additional trader parameters

        Returns:
            List of backtest results for each strategy
        """
        self._log_action("multiple_strategies_backtest_started", {
            "strategy_count": len(strategies),
            "strategy_names": [s.name for s in strategies]
        })

        results = []

        for strategy in strategies:
            try:
                result = self.run_single_strategy(strategy, trader_params)
                results.append(result)
            except Exception as e:
                logger.error(f"Error running strategy {strategy.name}: {e}")
                # Add error result
                error_result = {
                    "strategy_name": strategy.name,
                    "error": str(e),
                    "success": False
                }
                results.append(error_result)

                self._log_action("strategy_error", {
                    "strategy_name": strategy.name,
                    "error": str(e)
                })

        self._log_action("multiple_strategies_backtest_completed", {
            "total_strategies": len(strategies),
            "successful_strategies": len([r for r in results if r.get("success", True)]),
            "failed_strategies": len([r for r in results if not r.get("success", True)])
        })

        return results

    def optimize_parameters(self, strategy_class: type, param_grid: Dict[str, List],
                            trader_param_grid: Optional[Dict[str, List]] = None,
                            max_workers: Optional[int] = None) -> List[Dict[str, Any]]:
        """
        Optimize strategy parameters using grid search with multiprocessing.

        Args:
            strategy_class: Strategy class to instantiate
            param_grid: Grid of strategy parameters to test
            trader_param_grid: Grid of trader parameters to test
            max_workers: Maximum number of worker processes (uses SystemUtils if None)

        Returns:
            List of results for each parameter combination
        """
        # Generate parameter combinations
        strategy_combinations = list(self._generate_param_combinations(param_grid))
        trader_combinations = list(self._generate_param_combinations(trader_param_grid or {}))

        # If no trader param grid, use default
        if not trader_combinations:
            trader_combinations = [{}]

        # Create all combinations
        all_combinations = []
        for strategy_params in strategy_combinations:
            for trader_params in trader_combinations:
                all_combinations.append((strategy_params, trader_params))

        logger.info(f"Starting parameter optimization: {len(all_combinations)} combinations")

        # Determine number of workers using SystemUtils; both branches cap the
        # count so we never spawn more workers than there are combinations
        if max_workers is None:
            max_workers = min(self.system_utils.get_optimal_workers(), len(all_combinations))
        else:
            max_workers = min(max_workers, len(all_combinations))

        self._log_action("parameter_optimization_started", {
            "strategy_class": strategy_class.__name__,
            "total_combinations": len(all_combinations),
            "max_workers": max_workers,
            "strategy_param_grid": param_grid,
            "trader_param_grid": trader_param_grid or {}
        })

        # Run optimization
        if max_workers == 1 or len(all_combinations) == 1:
            # Sequential execution in this process
            results = []
            for strategy_params, trader_params in all_combinations:
                result = self._run_single_combination(strategy_class, strategy_params, trader_params)
                results.append(result)
        else:
            # Parallel execution across worker processes
            results = self._run_parallel_optimization(
                strategy_class, all_combinations, max_workers
            )

        # Sort results by profit ratio
        valid_results = [r for r in results if r.get("success", True)]
        valid_results.sort(key=lambda x: x.get("profit_ratio", -float('inf')), reverse=True)

        logger.info(f"Parameter optimization completed: {len(valid_results)} successful runs")

        self._log_action("parameter_optimization_completed", {
            "total_runs": len(results),
            "successful_runs": len(valid_results),
            "failed_runs": len(results) - len(valid_results),
            "best_profit_ratio": valid_results[0]["profit_ratio"] if valid_results else None,
            "worst_profit_ratio": valid_results[-1]["profit_ratio"] if valid_results else None
        })

        return results

    def _generate_param_combinations(self, param_grid: Dict[str, List]) -> List[Dict]:
        """Generate all parameter combinations from grid."""
        if not param_grid:
            return [{}]

        keys = list(param_grid.keys())
        values = list(param_grid.values())

        combinations = []
        for combination in product(*values):
            param_dict = dict(zip(keys, combination))
            combinations.append(param_dict)

        return combinations
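
    # Illustrative expansion (not in the original source): a 2x2 grid such as
    #   {"timeframe": ["5min", "15min"], "entry_probability": [0.1, 0.2]}
    # yields four dicts via itertools.product:
    #   {"timeframe": "5min",  "entry_probability": 0.1}
    #   {"timeframe": "5min",  "entry_probability": 0.2}
    #   {"timeframe": "15min", "entry_probability": 0.1}
    #   {"timeframe": "15min", "entry_probability": 0.2}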

    def _run_single_combination(self, strategy_class: type, strategy_params: Dict,
                                trader_params: Dict) -> Dict[str, Any]:
        """Run backtest for a single parameter combination."""
        try:
            # Create strategy instance
            strategy = strategy_class(params=strategy_params)

            # Run backtest
            result = self.run_single_strategy(strategy, trader_params)
            result["success"] = True

            return result

        except Exception as e:
            logger.error(f"Error in parameter combination {strategy_params}, {trader_params}: {e}")
            return {
                "strategy_params": strategy_params,
                "trader_params": trader_params,
                "error": str(e),
                "success": False
            }

    def _run_parallel_optimization(self, strategy_class: type, combinations: List,
                                   max_workers: int) -> List[Dict[str, Any]]:
        """Run parameter optimization in parallel."""
        results = []

        # Prepare arguments for worker function
        worker_args = []
        for strategy_params, trader_params in combinations:
            args = (strategy_class, strategy_params, trader_params, self.config, self.config.data_file)
            worker_args.append(args)

        # Execute in parallel
        with ProcessPoolExecutor(max_workers=max_workers) as executor:
            # Submit all jobs
            future_to_params = {
                executor.submit(_worker_function, args): args[1:3]  # strategy_params, trader_params
                for args in worker_args
            }

            # Collect results as they complete (note: completion order, not
            # submission order; callers sort by profit ratio afterwards)
            for future in as_completed(future_to_params):
                combo = future_to_params[future]
                try:
                    result = future.result()
                    results.append(result)

                    if result.get("success", True):
                        logger.info(f"Completed: {combo[0]} -> "
                                    f"${result.get('final_usd', 0):.2f} "
                                    f"({result.get('profit_ratio', 0)*100:.2f}%)")
                except Exception as e:
                    logger.error(f"Worker error for {combo}: {e}")
                    results.append({
                        "strategy_params": combo[0],
                        "trader_params": combo[1],
                        "error": str(e),
                        "success": False
                    })

        return results

    def get_summary_statistics(self, results: List[Dict[str, Any]]) -> Dict[str, Any]:
        """
        Calculate summary statistics across multiple backtest results.

        Args:
            results: List of backtest results

        Returns:
            Dict containing summary statistics
        """
        valid_results = [r for r in results if r.get("success", True)]

        if not valid_results:
            return {
                "total_runs": len(results),
                "successful_runs": 0,
                "failed_runs": len(results),
                "error": "No valid results to summarize"
            }

        # Extract metrics
        profit_ratios = [r["profit_ratio"] for r in valid_results]
        final_balances = [r["final_usd"] for r in valid_results]
        n_trades_list = [r["n_trades"] for r in valid_results]
        win_rates = [r["win_rate"] for r in valid_results]
        max_drawdowns = [r["max_drawdown"] for r in valid_results]

        summary = {
            "total_runs": len(results),
            "successful_runs": len(valid_results),
            "failed_runs": len(results) - len(valid_results),

            # Profit statistics
            "profit_ratio": {
                "mean": np.mean(profit_ratios),
                "std": np.std(profit_ratios),
                "min": np.min(profit_ratios),
                "max": np.max(profit_ratios),
                "median": np.median(profit_ratios)
            },

            # Balance statistics
            "final_usd": {
                "mean": np.mean(final_balances),
                "std": np.std(final_balances),
                "min": np.min(final_balances),
                "max": np.max(final_balances),
                "median": np.median(final_balances)
            },

            # Trading statistics
            "n_trades": {
                "mean": np.mean(n_trades_list),
                "std": np.std(n_trades_list),
                "min": np.min(n_trades_list),
                "max": np.max(n_trades_list),
                "median": np.median(n_trades_list)
            },

            # Performance statistics
            "win_rate": {
                "mean": np.mean(win_rates),
                "std": np.std(win_rates),
                "min": np.min(win_rates),
                "max": np.max(win_rates),
                "median": np.median(win_rates)
            },

            "max_drawdown": {
                "mean": np.mean(max_drawdowns),
                "std": np.std(max_drawdowns),
                "min": np.min(max_drawdowns),
                "max": np.max(max_drawdowns),
                "median": np.median(max_drawdowns)
            },

            # Best performing run
            "best_run": max(valid_results, key=lambda x: x["profit_ratio"]),
            "worst_run": min(valid_results, key=lambda x: x["profit_ratio"])
        }

        return summary

    def save_comprehensive_results(self, results: List[Dict[str, Any]],
                                   base_filename: str,
                                   summary: Optional[Dict[str, Any]] = None) -> None:
        """
        Save comprehensive backtest results including summary, individual results, and action log.

        Args:
            results: List of backtest results
            base_filename: Base filename (without extension)
            summary: Optional summary statistics
        """
        try:
            # Make sure the output directory exists (the example script also
            # calls ensure_results_directory() before running)
            os.makedirs("results", exist_ok=True)

            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

            # 1. Save summary report
            if summary is None:
                summary = self.get_summary_statistics(results)

            summary_data = {
                "session_info": {
                    "timestamp": timestamp,
                    "session_start": self.session_start_time.isoformat(),
                    "session_duration_seconds": (datetime.now() - self.session_start_time).total_seconds(),
                    "config": self.config.__dict__
                },
                "summary_statistics": summary,
                "action_log_summary": {
                    "total_actions": len(self.action_log),
                    "action_types": list(set(action["action_type"] for action in self.action_log))
                }
            }

            summary_filename = f"{base_filename}_summary_{timestamp}.json"
            with open(f"results/{summary_filename}", 'w') as f:
                json.dump(summary_data, f, indent=2, default=str)
            logger.info(f"Summary saved to results/{summary_filename}")

            # 2. Save detailed results CSV
            self.save_results(results, f"{base_filename}_detailed_{timestamp}.csv")

            # 3. Save individual strategy results
            valid_results = [r for r in results if r.get("success", True)]
            for i, result in enumerate(valid_results):
                strategy_filename = f"{base_filename}_strategy_{i+1}_{result['strategy_name']}_{timestamp}.json"

                # Include trades and detailed info
                strategy_data = {
                    "strategy_info": {
                        "name": result['strategy_name'],
                        "params": result.get('strategy_params', {}),
                        "trader_params": result.get('trader_params', {})
                    },
                    "performance": {
                        "initial_usd": result['initial_usd'],
                        "final_usd": result['final_usd'],
                        "profit_ratio": result['profit_ratio'],
                        "n_trades": result['n_trades'],
                        "win_rate": result['win_rate'],
                        "max_drawdown": result['max_drawdown'],
                        "avg_trade": result['avg_trade'],
                        "total_fees_usd": result['total_fees_usd']
                    },
                    "execution": {
                        "backtest_duration_seconds": result.get('backtest_duration_seconds', 0),
                        "data_points_processed": result.get('data_points_processed', 0),
                        "warmup_complete": result.get('warmup_complete', False)
                    },
                    "trades": result.get('trades', [])
                }

                with open(f"results/{strategy_filename}", 'w') as f:
                    json.dump(strategy_data, f, indent=2, default=str)
                logger.info(f"Strategy {i+1} details saved to results/{strategy_filename}")

            # 4. Save complete action log
            action_log_filename = f"{base_filename}_actions_{timestamp}.json"
            action_log_data = {
                "session_info": {
                    "timestamp": timestamp,
                    "session_start": self.session_start_time.isoformat(),
                    "total_actions": len(self.action_log)
                },
                "actions": self.action_log
            }

            with open(f"results/{action_log_filename}", 'w') as f:
                json.dump(action_log_data, f, indent=2, default=str)
            logger.info(f"Action log saved to results/{action_log_filename}")

            # 5. Create a master index file
            index_filename = f"{base_filename}_index_{timestamp}.json"
            index_data = {
                "session_info": {
                    "timestamp": timestamp,
                    "base_filename": base_filename,
                    "total_strategies": len(valid_results),
                    "session_duration_seconds": (datetime.now() - self.session_start_time).total_seconds()
                },
                "files": {
                    "summary": summary_filename,
                    "detailed_csv": f"{base_filename}_detailed_{timestamp}.csv",
                    "action_log": action_log_filename,
                    "individual_strategies": [
                        f"{base_filename}_strategy_{i+1}_{result['strategy_name']}_{timestamp}.json"
                        for i, result in enumerate(valid_results)
                    ]
                },
                "quick_stats": {
                    "best_profit": summary.get("profit_ratio", {}).get("max", 0) if summary.get("profit_ratio") else 0,
                    "worst_profit": summary.get("profit_ratio", {}).get("min", 0) if summary.get("profit_ratio") else 0,
                    "avg_profit": summary.get("profit_ratio", {}).get("mean", 0) if summary.get("profit_ratio") else 0,
                    "total_successful_runs": summary.get("successful_runs", 0),
                    "total_failed_runs": summary.get("failed_runs", 0)
                }
            }

            with open(f"results/{index_filename}", 'w') as f:
                json.dump(index_data, f, indent=2, default=str)
            logger.info(f"Master index saved to results/{index_filename}")

            print("\n📊 Comprehensive results saved:")
            print(f" 📋 Summary: results/{summary_filename}")
            print(f" 📈 Detailed CSV: results/{base_filename}_detailed_{timestamp}.csv")
            print(f" 📝 Action Log: results/{action_log_filename}")
            print(f" 📁 Individual Strategies: {len(valid_results)} files")
            print(f" 🗂️ Master Index: results/{index_filename}")

        except Exception as e:
            logger.error(f"Error saving comprehensive results: {e}")
            raise

    def save_results(self, results: List[Dict[str, Any]], filename: str) -> None:
        """
        Save backtest results to file.

        Args:
            results: List of backtest results
            filename: Output filename
        """
        try:
            # Convert results to DataFrame for easy saving
            df_data = []
            for result in results:
                if result.get("success", True):
                    row = {
                        "strategy_name": result.get("strategy_name", ""),
                        "profit_ratio": result.get("profit_ratio", 0),
                        "final_usd": result.get("final_usd", 0),
                        "n_trades": result.get("n_trades", 0),
                        "win_rate": result.get("win_rate", 0),
                        "max_drawdown": result.get("max_drawdown", 0),
                        "avg_trade": result.get("avg_trade", 0),
                        "total_fees_usd": result.get("total_fees_usd", 0),
                        "backtest_duration_seconds": result.get("backtest_duration_seconds", 0),
                        "data_points_processed": result.get("data_points_processed", 0)
                    }

                    # Add strategy parameters
                    strategy_params = result.get("strategy_params", {})
                    for key, value in strategy_params.items():
                        row[f"strategy_{key}"] = value

                    # Add trader parameters
                    trader_params = result.get("trader_params", {})
                    for key, value in trader_params.items():
                        row[f"trader_{key}"] = value

                    df_data.append(row)

            # Save to CSV
            df = pd.DataFrame(df_data)
            self.storage.save_data(df, filename)

            logger.info(f"Results saved to {filename}: {len(df_data)} rows")

        except Exception as e:
            logger.error(f"Error saving results to {filename}: {e}")
            raise

    def __repr__(self) -> str:
        """String representation of the backtester."""
        return (f"IncBacktester(data_file={self.config.data_file}, "
                f"date_range={self.config.start_date} to {self.config.end_date}, "
                f"initial_usd=${self.config.initial_usd})")
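

# A minimal usage sketch (not part of the original module; assumptions flagged
# inline). Run with `python -m <package>.backtester` so the relative imports
# resolve; the strategy import path and CSV name below are placeholders.
if __name__ == "__main__":
    from .inc_random import IncRandomStrategy  # assumed module path

    demo_config = BacktestConfig(
        data_file="btc_1min_2023.csv",  # hypothetical data file
        start_date="2023-01-01",
        end_date="2023-01-31",
        initial_usd=10000,
        stop_loss_pct=0.02,
    )
    demo_backtester = IncBacktester(demo_config)
    demo_result = demo_backtester.run_single_strategy(
        IncRandomStrategy(params={"timeframe": "15min"})
    )
    print(f"profit: {demo_result['profit_ratio']*100:.2f}% "
          f"over {demo_result['n_trades']} trades")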
@ -1,344 +0,0 @@
"""
Incremental Trader for backtesting incremental strategies.

This module provides the IncTrader class that manages a single incremental strategy
during backtesting, handling position state, trade execution, and performance tracking.
"""

import pandas as pd
import numpy as np
from typing import Dict, Optional, List, Any
import logging
from dataclasses import dataclass

from .base import IncStrategyBase, IncStrategySignal
from ..market_fees import MarketFees

logger = logging.getLogger(__name__)


@dataclass
class TradeRecord:
    """Record of a completed trade."""
    entry_time: pd.Timestamp
    exit_time: pd.Timestamp
    entry_price: float
    exit_price: float
    entry_fee: float
    exit_fee: float
    profit_pct: float
    exit_reason: str
    strategy_name: str


class IncTrader:
    """
    Incremental trader that manages a single strategy during backtesting.

    This class handles:
    - Strategy initialization and data feeding
    - Position management (USD/coin balance)
    - Trade execution based on strategy signals
    - Performance tracking and metrics collection
    - Fee calculation and trade logging

    The trader processes data points sequentially, feeding them to the strategy
    and executing trades based on the generated signals.

    Example:
        strategy = IncRandomStrategy(params={"timeframe": "15min"})
        trader = IncTrader(
            strategy=strategy,
            initial_usd=10000,
            params={"stop_loss_pct": 0.02}
        )

        # Process data sequentially
        for timestamp, ohlcv_data in data_stream:
            trader.process_data_point(timestamp, ohlcv_data)

        # Get results
        results = trader.get_results()
    """

    def __init__(self, strategy: IncStrategyBase, initial_usd: float = 10000,
                 params: Optional[Dict] = None):
        """
        Initialize the incremental trader.

        Args:
            strategy: Incremental strategy instance
            initial_usd: Initial USD balance
            params: Trader parameters (stop_loss_pct, take_profit_pct, etc.)
        """
        self.strategy = strategy
        self.initial_usd = initial_usd
        self.params = params or {}

        # Position state
        self.usd = initial_usd
        self.coin = 0.0
        self.position = 0  # 0 = no position, 1 = long position
        self.entry_price = 0.0
        self.entry_time = None
        self.entry_fee = 0.0  # actual fee paid on entry, kept for the trade record

        # Performance tracking
        self.max_balance = initial_usd
        self.drawdowns = []
        self.trade_records = []
        self.current_timestamp = None
        self.current_price = None

        # Strategy state
        self.data_points_processed = 0
        self.warmup_complete = False

        # Parameters
        self.stop_loss_pct = self.params.get("stop_loss_pct", 0.0)
        self.take_profit_pct = self.params.get("take_profit_pct", 0.0)

        logger.info(f"IncTrader initialized: strategy={strategy.name}, "
                    f"initial_usd=${initial_usd}, stop_loss={self.stop_loss_pct*100:.1f}%")

    def process_data_point(self, timestamp: pd.Timestamp, ohlcv_data: Dict[str, float]) -> None:
        """
        Process a single data point through the strategy and handle trading logic.

        Args:
            timestamp: Data point timestamp
            ohlcv_data: OHLCV data dictionary with keys: open, high, low, close, volume
        """
        self.current_timestamp = timestamp
        self.current_price = ohlcv_data['close']
        self.data_points_processed += 1

        try:
            # Feed data to strategy (handles timeframe aggregation internally)
            result = self.strategy.update_minute_data(timestamp, ohlcv_data)

            # Check if strategy is warmed up
            if not self.warmup_complete and self.strategy.is_warmed_up:
                self.warmup_complete = True
                logger.info(f"Strategy {self.strategy.name} warmed up after "
                            f"{self.data_points_processed} data points")

            # Only process signals if strategy is warmed up and we have a complete timeframe bar
            if self.warmup_complete and result is not None:
                self._process_trading_logic()

            # Update performance tracking
            self._update_performance_metrics()

        except Exception as e:
            logger.error(f"Error processing data point at {timestamp}: {e}")
            raise

    def _process_trading_logic(self) -> None:
        """Process trading logic based on current position and strategy signals."""
        if self.position == 0:
            # No position - check for entry signals
            self._check_entry_signals()
        else:
            # In position - check for exit signals
            self._check_exit_signals()

    def _check_entry_signals(self) -> None:
        """Check for entry signals when not in position."""
        try:
            entry_signal = self.strategy.get_entry_signal()

            if entry_signal.signal_type == "ENTRY" and entry_signal.confidence > 0:
                self._execute_entry(entry_signal)

        except Exception as e:
            logger.error(f"Error checking entry signals: {e}")

    def _check_exit_signals(self) -> None:
        """Check for exit signals when in position."""
        try:
            # Check strategy exit signals
            exit_signal = self.strategy.get_exit_signal()

            if exit_signal.signal_type == "EXIT" and exit_signal.confidence > 0:
                exit_reason = exit_signal.metadata.get("type", "STRATEGY_EXIT")
                self._execute_exit(exit_reason, exit_signal.price)
                return

            # Check stop loss (e.g. entry at $100 with a 2% stop exits at or below $98)
            if self.stop_loss_pct > 0:
                stop_loss_price = self.entry_price * (1 - self.stop_loss_pct)
                if self.current_price <= stop_loss_price:
                    self._execute_exit("STOP_LOSS", self.current_price)
                    return

            # Check take profit
            if self.take_profit_pct > 0:
                take_profit_price = self.entry_price * (1 + self.take_profit_pct)
                if self.current_price >= take_profit_price:
                    self._execute_exit("TAKE_PROFIT", self.current_price)
                    return

        except Exception as e:
            logger.error(f"Error checking exit signals: {e}")

    def _execute_entry(self, signal: IncStrategySignal) -> None:
        """Execute entry trade."""
        entry_price = signal.price if signal.price else self.current_price
        entry_fee = MarketFees.calculate_okx_taker_maker_fee(self.usd, is_maker=False)
        usd_after_fee = self.usd - entry_fee

        self.coin = usd_after_fee / entry_price
        self.entry_price = entry_price
        self.entry_time = self.current_timestamp
        self.entry_fee = entry_fee  # remember the fee actually paid for the trade record
        self.usd = 0.0
        self.position = 1

        logger.info(f"ENTRY: {self.strategy.name} at ${entry_price:.2f}, "
                    f"confidence={signal.confidence:.2f}, fee=${entry_fee:.2f}")

    def _execute_exit(self, exit_reason: str, exit_price: Optional[float] = None) -> None:
        """Execute exit trade."""
        exit_price = exit_price if exit_price else self.current_price
        usd_gross = self.coin * exit_price
        exit_fee = MarketFees.calculate_okx_taker_maker_fee(usd_gross, is_maker=False)

        self.usd = usd_gross - exit_fee

        # Calculate profit
        profit_pct = (exit_price - self.entry_price) / self.entry_price

        # Record trade, using the entry fee that was actually charged
        # (recomputing it from coin * entry_price would understate it,
        # since the coin amount was bought with post-fee USD)
        trade_record = TradeRecord(
            entry_time=self.entry_time,
            exit_time=self.current_timestamp,
            entry_price=self.entry_price,
            exit_price=exit_price,
            entry_fee=self.entry_fee,
            exit_fee=exit_fee,
            profit_pct=profit_pct,
            exit_reason=exit_reason,
            strategy_name=self.strategy.name
        )
        self.trade_records.append(trade_record)

        # Reset position
        self.coin = 0.0
        self.position = 0
        self.entry_price = 0.0
        self.entry_time = None
        self.entry_fee = 0.0

        logger.info(f"EXIT: {self.strategy.name} at ${exit_price:.2f}, "
                    f"reason={exit_reason}, profit={profit_pct*100:.2f}%, fee=${exit_fee:.2f}")

    def _update_performance_metrics(self) -> None:
        """Update performance tracking metrics."""
        # Calculate current balance
        if self.position == 0:
            current_balance = self.usd
        else:
            current_balance = self.coin * self.current_price

        # Update max balance and drawdown
        if current_balance > self.max_balance:
            self.max_balance = current_balance

        drawdown = (self.max_balance - current_balance) / self.max_balance
        self.drawdowns.append(drawdown)

    def finalize(self) -> None:
        """Finalize trading session (close any open positions)."""
        if self.position == 1:
            self._execute_exit("EOD", self.current_price)
            logger.info(f"Closed final position for {self.strategy.name} at EOD")

    def get_results(self) -> Dict[str, Any]:
        """
        Get comprehensive trading results.

        Returns:
            Dict containing performance metrics, trade records, and statistics
        """
        # Assumes finalize() has been called, so any open position is closed
        # and the full balance is back in USD
        final_balance = self.usd
        n_trades = len(self.trade_records)

        # Calculate statistics
        if n_trades > 0:
            profits = [trade.profit_pct for trade in self.trade_records]
            wins = [p for p in profits if p > 0]
            win_rate = len(wins) / n_trades
            avg_trade = np.mean(profits)
            total_fees = sum(trade.entry_fee + trade.exit_fee for trade in self.trade_records)
        else:
            win_rate = 0.0
            avg_trade = 0.0
            total_fees = 0.0

        max_drawdown = max(self.drawdowns) if self.drawdowns else 0.0
        profit_ratio = (final_balance - self.initial_usd) / self.initial_usd

        # Convert trade records to dictionaries
        trades = []
        for trade in self.trade_records:
            trades.append({
                'entry_time': trade.entry_time,
                'exit_time': trade.exit_time,
                'entry': trade.entry_price,
                'exit': trade.exit_price,
                'profit_pct': trade.profit_pct,
                'type': trade.exit_reason,
                'fee_usd': trade.entry_fee + trade.exit_fee,
                'strategy': trade.strategy_name
            })

        results = {
            "strategy_name": self.strategy.name,
            "strategy_params": self.strategy.params,
            "trader_params": self.params,
            "initial_usd": self.initial_usd,
            "final_usd": final_balance,
            "profit_ratio": profit_ratio,
            "n_trades": n_trades,
            "win_rate": win_rate,
            "max_drawdown": max_drawdown,
            "avg_trade": avg_trade,
            "total_fees_usd": total_fees,
            "data_points_processed": self.data_points_processed,
            "warmup_complete": self.warmup_complete,
            "trades": trades
        }

        # Add first and last trade info if available
        if n_trades > 0:
            results["first_trade"] = {
                "entry_time": self.trade_records[0].entry_time,
                "entry": self.trade_records[0].entry_price
            }
            results["last_trade"] = {
                "exit_time": self.trade_records[-1].exit_time,
                "exit": self.trade_records[-1].exit_price
            }

        return results

    def get_current_state(self) -> Dict[str, Any]:
        """Get current trader state for debugging."""
        return {
            "strategy": self.strategy.name,
            "position": self.position,
            "usd": self.usd,
            "coin": self.coin,
            "current_price": self.current_price,
            "entry_price": self.entry_price,
            "data_points_processed": self.data_points_processed,
            "warmup_complete": self.warmup_complete,
            "n_trades": len(self.trade_records),
            "strategy_state": self.strategy.get_current_state_summary()
        }

    def __repr__(self) -> str:
        """String representation of the trader."""
        return (f"IncTrader(strategy={self.strategy.name}, "
                f"position={self.position}, usd=${self.usd:.2f}, "
                f"trades={len(self.trade_records)})")
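

# Minimal smoke-test sketch (not part of the original module; run via
# `python -m ...` so the relative imports resolve). The IncRandomStrategy
# import path below is an assumption about the package layout.
if __name__ == "__main__":
    from .inc_random import IncRandomStrategy  # assumed module path

    demo_strategy = IncRandomStrategy(params={"timeframe": "1min", "random_seed": 1})
    demo_trader = IncTrader(demo_strategy, initial_usd=1000,
                            params={"stop_loss_pct": 0.02})

    # One hour of flat synthetic bars is enough to exercise the feed loop
    for ts in pd.date_range("2023-01-01", periods=60, freq="1min"):
        demo_trader.process_data_point(ts, {
            'open': 100.0, 'high': 100.5, 'low': 99.5,
            'close': 100.0, 'volume': 1.0,
        })
    demo_trader.finalize()
    print(demo_trader.get_results()["final_usd"])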
@ -1,44 +0,0 @@
"""
Incremental Indicator States Module

This module contains indicator state classes that maintain calculation state
for incremental processing of technical indicators.

All indicator states implement the IndicatorState interface and provide:
- Incremental updates with new data points
- Constant memory usage regardless of data history
- Identical results to traditional batch calculations
- Warm-up detection for reliable indicator values

Classes:
    IndicatorState: Abstract base class for all indicator states
    MovingAverageState: Incremental moving average calculation
    ExponentialMovingAverageState: Incremental exponential moving average calculation
    RSIState: Incremental RSI calculation
    SimpleRSIState: Incremental simple RSI calculation
    ATRState: Incremental Average True Range calculation
    SimpleATRState: Incremental simple ATR calculation
    SupertrendState: Incremental Supertrend calculation
    BollingerBandsState: Incremental Bollinger Bands calculation
    BollingerBandsOHLCState: Incremental Bollinger Bands OHLC calculation
"""

from .base import IndicatorState
from .moving_average import MovingAverageState, ExponentialMovingAverageState
from .rsi import RSIState, SimpleRSIState
from .atr import ATRState, SimpleATRState
from .supertrend import SupertrendState
from .bollinger_bands import BollingerBandsState, BollingerBandsOHLCState

__all__ = [
    'IndicatorState',
    'MovingAverageState',
    'ExponentialMovingAverageState',
    'RSIState',
    'SimpleRSIState',
    'ATRState',
    'SimpleATRState',
    'SupertrendState',
    'BollingerBandsState',
    'BollingerBandsOHLCState'
]
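

# Hedged usage sketch (not part of the original module): an incremental
# moving average should match the batch mean over the same window; the
# update(float) signature follows the IndicatorState interface.
if __name__ == "__main__":
    ma = MovingAverageState(period=3)
    current = None
    for price in [1.0, 2.0, 3.0, 4.0]:
        current = ma.update(price)
    # After the last update the window holds [2.0, 3.0, 4.0] -> mean 3.0
    print(current)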
@ -1,242 +0,0 @@
"""
Average True Range (ATR) Indicator State

This module implements incremental ATR calculation that maintains constant memory usage
and provides identical results to traditional batch calculations. ATR is used by
Supertrend and other volatility-based indicators.
"""

from collections import deque
from typing import Dict, Union, Optional

from .base import OHLCIndicatorState
from .moving_average import ExponentialMovingAverageState


class ATRState(OHLCIndicatorState):
    """
    Incremental Average True Range calculation state.

    ATR measures market volatility by calculating the average of true ranges over
    a specified period. True Range is the maximum of:
    1. Current High - Current Low
    2. |Current High - Previous Close|
    3. |Current Low - Previous Close|

    This implementation uses exponential moving average for smoothing, which is
    more responsive than simple moving average and requires less memory.

    Attributes:
        period (int): The ATR period
        ema_state (ExponentialMovingAverageState): EMA state for smoothing true ranges
        previous_close (float): Previous period's close price

    Example:
        atr = ATRState(period=14)

        # Add OHLC data incrementally
        ohlc = {'open': 100, 'high': 105, 'low': 98, 'close': 103}
        atr_value = atr.update(ohlc)  # Returns current ATR value

        # Check if warmed up
        if atr.is_warmed_up():
            current_atr = atr.get_current_value()
    """

    def __init__(self, period: int = 14):
        """
        Initialize ATR state.

        Args:
            period: Number of periods for ATR calculation (default: 14)

        Raises:
            ValueError: If period is not a positive integer
        """
        super().__init__(period)
        self.ema_state = ExponentialMovingAverageState(period)
        self.previous_close = None
        self.is_initialized = True

    def update(self, ohlc_data: Dict[str, float]) -> float:
        """
        Update ATR with new OHLC data.

        Args:
            ohlc_data: Dictionary with 'open', 'high', 'low', 'close' keys

        Returns:
            Current ATR value

        Raises:
            ValueError: If OHLC data is invalid
            TypeError: If ohlc_data is not a dictionary
        """
        # Validate input
        if not isinstance(ohlc_data, dict):
            raise TypeError(f"ohlc_data must be a dictionary, got {type(ohlc_data)}")

        self.validate_input(ohlc_data)

        high = float(ohlc_data['high'])
        low = float(ohlc_data['low'])
        close = float(ohlc_data['close'])

        # Calculate True Range
        if self.previous_close is None:
            # First period - True Range is just High - Low
            true_range = high - low
        else:
            # True Range is the maximum of:
            # 1. Current High - Current Low
            # 2. |Current High - Previous Close|
            # 3. |Current Low - Previous Close|
            tr1 = high - low
            tr2 = abs(high - self.previous_close)
            tr3 = abs(low - self.previous_close)
            true_range = max(tr1, tr2, tr3)

        # Update EMA with the true range
        atr_value = self.ema_state.update(true_range)

        # Store current close as previous close for next calculation
        self.previous_close = close
        self.values_received += 1

        # Store current ATR value
        self._current_values = {'atr': atr_value}

        return atr_value

    def is_warmed_up(self) -> bool:
        """
        Check if ATR has enough data for reliable values.

        Returns:
            True if EMA state is warmed up (has enough true range values)
        """
        return self.ema_state.is_warmed_up()

    def reset(self) -> None:
        """Reset ATR state to initial conditions."""
        self.ema_state.reset()
        self.previous_close = None
        self.values_received = 0
        self._current_values = {}

    def get_current_value(self) -> Optional[float]:
        """
        Get current ATR value without updating.

        Returns:
            Current ATR value, or None if not warmed up
        """
        if not self.is_warmed_up():
            return None
        return self.ema_state.get_current_value()

    def get_state_summary(self) -> dict:
        """Get detailed state summary for debugging."""
        base_summary = super().get_state_summary()
        base_summary.update({
            'previous_close': self.previous_close,
            'ema_state': self.ema_state.get_state_summary(),
            'current_atr': self.get_current_value()
        })
        return base_summary


class SimpleATRState(OHLCIndicatorState):
    """
    Simple ATR implementation using simple moving average instead of EMA.

    This version uses a simple moving average for smoothing true ranges,
    which matches some traditional ATR implementations but requires more memory.
    """

    def __init__(self, period: int = 14):
        """
        Initialize simple ATR state.

        Args:
            period: Number of periods for ATR calculation (default: 14)
        """
        super().__init__(period)
        self.true_ranges = deque(maxlen=period)  # deque imported at module level
        self.tr_sum = 0.0
        self.previous_close = None
        self.is_initialized = True

    def update(self, ohlc_data: Dict[str, float]) -> float:
        """
        Update simple ATR with new OHLC data.

        Args:
            ohlc_data: Dictionary with 'open', 'high', 'low', 'close' keys

        Returns:
            Current ATR value
        """
        # Validate input
        if not isinstance(ohlc_data, dict):
            raise TypeError(f"ohlc_data must be a dictionary, got {type(ohlc_data)}")

        self.validate_input(ohlc_data)

        high = float(ohlc_data['high'])
        low = float(ohlc_data['low'])
        close = float(ohlc_data['close'])

        # Calculate True Range
        if self.previous_close is None:
            true_range = high - low
        else:
            tr1 = high - low
            tr2 = abs(high - self.previous_close)
            tr3 = abs(low - self.previous_close)
            true_range = max(tr1, tr2, tr3)

        # Update rolling sum
        if len(self.true_ranges) == self.period:
            self.tr_sum -= self.true_ranges[0]  # Remove oldest value

        self.true_ranges.append(true_range)
        self.tr_sum += true_range

        # Calculate ATR as simple moving average
        atr_value = self.tr_sum / len(self.true_ranges)

        # Store state
        self.previous_close = close
        self.values_received += 1
        self._current_values = {'atr': atr_value}

        return atr_value

    def is_warmed_up(self) -> bool:
        """Check if simple ATR is warmed up."""
        return len(self.true_ranges) >= self.period

    def reset(self) -> None:
        """Reset simple ATR state."""
        self.true_ranges.clear()
        self.tr_sum = 0.0
        self.previous_close = None
        self.values_received = 0
        self._current_values = {}

    def get_current_value(self) -> Optional[float]:
        """Get current simple ATR value."""
        if not self.is_warmed_up():
            return None
        return self.tr_sum / len(self.true_ranges)

    def get_state_summary(self) -> dict:
        """Get detailed state summary for debugging."""
        base_summary = super().get_state_summary()
        base_summary.update({
            'previous_close': self.previous_close,
            'tr_window_size': len(self.true_ranges),
            'tr_sum': self.tr_sum,
            'current_atr': self.get_current_value()
        })
        return base_summary
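

# Hedged worked example (not part of the original module): two bars checked
# by hand against the true-range rules described in the ATRState docstring.
if __name__ == "__main__":
    atr = SimpleATRState(period=2)
    atr.update({'open': 100, 'high': 105, 'low': 98, 'close': 103})  # TR = 105 - 98 = 7
    value = atr.update({'open': 103, 'high': 110, 'low': 102, 'close': 108})
    # TR = max(110 - 102, |110 - 103|, |102 - 103|) = 8, so ATR = (7 + 8) / 2 = 7.5
    assert abs(value - 7.5) < 1e-9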
@ -1,197 +0,0 @@
"""
Base Indicator State Class

This module contains the abstract base class for all incremental indicator states.
All indicator implementations must inherit from IndicatorState and implement
the required methods for incremental calculation.
"""

from abc import ABC, abstractmethod
from typing import Any, Dict, Optional, Union
import numpy as np


class IndicatorState(ABC):
    """
    Abstract base class for maintaining indicator calculation state.

    This class defines the interface that all incremental indicators must implement.
    Indicators maintain their internal state and can be updated incrementally with
    new data points, providing constant memory usage and high performance.

    Attributes:
        period (int): The period/window size for the indicator
        values_received (int): Number of values processed so far
        is_initialized (bool): Whether the indicator has been initialized

    Example:
        class MyIndicator(IndicatorState):
            def __init__(self, period: int):
                super().__init__(period)
                self._sum = 0.0

            def update(self, new_value: float) -> float:
                self._sum += new_value
                self.values_received += 1
                return self._sum / min(self.values_received, self.period)
    """

    def __init__(self, period: int):
        """
        Initialize the indicator state.

        Args:
            period: The period/window size for the indicator calculation

        Raises:
            ValueError: If period is not a positive integer
        """
        if not isinstance(period, int) or period <= 0:
            raise ValueError(f"Period must be a positive integer, got {period}")

        self.period = period
        self.values_received = 0
        self.is_initialized = False

    @abstractmethod
    def update(self, new_value: Union[float, Dict[str, float]]) -> Union[float, Dict[str, float]]:
        """
        Update indicator with new value and return current indicator value.

        This method processes a new data point and updates the internal state
        of the indicator. It returns the current indicator value after the update.

        Args:
            new_value: New data point (can be single value or OHLCV dict)

        Returns:
            Current indicator value after update (single value or dict)

        Raises:
            ValueError: If new_value is invalid or incompatible
        """
        pass

    @abstractmethod
    def is_warmed_up(self) -> bool:
        """
        Check whether indicator has enough data for reliable values.

        Returns:
            True if indicator has received enough data points for reliable calculation
        """
        pass

    @abstractmethod
    def reset(self) -> None:
        """
        Reset indicator state to initial conditions.

        This method clears all internal state and resets the indicator
        as if it was just initialized.
        """
        pass

    @abstractmethod
    def get_current_value(self) -> Union[float, Dict[str, float], None]:
        """
        Get the current indicator value without updating.

        Returns:
            Current indicator value, or None if not warmed up
        """
        pass

    def get_state_summary(self) -> Dict[str, Any]:
        """
        Get summary of current indicator state for debugging.

        Returns:
            Dictionary containing indicator state information
        """
        return {
            'indicator_type': self.__class__.__name__,
            'period': self.period,
            'values_received': self.values_received,
            'is_warmed_up': self.is_warmed_up(),
            'is_initialized': self.is_initialized,
            'current_value': self.get_current_value()
        }

    def validate_input(self, value: Union[float, Dict[str, float]]) -> None:
        """
        Validate input value for the indicator.

        Args:
            value: Input value to validate

        Raises:
            ValueError: If value is invalid
            TypeError: If value type is incorrect
        """
        if isinstance(value, (int, float)):
            if not np.isfinite(value):
|
||||
raise ValueError(f"Input value must be finite, got {value}")
|
||||
elif isinstance(value, dict):
|
||||
required_keys = ['open', 'high', 'low', 'close']
|
||||
for key in required_keys:
|
||||
if key not in value:
|
||||
raise ValueError(f"OHLCV dict missing required key: {key}")
|
||||
if not np.isfinite(value[key]):
|
||||
raise ValueError(f"OHLCV value for {key} must be finite, got {value[key]}")
|
||||
# Validate OHLC relationships
|
||||
if not (value['low'] <= value['open'] <= value['high'] and
|
||||
value['low'] <= value['close'] <= value['high']):
|
||||
raise ValueError(f"Invalid OHLC relationships: {value}")
|
||||
else:
|
||||
raise TypeError(f"Input value must be float or OHLCV dict, got {type(value)}")
|
||||
|
||||
def __repr__(self) -> str:
|
||||
"""String representation of the indicator state."""
|
||||
return (f"{self.__class__.__name__}(period={self.period}, "
|
||||
f"values_received={self.values_received}, "
|
||||
f"warmed_up={self.is_warmed_up()})")
|
||||
|
||||
|
||||
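
# Illustrative only (a hypothetical subclass, not part of the original module):
# the minimal work needed to satisfy the IndicatorState interface.
class _RunningMeanExample(IndicatorState):
    """Cumulative mean of all values seen; for illustration only."""

    def __init__(self, period: int):
        super().__init__(period)
        self._sum = 0.0

    def update(self, new_value: float) -> float:
        self.validate_input(new_value)  # raises on non-finite input
        self._sum += float(new_value)
        self.values_received += 1
        return self._sum / self.values_received

    def is_warmed_up(self) -> bool:
        return self.values_received >= self.period

    def reset(self) -> None:
        self._sum = 0.0
        self.values_received = 0

    def get_current_value(self) -> Optional[float]:
        return self._sum / self.values_received if self.values_received else None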


class SimpleIndicatorState(IndicatorState):
    """
    Base class for simple single-value indicators.

    This class provides common functionality for indicators that work with
    single float values and maintain a simple rolling calculation.
    """

    def __init__(self, period: int):
        """Initialize simple indicator state."""
        super().__init__(period)
        self._current_value = None

    def get_current_value(self) -> Optional[float]:
        """Get current indicator value."""
        return self._current_value if self.is_warmed_up() else None

    def is_warmed_up(self) -> bool:
        """Check if indicator is warmed up."""
        return self.values_received >= self.period


class OHLCIndicatorState(IndicatorState):
    """
    Base class for OHLC-based indicators.

    This class provides common functionality for indicators that work with
    OHLC data (Open, High, Low, Close) and may return multiple values.
    """

    def __init__(self, period: int):
        """Initialize OHLC indicator state."""
        super().__init__(period)
        self._current_values = {}

    def get_current_value(self) -> Optional[Dict[str, float]]:
        """Get current indicator values."""
        return self._current_values.copy() if self.is_warmed_up() else None

    def is_warmed_up(self) -> bool:
        """Check if indicator is warmed up."""
        return self.values_received >= self.period
@ -1,325 +0,0 @@
"""
Bollinger Bands Indicator State

This module implements incremental Bollinger Bands calculation that maintains constant memory usage
and provides identical results to traditional batch calculations. Used by the BBRSStrategy.
"""

from typing import Dict, Union, Optional
from collections import deque
import math
from .base import OHLCIndicatorState
from .moving_average import MovingAverageState


class BollingerBandsState(OHLCIndicatorState):
    """
    Incremental Bollinger Bands calculation state.

    Bollinger Bands consist of:
    - Middle Band: Simple Moving Average of close prices
    - Upper Band: Middle Band + (Standard Deviation * multiplier)
    - Lower Band: Middle Band - (Standard Deviation * multiplier)

    This implementation maintains a rolling window for standard deviation calculation
    while using the MovingAverageState for the middle band.

    Attributes:
        period (int): Period for moving average and standard deviation
        std_dev_multiplier (float): Multiplier for standard deviation
        ma_state (MovingAverageState): Moving average state for middle band
        close_values (deque): Rolling window of close prices for std dev calculation
        close_sum_sq (float): Sum of squared close values for variance calculation

    Example:
        bb = BollingerBandsState(period=20, std_dev_multiplier=2.0)

        # Add price data incrementally
        result = bb.update(103.5)  # Close price
        upper_band = result['upper_band']
        middle_band = result['middle_band']
        lower_band = result['lower_band']
        bandwidth = result['bandwidth']
    """

    def __init__(self, period: int = 20, std_dev_multiplier: float = 2.0):
        """
        Initialize Bollinger Bands state.

        Args:
            period: Period for moving average and standard deviation (default: 20)
            std_dev_multiplier: Multiplier for standard deviation (default: 2.0)

        Raises:
            ValueError: If period is not positive or multiplier is not positive
        """
        super().__init__(period)

        if std_dev_multiplier <= 0:
            raise ValueError(f"Standard deviation multiplier must be positive, got {std_dev_multiplier}")

        self.std_dev_multiplier = std_dev_multiplier
        self.ma_state = MovingAverageState(period)

        # For incremental standard deviation calculation
        self.close_values = deque(maxlen=period)
        self.close_sum_sq = 0.0  # Sum of squared values

        self.is_initialized = True

    def update(self, close_price: Union[float, int]) -> Dict[str, float]:
        """
        Update Bollinger Bands with new close price.

        Args:
            close_price: New closing price

        Returns:
            Dictionary with 'upper_band', 'middle_band', 'lower_band', 'bandwidth', 'std_dev'

        Raises:
            ValueError: If close_price is not finite
            TypeError: If close_price is not numeric
        """
        # Validate input
        if not isinstance(close_price, (int, float)):
            raise TypeError(f"close_price must be numeric, got {type(close_price)}")

        self.validate_input(close_price)

        close_price = float(close_price)

        # Update moving average (middle band)
        middle_band = self.ma_state.update(close_price)

        # Update rolling window for standard deviation
        if len(self.close_values) == self.period:
            # Remove oldest value from sum of squares
            old_value = self.close_values[0]
            self.close_sum_sq -= old_value * old_value

        # Add new value
        self.close_values.append(close_price)
        self.close_sum_sq += close_price * close_price

        # Calculate standard deviation
        n = len(self.close_values)
        if n < 2:
            # Not enough data for standard deviation
            std_dev = 0.0
        else:
            # Incremental variance calculation: Var = (sum_sq - n*mean^2) / (n-1)
            mean = middle_band
            variance = (self.close_sum_sq - n * mean * mean) / (n - 1)
            std_dev = math.sqrt(max(variance, 0.0))  # Ensure non-negative

        # Calculate bands
        upper_band = middle_band + (self.std_dev_multiplier * std_dev)
        lower_band = middle_band - (self.std_dev_multiplier * std_dev)

        # Calculate bandwidth (normalized band width)
        if middle_band != 0:
            bandwidth = (upper_band - lower_band) / middle_band
        else:
            bandwidth = 0.0

        self.values_received += 1

        # Store current values
        result = {
            'upper_band': upper_band,
            'middle_band': middle_band,
            'lower_band': lower_band,
            'bandwidth': bandwidth,
            'std_dev': std_dev
        }

        self._current_values = result
        return result

    def is_warmed_up(self) -> bool:
        """
        Check if Bollinger Bands has enough data for reliable values.

        Returns:
            True if we have at least 'period' number of values
        """
        return self.ma_state.is_warmed_up()

    def reset(self) -> None:
        """Reset Bollinger Bands state to initial conditions."""
        self.ma_state.reset()
        self.close_values.clear()
        self.close_sum_sq = 0.0
        self.values_received = 0
        self._current_values = {}

    def get_current_value(self) -> Optional[Dict[str, float]]:
        """
        Get current Bollinger Bands values without updating.

        Returns:
            Dictionary with current BB values, or None if not warmed up
        """
        if not self.is_warmed_up():
            return None
        return self._current_values.copy() if self._current_values else None

    def get_squeeze_status(self, squeeze_threshold: float = 0.05) -> bool:
        """
        Check if Bollinger Bands are in a squeeze condition.

        Args:
            squeeze_threshold: Bandwidth threshold for squeeze detection

        Returns:
            True if bandwidth is below threshold (squeeze condition)
        """
        if not self.is_warmed_up() or not self._current_values:
            return False

        bandwidth = self._current_values.get('bandwidth', float('inf'))
        return bandwidth < squeeze_threshold

    def get_position_relative_to_bands(self, current_price: float) -> str:
        """
        Get current price position relative to Bollinger Bands.

        Args:
            current_price: Current price to evaluate

        Returns:
            'above_upper', 'between_bands', 'below_lower', or 'unknown'
        """
        if not self.is_warmed_up() or not self._current_values:
            return 'unknown'

        upper_band = self._current_values['upper_band']
        lower_band = self._current_values['lower_band']

        if current_price > upper_band:
            return 'above_upper'
        elif current_price < lower_band:
            return 'below_lower'
        else:
            return 'between_bands'

    def get_state_summary(self) -> dict:
        """Get detailed state summary for debugging."""
        base_summary = super().get_state_summary()
        base_summary.update({
            'std_dev_multiplier': self.std_dev_multiplier,
            'close_values_count': len(self.close_values),
            'close_sum_sq': self.close_sum_sq,
            'ma_state': self.ma_state.get_state_summary(),
            'current_squeeze': self.get_squeeze_status() if self.is_warmed_up() else None
        })
        return base_summary
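
# Illustrative only (not part of the original module): the incremental variance
# identity used in update() above, checked against Python's two-pass sample
# variance on a made-up window of closes.
def _variance_identity_demo() -> None:
    import statistics
    window = [100.0, 102.0, 101.0, 105.0]
    n = len(window)
    mean = sum(window) / n
    sum_sq = sum(x * x for x in window)
    incremental_var = (sum_sq - n * mean * mean) / (n - 1)
    # Both formulations agree up to floating-point error.
    assert abs(incremental_var - statistics.variance(window)) < 1e-9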


class BollingerBandsOHLCState(OHLCIndicatorState):
    """
    Bollinger Bands implementation that works with OHLC data.

    This version can calculate Bollinger Bands based on different price types
    (close, typical price, etc.) and provides additional OHLC-based analysis.
    """

    def __init__(self, period: int = 20, std_dev_multiplier: float = 2.0, price_type: str = 'close'):
        """
        Initialize OHLC Bollinger Bands state.

        Args:
            period: Period for calculation
            std_dev_multiplier: Standard deviation multiplier
            price_type: Price type to use ('close', 'typical', 'median', 'weighted')
        """
        super().__init__(period)

        if price_type not in ['close', 'typical', 'median', 'weighted']:
            raise ValueError(f"Invalid price_type: {price_type}")

        self.std_dev_multiplier = std_dev_multiplier
        self.price_type = price_type
        self.bb_state = BollingerBandsState(period, std_dev_multiplier)
        self.is_initialized = True

    def _extract_price(self, ohlc_data: Dict[str, float]) -> float:
        """Extract price based on price_type setting."""
        if self.price_type == 'close':
            return ohlc_data['close']
        elif self.price_type == 'typical':
            return (ohlc_data['high'] + ohlc_data['low'] + ohlc_data['close']) / 3.0
        elif self.price_type == 'median':
            return (ohlc_data['high'] + ohlc_data['low']) / 2.0
        elif self.price_type == 'weighted':
            return (ohlc_data['high'] + ohlc_data['low'] + 2 * ohlc_data['close']) / 4.0
        else:
            return ohlc_data['close']

    def update(self, ohlc_data: Dict[str, float]) -> Dict[str, float]:
        """
        Update Bollinger Bands with OHLC data.

        Args:
            ohlc_data: Dictionary with OHLC data

        Returns:
            Dictionary with Bollinger Bands values plus OHLC analysis
        """
        # Validate input
        if not isinstance(ohlc_data, dict):
            raise TypeError(f"ohlc_data must be a dictionary, got {type(ohlc_data)}")

        self.validate_input(ohlc_data)

        # Extract price based on type
        price = self._extract_price(ohlc_data)

        # Update underlying BB state
        bb_result = self.bb_state.update(price)

        # Add OHLC-specific analysis
        high = ohlc_data['high']
        low = ohlc_data['low']
        close = ohlc_data['close']

        # Check if high/low touched bands
        upper_band = bb_result['upper_band']
        lower_band = bb_result['lower_band']

        bb_result.update({
            'high_above_upper': high > upper_band,
            'low_below_lower': low < lower_band,
            'close_position': self.bb_state.get_position_relative_to_bands(close),
            'price_type': self.price_type,
            'extracted_price': price
        })

        self.values_received += 1
        self._current_values = bb_result

        return bb_result

    def is_warmed_up(self) -> bool:
        """Check if OHLC Bollinger Bands is warmed up."""
        return self.bb_state.is_warmed_up()

    def reset(self) -> None:
        """Reset OHLC Bollinger Bands state."""
        self.bb_state.reset()
        self.values_received = 0
        self._current_values = {}

    def get_current_value(self) -> Optional[Dict[str, float]]:
        """Get current OHLC Bollinger Bands values."""
        return self.bb_state.get_current_value()

    def get_state_summary(self) -> dict:
        """Get detailed state summary."""
        base_summary = super().get_state_summary()
        base_summary.update({
            'price_type': self.price_type,
            'bb_state': self.bb_state.get_state_summary()
        })
        return base_summary
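
# Illustrative usage (a sketch, not part of the original module; bar values are
# made up): with price_type='typical' the extracted price is (H + L + C) / 3,
# and with fewer than two samples the standard deviation is still 0.0.
def _ohlc_bb_demo() -> None:
    bb = BollingerBandsOHLCState(period=20, std_dev_multiplier=2.0, price_type='typical')
    bar = {'open': 100.0, 'high': 105.0, 'low': 99.0, 'close': 104.0}
    result = bb.update(bar)
    assert abs(result['extracted_price'] - (105.0 + 99.0 + 104.0) / 3.0) < 1e-9
    assert result['std_dev'] == 0.0  # only one sample in the window so far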
@ -1,228 +0,0 @@
"""
Moving Average Indicator State

This module implements incremental moving average calculation that maintains
constant memory usage and provides identical results to traditional batch calculations.
"""

from collections import deque
from typing import Union
from .base import SimpleIndicatorState


class MovingAverageState(SimpleIndicatorState):
    """
    Incremental moving average calculation state.

    This class maintains the state for calculating a simple moving average
    incrementally. It uses a rolling window approach with constant memory usage.

    Attributes:
        period (int): The moving average period
        values (deque): Rolling window of values (max length = period)
        sum (float): Current sum of values in the window

    Example:
        ma = MovingAverageState(period=20)

        # Add values incrementally
        ma_value = ma.update(100.0)  # Returns current MA value
        ma_value = ma.update(105.0)  # Updates and returns new MA value

        # Check if warmed up (has enough values)
        if ma.is_warmed_up():
            current_ma = ma.get_current_value()
    """

    def __init__(self, period: int):
        """
        Initialize moving average state.

        Args:
            period: Number of periods for the moving average

        Raises:
            ValueError: If period is not a positive integer
        """
        super().__init__(period)
        self.values = deque(maxlen=period)
        self.sum = 0.0
        self.is_initialized = True

    def update(self, new_value: Union[float, int]) -> float:
        """
        Update moving average with new value.

        Args:
            new_value: New price/value to add to the moving average

        Returns:
            Current moving average value

        Raises:
            ValueError: If new_value is not finite
            TypeError: If new_value is not numeric
        """
        # Validate input
        if not isinstance(new_value, (int, float)):
            raise TypeError(f"new_value must be numeric, got {type(new_value)}")

        self.validate_input(new_value)

        # If deque is at max capacity, subtract the value being removed
        if len(self.values) == self.period:
            self.sum -= self.values[0]  # Will be automatically removed by deque

        # Add new value
        self.values.append(float(new_value))
        self.sum += float(new_value)
        self.values_received += 1

        # Calculate current moving average
        current_count = len(self.values)
        self._current_value = self.sum / current_count

        return self._current_value

    def is_warmed_up(self) -> bool:
        """
        Check if moving average has enough data for reliable values.

        Returns:
            True if we have at least 'period' number of values
        """
        return len(self.values) >= self.period

    def reset(self) -> None:
        """Reset moving average state to initial conditions."""
        self.values.clear()
        self.sum = 0.0
        self.values_received = 0
        self._current_value = None

    def get_current_value(self) -> Union[float, None]:
        """
        Get current moving average value without updating.

        Returns:
            Current moving average value, or None if not enough data
        """
        if len(self.values) == 0:
            return None
        return self.sum / len(self.values)

    def get_state_summary(self) -> dict:
        """Get detailed state summary for debugging."""
        base_summary = super().get_state_summary()
        base_summary.update({
            'window_size': len(self.values),
            'sum': self.sum,
            'values_in_window': list(self.values) if len(self.values) <= 10 else f"[{len(self.values)} values]"
        })
        return base_summary
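
# Illustrative only (not part of the original module; prices are made up): the
# rolling-sum update in MovingAverageState matches a naive windowed mean.
def _sma_parity_demo() -> None:
    ma = MovingAverageState(period=3)
    prices = [100.0, 102.0, 104.0, 106.0]
    for price in prices:
        value = ma.update(price)
    # After the fourth update the window holds the last three prices.
    assert abs(value - sum(prices[-3:]) / 3.0) < 1e-9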


class ExponentialMovingAverageState(SimpleIndicatorState):
    """
    Incremental exponential moving average calculation state.

    This class maintains the state for calculating an exponential moving average (EMA)
    incrementally. EMA gives more weight to recent values and requires minimal memory.

    Attributes:
        period (int): The EMA period (used to calculate smoothing factor)
        alpha (float): Smoothing factor (2 / (period + 1))
        ema_value (float): Current EMA value

    Example:
        ema = ExponentialMovingAverageState(period=20)

        # Add values incrementally
        ema_value = ema.update(100.0)  # Returns current EMA value
        ema_value = ema.update(105.0)  # Updates and returns new EMA value
    """

    def __init__(self, period: int):
        """
        Initialize exponential moving average state.

        Args:
            period: Number of periods for the EMA (used to calculate alpha)

        Raises:
            ValueError: If period is not a positive integer
        """
        super().__init__(period)
        self.alpha = 2.0 / (period + 1)  # Smoothing factor
        self.ema_value = None
        self.is_initialized = True

    def update(self, new_value: Union[float, int]) -> float:
        """
        Update exponential moving average with new value.

        Args:
            new_value: New price/value to add to the EMA

        Returns:
            Current EMA value

        Raises:
            ValueError: If new_value is not finite
            TypeError: If new_value is not numeric
        """
        # Validate input
        if not isinstance(new_value, (int, float)):
            raise TypeError(f"new_value must be numeric, got {type(new_value)}")

        self.validate_input(new_value)

        new_value = float(new_value)

        if self.ema_value is None:
            # First value - initialize EMA
            self.ema_value = new_value
        else:
            # EMA formula: EMA = alpha * new_value + (1 - alpha) * previous_EMA
            self.ema_value = self.alpha * new_value + (1 - self.alpha) * self.ema_value

        self.values_received += 1
        self._current_value = self.ema_value

        return self.ema_value

    def is_warmed_up(self) -> bool:
        """
        Check if EMA has enough data for reliable values.

        For EMA, we consider it warmed up after receiving 'period' number of values,
        though it starts producing values immediately.

        Returns:
            True if we have at least 'period' number of values
        """
        return self.values_received >= self.period

    def reset(self) -> None:
        """Reset EMA state to initial conditions."""
        self.ema_value = None
        self.values_received = 0
        self._current_value = None

    def get_current_value(self) -> Union[float, None]:
        """
        Get current EMA value without updating.

        Returns:
            Current EMA value, or None if no data received
        """
        return self.ema_value

    def get_state_summary(self) -> dict:
        """Get detailed state summary for debugging."""
        base_summary = super().get_state_summary()
        base_summary.update({
            'alpha': self.alpha,
            'ema_value': self.ema_value
        })
        return base_summary
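
# Illustrative only (not part of the original module; values are made up): with
# period=19 the smoothing factor is alpha = 2 / (19 + 1) = 0.1, so each update
# keeps 90% of the previous EMA.
def _ema_demo() -> None:
    ema = ExponentialMovingAverageState(period=19)
    ema.update(100.0)            # first value seeds the EMA
    second = ema.update(110.0)   # 0.1 * 110 + 0.9 * 100 = 101
    assert abs(second - 101.0) < 1e-9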
@ -1,289 +0,0 @@
"""
RSI (Relative Strength Index) Indicator State

This module implements incremental RSI calculation that maintains constant memory usage
and provides identical results to traditional batch calculations.
"""

from typing import Union, Optional

import numpy as np

from .base import SimpleIndicatorState


class RSIState(SimpleIndicatorState):
    """
    Incremental RSI calculation state using Wilder's smoothing.

    RSI measures the speed and magnitude of price changes to evaluate overbought
    or oversold conditions. It oscillates between 0 and 100.

    RSI = 100 - (100 / (1 + RS))
    where RS = Average Gain / Average Loss over the specified period

    This implementation uses Wilder's smoothing (alpha = 1/period) to match
    the original pandas implementation exactly.

    Attributes:
        period (int): The RSI period (typically 14)
        alpha (float): Wilder's smoothing factor (1/period)
        avg_gain (float): Current average gain
        avg_loss (float): Current average loss
        previous_close (float): Previous period's close price

    Example:
        rsi = RSIState(period=14)

        # Add price data incrementally
        rsi_value = rsi.update(100.0)  # Returns current RSI value
        rsi_value = rsi.update(105.0)  # Updates and returns new RSI value

        # Check if warmed up
        if rsi.is_warmed_up():
            current_rsi = rsi.get_current_value()
    """

    def __init__(self, period: int = 14):
        """
        Initialize RSI state.

        Args:
            period: Number of periods for RSI calculation (default: 14)

        Raises:
            ValueError: If period is not a positive integer
        """
        super().__init__(period)
        self.alpha = 1.0 / period  # Wilder's smoothing factor
        self.avg_gain = None
        self.avg_loss = None
        self.previous_close = None
        self.is_initialized = True

    def update(self, new_close: Union[float, int]) -> float:
        """
        Update RSI with new close price using Wilder's smoothing.

        Args:
            new_close: New closing price

        Returns:
            Current RSI value (0-100), or NaN if not warmed up

        Raises:
            ValueError: If new_close is not finite
            TypeError: If new_close is not numeric
        """
        # Validate input - accept numpy types as well
        if not isinstance(new_close, (int, float, np.integer, np.floating)):
            raise TypeError(f"new_close must be numeric, got {type(new_close)}")

        self.validate_input(float(new_close))

        new_close = float(new_close)

        if self.previous_close is None:
            # First value - no gain/loss to calculate
            self.previous_close = new_close
            self.values_received += 1
            # Return NaN until warmed up (matches original behavior)
            self._current_value = float('nan')
            return self._current_value

        # Calculate price change
        price_change = new_close - self.previous_close

        # Separate gains and losses
        gain = max(price_change, 0.0)
        loss = max(-price_change, 0.0)

        if self.avg_gain is None:
            # Initialize with first gain/loss
            self.avg_gain = gain
            self.avg_loss = loss
        else:
            # Wilder's smoothing: avg = alpha * new_value + (1 - alpha) * previous_avg
            self.avg_gain = self.alpha * gain + (1 - self.alpha) * self.avg_gain
            self.avg_loss = self.alpha * loss + (1 - self.alpha) * self.avg_loss

        # Calculate RSI only if warmed up.
        # RSI should start when we have 'period' price changes (not including the first value)
        if self.values_received > self.period:
            if self.avg_loss == 0.0:
                # Avoid division by zero - all gains, no losses
                if self.avg_gain > 0:
                    rsi_value = 100.0
                else:
                    rsi_value = 50.0  # Neutral when both are zero
            else:
                rs = self.avg_gain / self.avg_loss
                rsi_value = 100.0 - (100.0 / (1.0 + rs))
        else:
            # Not warmed up yet - return NaN
            rsi_value = float('nan')

        # Store state
        self.previous_close = new_close
        self.values_received += 1
        self._current_value = rsi_value

        return rsi_value

    def is_warmed_up(self) -> bool:
        """
        Check if RSI has enough data for reliable values.

        Returns:
            True if we have enough price changes for RSI calculation
        """
        return self.values_received > self.period

    def reset(self) -> None:
        """Reset RSI state to initial conditions."""
        self.alpha = 1.0 / self.period
        self.avg_gain = None
        self.avg_loss = None
        self.previous_close = None
        self.values_received = 0
        self._current_value = None

    def get_current_value(self) -> Optional[float]:
        """
        Get current RSI value without updating.

        Returns:
            Current RSI value (0-100), or None if not enough data
        """
        if not self.is_warmed_up():
            return None
        return self._current_value

    def get_state_summary(self) -> dict:
        """Get detailed state summary for debugging."""
        base_summary = super().get_state_summary()
        base_summary.update({
            'alpha': self.alpha,
            'previous_close': self.previous_close,
            'avg_gain': self.avg_gain,
            'avg_loss': self.avg_loss,
            'current_rsi': self.get_current_value()
        })
        return base_summary
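
# Illustrative only (not part of the original module; the close series is made
# up): RSIState returns NaN until it has processed more than `period` closes,
# after which values fall in [0, 100].
def _wilder_rsi_demo() -> None:
    import math
    rsi = RSIState(period=3)
    closes = [100.0, 101.0, 100.5, 102.0, 103.0]
    values = [rsi.update(c) for c in closes]
    assert all(math.isnan(v) for v in values[:4])  # still warming up
    assert 0.0 <= values[4] <= 100.0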


class SimpleRSIState(SimpleIndicatorState):
    """
    Simple RSI implementation using simple moving averages instead of EMAs.

    This version smooths gains and losses with plain rolling means, which
    matches traditional RSI implementations but requires more memory.
    """

    def __init__(self, period: int = 14):
        """
        Initialize simple RSI state.

        Args:
            period: Number of periods for RSI calculation (default: 14)
        """
        super().__init__(period)
        from collections import deque
        self.gains = deque(maxlen=period)
        self.losses = deque(maxlen=period)
        self.gain_sum = 0.0
        self.loss_sum = 0.0
        self.previous_close = None
        self.is_initialized = True

    def update(self, new_close: Union[float, int]) -> float:
        """
        Update simple RSI with new close price.

        Args:
            new_close: New closing price

        Returns:
            Current RSI value (0-100)
        """
        # Validate input
        if not isinstance(new_close, (int, float)):
            raise TypeError(f"new_close must be numeric, got {type(new_close)}")

        self.validate_input(new_close)

        new_close = float(new_close)

        if self.previous_close is None:
            # First value
            self.previous_close = new_close
            self.values_received += 1
            self._current_value = 50.0
            return self._current_value

        # Calculate price change
        price_change = new_close - self.previous_close
        gain = max(price_change, 0.0)
        loss = max(-price_change, 0.0)

        # Update rolling sums
        if len(self.gains) == self.period:
            self.gain_sum -= self.gains[0]
            self.loss_sum -= self.losses[0]

        self.gains.append(gain)
        self.losses.append(loss)
        self.gain_sum += gain
        self.loss_sum += loss

        # Calculate RSI
        if len(self.gains) == 0:
            rsi_value = 50.0
        else:
            avg_gain = self.gain_sum / len(self.gains)
            avg_loss = self.loss_sum / len(self.losses)

            if avg_loss == 0.0:
                rsi_value = 100.0
            else:
                rs = avg_gain / avg_loss
                rsi_value = 100.0 - (100.0 / (1.0 + rs))

        # Store state
        self.previous_close = new_close
        self.values_received += 1
        self._current_value = rsi_value

        return rsi_value

    def is_warmed_up(self) -> bool:
        """Check if simple RSI is warmed up."""
        return len(self.gains) >= self.period

    def reset(self) -> None:
        """Reset simple RSI state."""
        self.gains.clear()
        self.losses.clear()
        self.gain_sum = 0.0
        self.loss_sum = 0.0
        self.previous_close = None
        self.values_received = 0
        self._current_value = None

    def get_current_value(self) -> Optional[float]:
        """Get current simple RSI value."""
        if self.values_received == 0:
            return None
        return self._current_value

    def get_state_summary(self) -> dict:
        """Get detailed state summary for debugging."""
        base_summary = super().get_state_summary()
        base_summary.update({
            'previous_close': self.previous_close,
            'gains_window_size': len(self.gains),
            'losses_window_size': len(self.losses),
            'gain_sum': self.gain_sum,
            'loss_sum': self.loss_sum,
            'current_rsi': self.get_current_value()
        })
        return base_summary
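
# Illustrative only (not part of the original module; closes are made up): with
# nothing but gains in the window, SimpleRSIState saturates at 100.
def _simple_rsi_demo() -> None:
    rsi = SimpleRSIState(period=3)
    for close in [100.0, 101.0, 102.0, 103.0]:
        value = rsi.update(close)
    assert rsi.is_warmed_up()
    assert value == 100.0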
@ -1,333 +0,0 @@
"""
Supertrend Indicator State

This module implements incremental Supertrend calculation that maintains constant memory usage
and provides identical results to traditional batch calculations. Supertrend is used by
the DefaultStrategy for trend detection.
"""

from typing import Dict, Union, Optional
from .base import OHLCIndicatorState
from .atr import ATRState


class SupertrendState(OHLCIndicatorState):
    """
    Incremental Supertrend calculation state.

    Supertrend is a trend-following indicator that uses Average True Range (ATR)
    to calculate dynamic support and resistance levels. It provides clear trend
    direction signals: +1 for uptrend, -1 for downtrend.

    The calculation involves:
    1. Calculate ATR for the given period
    2. Calculate basic upper and lower bands using ATR and multiplier
    3. Calculate final upper and lower bands with trend logic
    4. Determine trend direction based on price vs bands

    Attributes:
        period (int): ATR period for Supertrend calculation
        multiplier (float): Multiplier for ATR in band calculation
        atr_state (ATRState): ATR calculation state
        previous_close (float): Previous period's close price
        previous_trend (int): Previous trend direction (+1 or -1)
        final_upper_band (float): Current final upper band
        final_lower_band (float): Current final lower band

    Example:
        supertrend = SupertrendState(period=10, multiplier=3.0)

        # Add OHLC data incrementally
        ohlc = {'open': 100, 'high': 105, 'low': 98, 'close': 103}
        result = supertrend.update(ohlc)
        trend = result['trend']  # +1 or -1
        supertrend_value = result['supertrend']  # Supertrend line value
    """

    def __init__(self, period: int = 10, multiplier: float = 3.0):
        """
        Initialize Supertrend state.

        Args:
            period: ATR period for Supertrend calculation (default: 10)
            multiplier: Multiplier for ATR in band calculation (default: 3.0)

        Raises:
            ValueError: If period is not positive or multiplier is not positive
        """
        super().__init__(period)

        if multiplier <= 0:
            raise ValueError(f"Multiplier must be positive, got {multiplier}")

        self.multiplier = multiplier
        self.atr_state = ATRState(period)

        # State variables
        self.previous_close = None
        self.previous_trend = None  # Don't assume initial trend, let first calculation determine it
        self.final_upper_band = None
        self.final_lower_band = None

        # Current values
        self.current_trend = None
        self.current_supertrend = None

        self.is_initialized = True

    def update(self, ohlc_data: Dict[str, float]) -> Dict[str, float]:
        """
        Update Supertrend with new OHLC data.

        Args:
            ohlc_data: Dictionary with 'open', 'high', 'low', 'close' keys

        Returns:
            Dictionary with 'trend', 'supertrend', 'upper_band', 'lower_band' and 'atr' keys

        Raises:
            ValueError: If OHLC data is invalid
            TypeError: If ohlc_data is not a dictionary
        """
        # Validate input
        if not isinstance(ohlc_data, dict):
            raise TypeError(f"ohlc_data must be a dictionary, got {type(ohlc_data)}")

        self.validate_input(ohlc_data)

        high = float(ohlc_data['high'])
        low = float(ohlc_data['low'])
        close = float(ohlc_data['close'])

        # Update ATR
        atr_value = self.atr_state.update(ohlc_data)

        # Calculate HL2 (median price)
        hl2 = (high + low) / 2.0

        # Calculate basic upper and lower bands
        basic_upper_band = hl2 + (self.multiplier * atr_value)
        basic_lower_band = hl2 - (self.multiplier * atr_value)

        # Calculate final upper band
        if self.final_upper_band is None or basic_upper_band < self.final_upper_band or self.previous_close > self.final_upper_band:
            final_upper_band = basic_upper_band
        else:
            final_upper_band = self.final_upper_band

        # Calculate final lower band
        if self.final_lower_band is None or basic_lower_band > self.final_lower_band or self.previous_close < self.final_lower_band:
            final_lower_band = basic_lower_band
        else:
            final_lower_band = self.final_lower_band

        # Determine trend
        if self.previous_close is None:
            # First calculation - match original logic:
            # if close <= upper_band, trend is -1 (downtrend), else trend is 1 (uptrend)
            trend = -1 if close <= basic_upper_band else 1
        else:
            # Trend logic for subsequent calculations
            if self.previous_trend == 1 and close <= final_lower_band:
                trend = -1
            elif self.previous_trend == -1 and close >= final_upper_band:
                trend = 1
            else:
                trend = self.previous_trend

        # Calculate Supertrend value
        if trend == 1:
            supertrend_value = final_lower_band
        else:
            supertrend_value = final_upper_band

        # Store current state
        self.previous_close = close
        self.previous_trend = trend
        self.final_upper_band = final_upper_band
        self.final_lower_band = final_lower_band
        self.current_trend = trend
        self.current_supertrend = supertrend_value
        self.values_received += 1

        # Prepare result
        result = {
            'trend': trend,
            'supertrend': supertrend_value,
            'upper_band': final_upper_band,
            'lower_band': final_lower_band,
            'atr': atr_value
        }

        self._current_values = result
        return result

    def is_warmed_up(self) -> bool:
        """
        Check if Supertrend has enough data for reliable values.

        Returns:
            True if ATR state is warmed up
        """
        return self.atr_state.is_warmed_up()

    def reset(self) -> None:
        """Reset Supertrend state to initial conditions."""
        self.atr_state.reset()
        self.previous_close = None
        self.previous_trend = None
        self.final_upper_band = None
        self.final_lower_band = None
        self.current_trend = None
        self.current_supertrend = None
        self.values_received = 0
        self._current_values = {}

    def get_current_value(self) -> Optional[Dict[str, float]]:
        """
        Get current Supertrend values without updating.

        Returns:
            Dictionary with current Supertrend values, or None if not warmed up
        """
        if not self.is_warmed_up():
            return None
        return self._current_values.copy() if self._current_values else None

    def get_current_trend(self) -> int:
        """
        Get current trend direction.

        Returns:
            Current trend: +1 for uptrend, -1 for downtrend, 0 if not initialized
        """
        return self.current_trend if self.current_trend is not None else 0

    def get_current_supertrend_value(self) -> Optional[float]:
        """
        Get current Supertrend line value.

        Returns:
            Current Supertrend value, or None if not available
        """
        return self.current_supertrend

    def get_state_summary(self) -> dict:
        """Get detailed state summary for debugging."""
        base_summary = super().get_state_summary()
        base_summary.update({
            'multiplier': self.multiplier,
            'previous_close': self.previous_close,
            'previous_trend': self.previous_trend,
            'current_trend': self.current_trend,
            'current_supertrend': self.current_supertrend,
            'final_upper_band': self.final_upper_band,
            'final_lower_band': self.final_lower_band,
            'atr_state': self.atr_state.get_state_summary()
        })
        return base_summary
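
# Illustrative only (not part of the original module; numbers are made up): the
# final-band rule in update() ratchets the upper band, holding it in place
# unless the basic band tightens or the previous close broke above it.
def _final_band_demo() -> None:
    prev_final_upper = 120.0
    prev_close = 110.0
    basic_upper = 122.0
    if basic_upper < prev_final_upper or prev_close > prev_final_upper:
        final_upper = basic_upper
    else:
        final_upper = prev_final_upper
    assert final_upper == 120.0  # band holds instead of loosening to 122.0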


class SupertrendCollection:
    """
    Collection of multiple Supertrend indicators with different parameters.

    This class manages multiple Supertrend indicators and provides meta-trend
    calculation based on agreement between different Supertrend configurations.
    Used by the DefaultStrategy for robust trend detection.

    Example:
        # Create collection with three Supertrend indicators
        collection = SupertrendCollection([
            (10, 3.0),  # period=10, multiplier=3.0
            (11, 2.0),  # period=11, multiplier=2.0
            (12, 1.0)   # period=12, multiplier=1.0
        ])

        # Update all indicators
        results = collection.update(ohlc_data)
        meta_trend = results['meta_trend']  # 1, -1, or 0 (neutral)
    """

    def __init__(self, supertrend_configs: list):
        """
        Initialize Supertrend collection.

        Args:
            supertrend_configs: List of (period, multiplier) tuples
        """
        self.supertrends = []
        for period, multiplier in supertrend_configs:
            self.supertrends.append(SupertrendState(period, multiplier))

        self.values_received = 0

    def update(self, ohlc_data: Dict[str, float]) -> Dict[str, Union[int, list]]:
        """
        Update all Supertrend indicators and calculate meta-trend.

        Args:
            ohlc_data: OHLC data dictionary

        Returns:
            Dictionary with individual trends and meta-trend
        """
        trends = []
        results = []

        # Update each Supertrend
        for supertrend in self.supertrends:
            result = supertrend.update(ohlc_data)
            trends.append(result['trend'])
            results.append(result)

        # Calculate meta-trend: all must agree for directional signal
        if all(trend == trends[0] for trend in trends):
            meta_trend = trends[0]  # All agree
        else:
            meta_trend = 0  # Neutral when trends don't agree

        self.values_received += 1

        return {
            'trends': trends,
            'meta_trend': meta_trend,
            'results': results
        }

    def is_warmed_up(self) -> bool:
        """Check if all Supertrend indicators are warmed up."""
        return all(st.is_warmed_up() for st in self.supertrends)

    def reset(self) -> None:
        """Reset all Supertrend indicators."""
        for supertrend in self.supertrends:
            supertrend.reset()
        self.values_received = 0

    def get_current_meta_trend(self) -> int:
        """
        Get current meta-trend without updating.

        Returns:
            Current meta-trend: +1, -1, or 0
        """
        if not self.is_warmed_up():
            return 0

        trends = [st.get_current_trend() for st in self.supertrends]

        if all(trend == trends[0] for trend in trends):
            return trends[0]
        else:
            return 0

    def get_state_summary(self) -> dict:
        """Get detailed state summary for all Supertrends."""
        return {
            'num_supertrends': len(self.supertrends),
            'values_received': self.values_received,
            'is_warmed_up': self.is_warmed_up(),
            'current_meta_trend': self.get_current_meta_trend(),
            'supertrends': [st.get_state_summary() for st in self.supertrends]
        }
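
# Illustrative only (not part of the original module): the agreement rule used
# for the meta-trend, applied to made-up trend votes.
def _meta_trend_demo() -> None:
    def meta(trends: list) -> int:
        return trends[0] if all(t == trends[0] for t in trends) else 0
    assert meta([1, 1, 1]) == 1      # unanimous uptrend
    assert meta([-1, -1, -1]) == -1  # unanimous downtrend
    assert meta([1, -1, 1]) == 0     # disagreement -> neutral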
@ -1,423 +0,0 @@
"""
Incremental MetaTrend Strategy

This module implements an incremental version of the DefaultStrategy that processes
real-time data efficiently while producing identical meta-trend signals to the
original batch-processing implementation.

The strategy uses 3 Supertrend indicators with parameters:
- Supertrend 1: period=12, multiplier=3.0
- Supertrend 2: period=10, multiplier=1.0
- Supertrend 3: period=11, multiplier=2.0

Meta-trend calculation:
- Meta-trend = 1 when all 3 Supertrends agree on uptrend
- Meta-trend = -1 when all 3 Supertrends agree on downtrend
- Meta-trend = 0 when Supertrends disagree (neutral)

Signal generation:
- Entry: meta-trend changes from != 1 to == 1
- Exit: meta-trend changes from != 1 to == -1

Stop-loss handling is delegated to the trader layer.
"""

import pandas as pd
import numpy as np
from typing import Dict, Optional, List, Any
import logging

from .base import IncStrategyBase, IncStrategySignal
from .indicators.supertrend import SupertrendCollection

logger = logging.getLogger(__name__)
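
# Illustrative only (not part of the original module): the entry/exit
# transition rules described in the module docstring, on made-up meta-trend
# transitions.
def _transition_rules_demo() -> None:
    def entry(prev: int, cur: int) -> bool:
        return prev != 1 and cur == 1
    def exit_(prev: int, cur: int) -> bool:
        return prev != 1 and cur == -1
    assert entry(0, 1) and not entry(1, 1)
    assert exit_(0, -1) and not exit_(1, -1)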
class IncMetaTrendStrategy(IncStrategyBase):
|
||||
"""
|
||||
Incremental MetaTrend strategy implementation.
|
||||
|
||||
This strategy uses multiple Supertrend indicators to determine market direction
|
||||
and generates entry/exit signals based on meta-trend changes. It processes
|
||||
data incrementally for real-time performance while maintaining mathematical
|
||||
equivalence to the original DefaultStrategy.
|
||||
|
||||
The strategy is designed to work with any timeframe but defaults to the
|
||||
timeframe specified in parameters (or 15min if not specified).
|
||||
|
||||
Parameters:
|
||||
timeframe (str): Primary timeframe for analysis (default: "15min")
|
||||
buffer_size_multiplier (float): Buffer size multiplier for memory management (default: 2.0)
|
||||
enable_logging (bool): Enable detailed logging (default: False)
|
||||
|
||||
Example:
|
||||
strategy = IncMetaTrendStrategy("metatrend", weight=1.0, params={
|
||||
"timeframe": "15min",
|
||||
"enable_logging": True
|
||||
})
|
||||
"""
|
||||
|
||||
def __init__(self, name: str = "metatrend", weight: float = 1.0, params: Optional[Dict] = None):
|
||||
"""
|
||||
Initialize the incremental MetaTrend strategy.
|
||||
|
||||
Args:
|
||||
name: Strategy name/identifier
|
||||
weight: Strategy weight for combination (default: 1.0)
|
||||
params: Strategy parameters
|
||||
"""
|
||||
super().__init__(name, weight, params)
|
||||
|
||||
# Strategy configuration - now handled by base class timeframe aggregation
|
||||
self.primary_timeframe = self.params.get("timeframe", "15min")
|
||||
self.enable_logging = self.params.get("enable_logging", False)
|
||||
|
||||
# Configure logging level
|
||||
if self.enable_logging:
|
||||
logger.setLevel(logging.DEBUG)
|
||||
|
||||
# Initialize Supertrend collection with exact parameters from original strategy
|
||||
self.supertrend_configs = [
|
||||
(12, 3.0), # period=12, multiplier=3.0
|
||||
(10, 1.0), # period=10, multiplier=1.0
|
||||
(11, 2.0) # period=11, multiplier=2.0
|
||||
]
|
||||
|
||||
self.supertrend_collection = SupertrendCollection(self.supertrend_configs)
|
||||
|
||||
# Meta-trend state
|
||||
self.current_meta_trend = 0
|
||||
self.previous_meta_trend = 0
|
||||
self._meta_trend_history = [] # For debugging/analysis
|
||||
|
||||
# Signal generation state
|
||||
self._last_entry_signal = None
|
||||
self._last_exit_signal = None
|
||||
self._signal_count = {"entry": 0, "exit": 0}
|
||||
|
||||
# Performance tracking
|
||||
self._update_count = 0
|
||||
self._last_update_time = None
|
||||
|
||||
logger.info(f"IncMetaTrendStrategy initialized: timeframe={self.primary_timeframe}, "
|
||||
f"aggregation_enabled={self._timeframe_aggregator is not None}")
|
||||
|
||||
def get_minimum_buffer_size(self) -> Dict[str, int]:
|
||||
"""
|
||||
Return minimum data points needed for reliable Supertrend calculations.
|
||||
|
||||
With the new base class timeframe aggregation, we only need to specify
|
||||
the minimum buffer size for our primary timeframe. The base class
|
||||
handles minute-level data aggregation automatically.
|
||||
|
||||
Returns:
|
||||
Dict[str, int]: {timeframe: min_points} mapping
|
||||
"""
|
||||
# Find the largest period among all Supertrend configurations
|
||||
max_period = max(config[0] for config in self.supertrend_configs)
|
||||
|
||||
# Add buffer for ATR warmup (ATR typically needs ~2x period for stability)
|
||||
min_buffer_size = max_period * 2 + 10 # Extra 10 points for safety
|
||||
|
||||
# With new base class, we only specify our primary timeframe
|
||||
# The base class handles minute-level aggregation automatically
|
||||
return {self.primary_timeframe: min_buffer_size}
|
||||
|
||||
def calculate_on_data(self, new_data_point: Dict[str, float], timestamp: pd.Timestamp) -> None:
|
||||
"""
|
||||
Process a single new data point incrementally.
|
||||
|
||||
This method updates the Supertrend indicators and recalculates the meta-trend
|
||||
based on the new data point.
|
||||
|
||||
Args:
|
||||
new_data_point: OHLCV data point {open, high, low, close, volume}
|
||||
timestamp: Timestamp of the data point
|
||||
"""
|
||||
try:
|
||||
self._update_count += 1
|
||||
self._last_update_time = timestamp
|
||||
|
||||
if self.enable_logging:
|
||||
logger.debug(f"Processing data point {self._update_count} at {timestamp}")
|
||||
logger.debug(f"OHLC: O={new_data_point.get('open', 0):.2f}, "
|
||||
f"H={new_data_point.get('high', 0):.2f}, "
|
||||
f"L={new_data_point.get('low', 0):.2f}, "
|
||||
f"C={new_data_point.get('close', 0):.2f}")
|
||||
|
||||
# Store previous meta-trend for change detection
|
||||
self.previous_meta_trend = self.current_meta_trend
|
||||
|
||||
# Update Supertrend collection with new data
|
||||
supertrend_results = self.supertrend_collection.update(new_data_point)
|
||||
|
||||
# Calculate new meta-trend
|
||||
self.current_meta_trend = self._calculate_meta_trend(supertrend_results)
|
||||
|
||||
# Store meta-trend history for analysis
|
||||
self._meta_trend_history.append({
|
||||
'timestamp': timestamp,
|
||||
'meta_trend': self.current_meta_trend,
|
||||
'individual_trends': supertrend_results['trends'].copy(),
|
||||
'update_count': self._update_count
|
||||
})
|
||||
|
||||
# Limit history size to prevent memory growth
|
||||
if len(self._meta_trend_history) > 1000:
|
||||
self._meta_trend_history = self._meta_trend_history[-500:] # Keep last 500
|
||||
|
||||
# Log meta-trend changes
|
||||
if self.enable_logging and self.current_meta_trend != self.previous_meta_trend:
|
||||
logger.info(f"Meta-trend changed: {self.previous_meta_trend} -> {self.current_meta_trend} "
|
||||
f"at {timestamp} (update #{self._update_count})")
|
||||
logger.debug(f"Individual trends: {supertrend_results['trends']}")
|
||||
|
||||
# Update warmup status
|
||||
if not self._is_warmed_up and self.supertrend_collection.is_warmed_up():
|
||||
self._is_warmed_up = True
|
||||
logger.info(f"Strategy warmed up after {self._update_count} data points")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in calculate_on_data: {e}")
|
||||
raise
|
||||
|
||||
def supports_incremental_calculation(self) -> bool:
|
||||
"""
|
||||
Whether strategy supports incremental calculation.
|
||||
|
||||
Returns:
|
||||
bool: True (this strategy is fully incremental)
|
||||
"""
|
||||
return True
|
||||
|
||||
def get_entry_signal(self) -> IncStrategySignal:
|
||||
"""
|
||||
Generate entry signal based on meta-trend direction change.
|
||||
|
||||
Entry occurs when meta-trend changes from != 1 to == 1, indicating
|
||||
all Supertrend indicators now agree on upward direction.
|
||||
|
||||
Returns:
|
||||
IncStrategySignal: Entry signal if trend aligns, hold signal otherwise
|
||||
"""
|
||||
if not self.is_warmed_up:
|
||||
return IncStrategySignal("HOLD", confidence=0.0)
|
||||
|
||||
# Check for meta-trend entry condition
|
||||
if self._check_entry_condition():
|
||||
self._signal_count["entry"] += 1
|
||||
self._last_entry_signal = {
|
||||
'timestamp': self._last_update_time,
|
||||
'meta_trend': self.current_meta_trend,
|
||||
'previous_meta_trend': self.previous_meta_trend,
|
||||
'update_count': self._update_count
|
||||
}
|
||||
|
||||
if self.enable_logging:
|
||||
logger.info(f"ENTRY SIGNAL generated at {self._last_update_time} "
|
||||
f"(signal #{self._signal_count['entry']})")
|
||||
|
||||
return IncStrategySignal("ENTRY", confidence=1.0, metadata={
|
||||
"meta_trend": self.current_meta_trend,
|
||||
"previous_meta_trend": self.previous_meta_trend,
|
||||
"signal_count": self._signal_count["entry"]
|
||||
})
|
||||
|
||||
return IncStrategySignal("HOLD", confidence=0.0)
|
||||
|
||||
def get_exit_signal(self) -> IncStrategySignal:
|
||||
"""
|
||||
Generate exit signal based on meta-trend reversal.
|
||||
|
||||
Exit occurs when meta-trend changes from != -1 to == -1, indicating
|
||||
trend reversal to downward direction.
|
||||
|
||||
Returns:
|
||||
IncStrategySignal: Exit signal if trend reverses, hold signal otherwise
|
||||
"""
|
||||
if not self.is_warmed_up:
|
||||
return IncStrategySignal("HOLD", confidence=0.0)
|
||||
|
||||
# Check for meta-trend exit condition
|
||||
if self._check_exit_condition():
|
||||
self._signal_count["exit"] += 1
|
||||
self._last_exit_signal = {
|
||||
'timestamp': self._last_update_time,
|
||||
'meta_trend': self.current_meta_trend,
|
||||
'previous_meta_trend': self.previous_meta_trend,
|
||||
'update_count': self._update_count
|
||||
}
|
||||
|
||||
if self.enable_logging:
|
||||
logger.info(f"EXIT SIGNAL generated at {self._last_update_time} "
|
||||
f"(signal #{self._signal_count['exit']})")
|
||||
|
||||
return IncStrategySignal("EXIT", confidence=1.0, metadata={
|
||||
"type": "META_TREND_EXIT",
|
||||
"meta_trend": self.current_meta_trend,
|
||||
"previous_meta_trend": self.previous_meta_trend,
|
||||
"signal_count": self._signal_count["exit"]
|
||||
})
|
||||
|
||||
return IncStrategySignal("HOLD", confidence=0.0)
|
||||
|
||||
def get_confidence(self) -> float:
|
||||
"""
|
||||
Get strategy confidence based on meta-trend strength.
|
||||
|
||||
Higher confidence when meta-trend is strongly directional,
|
||||
lower confidence during neutral periods.
|
||||
|
||||
Returns:
|
||||
float: Confidence level (0.0 to 1.0)
|
||||
"""
|
||||
if not self.is_warmed_up:
|
||||
return 0.0
|
||||
|
||||
# High confidence for strong directional signals
|
||||
if self.current_meta_trend == 1 or self.current_meta_trend == -1:
|
||||
return 1.0
|
||||
|
||||
# Lower confidence for neutral trend
|
||||
return 0.3
|
||||
|
||||
    def _calculate_meta_trend(self, supertrend_results: Dict) -> int:
        """
        Calculate meta-trend from SupertrendCollection results.

        Meta-trend logic (matching original DefaultStrategy):
        - All 3 Supertrends must agree for directional signal
        - If all trends are the same, meta-trend = that trend
        - If trends disagree, meta-trend = 0 (neutral)

        Args:
            supertrend_results: Results from SupertrendCollection.update()

        Returns:
            int: Meta-trend value (1, -1, or 0)
        """
        trends = supertrend_results['trends']

        # Check if all trends agree
        if all(trend == trends[0] for trend in trends):
            return trends[0]  # All agree: return the common trend
        else:
            return 0  # Neutral when trends disagree

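    # Worked example (illustrative values, not from live data):
    #   trends == [1, 1, 1]    -> meta-trend =  1 (all agree up)
    #   trends == [-1, -1, -1] -> meta-trend = -1 (all agree down)
    #   trends == [1, -1, 1]   -> meta-trend =  0 (disagreement -> neutral)
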
    def _check_entry_condition(self) -> bool:
        """
        Check if meta-trend entry condition is met.

        Entry condition: meta-trend changes from != 1 to == 1

        Returns:
            bool: True if entry condition is met
        """
        return (self.previous_meta_trend != 1 and
                self.current_meta_trend == 1)

    def _check_exit_condition(self) -> bool:
        """
        Check if meta-trend exit condition is met.

        Exit condition: meta-trend changes from != 1 to == -1
        (Modified to match original strategy behavior)

        Returns:
            bool: True if exit condition is met
        """
        return (self.previous_meta_trend != 1 and
                self.current_meta_trend == -1)

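    # Behavior note (descriptive of the check above): a direct 1 -> -1 flip
    # does not satisfy the condition on the flip bar itself, because
    # previous_meta_trend is still 1; if the meta-trend stays at -1, the
    # condition becomes true on the following update instead.
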
    def get_current_state_summary(self) -> Dict[str, Any]:
        """
        Get detailed state summary for debugging and monitoring.

        Returns:
            Dict with current strategy state information
        """
        base_summary = super().get_current_state_summary()

        # Add MetaTrend-specific state
        base_summary.update({
            'primary_timeframe': self.primary_timeframe,
            'current_meta_trend': self.current_meta_trend,
            'previous_meta_trend': self.previous_meta_trend,
            'supertrend_collection_warmed_up': self.supertrend_collection.is_warmed_up(),
            'supertrend_configs': self.supertrend_configs,
            'signal_counts': self._signal_count.copy(),
            'update_count': self._update_count,
            'last_update_time': str(self._last_update_time) if self._last_update_time else None,
            'meta_trend_history_length': len(self._meta_trend_history),
            'last_entry_signal': self._last_entry_signal,
            'last_exit_signal': self._last_exit_signal
        })

        # Add Supertrend collection state
        if hasattr(self.supertrend_collection, 'get_state_summary'):
            base_summary['supertrend_collection_state'] = self.supertrend_collection.get_state_summary()

        return base_summary

    def reset_calculation_state(self) -> None:
        """Reset internal calculation state for reinitialization."""
        super().reset_calculation_state()

        # Reset Supertrend collection
        self.supertrend_collection.reset()

        # Reset meta-trend state
        self.current_meta_trend = 0
        self.previous_meta_trend = 0
        self._meta_trend_history.clear()

        # Reset signal state
        self._last_entry_signal = None
        self._last_exit_signal = None
        self._signal_count = {"entry": 0, "exit": 0}

        # Reset performance tracking
        self._update_count = 0
        self._last_update_time = None

        logger.info("IncMetaTrendStrategy state reset")

    def get_meta_trend_history(self, limit: Optional[int] = None) -> List[Dict]:
        """
        Get meta-trend history for analysis.

        Args:
            limit: Maximum number of recent entries to return

        Returns:
            List of meta-trend history entries
        """
        if limit is None:
            return self._meta_trend_history.copy()
        else:
            return self._meta_trend_history[-limit:] if limit > 0 else []

    def get_current_meta_trend(self) -> int:
        """
        Get current meta-trend value.

        Returns:
            int: Current meta-trend (1, -1, or 0)
        """
        return self.current_meta_trend

    def get_individual_supertrend_states(self) -> List[Dict]:
        """
        Get current state of individual Supertrend indicators.

        Returns:
            List of Supertrend state summaries
        """
        if hasattr(self.supertrend_collection, 'get_state_summary'):
            collection_state = self.supertrend_collection.get_state_summary()
            return collection_state.get('supertrends', [])
        return []


# Compatibility alias for easier imports
MetaTrendStrategy = IncMetaTrendStrategy

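For orientation, a minimal usage sketch of the strategy above. This is a sketch only: it assumes `MetaTrendStrategy` shares the `(weight, params)` constructor and the `calculate_on_data()` / `get_entry_signal()` flow used by the other strategies in this module, and the parameter values are hypothetical.

```python
import pandas as pd
from IncrementalTrader.strategies.metatrend import MetaTrendStrategy

# Hypothetical parameters for illustration; the real defaults live in the class.
strategy = MetaTrendStrategy(weight=1.0, params={"timeframe": "1min"})

bar = {"open": 100.0, "high": 101.0, "low": 99.5, "close": 100.5, "volume": 12.0}
strategy.calculate_on_data(bar, pd.Timestamp("2024-01-01 00:00:00"))

signal = strategy.get_entry_signal()  # "HOLD" until the Supertrends are warmed up
```
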
@ -1,329 +0,0 @@
"""
Incremental Random Strategy for Testing

This strategy generates random entry and exit signals for testing the incremental strategy system.
It's useful for verifying that the incremental strategy framework is working correctly.
"""

import random
import logging
import time
from typing import Any, Dict, Optional
import pandas as pd

from .base import IncStrategyBase, IncStrategySignal

logger = logging.getLogger(__name__)

class IncRandomStrategy(IncStrategyBase):
    """
    Incremental random signal generator strategy for testing.

    This strategy generates random entry and exit signals with configurable
    probability and confidence levels. It's designed to test the incremental
    strategy framework and signal processing system.

    The incremental version maintains minimal state and processes each new
    data point independently, making it ideal for testing real-time performance.

    Parameters:
        entry_probability: Probability of generating an entry signal (0.0-1.0)
        exit_probability: Probability of generating an exit signal (0.0-1.0)
        min_confidence: Minimum confidence level for signals
        max_confidence: Maximum confidence level for signals
        timeframe: Timeframe to operate on (default: "1min")
        signal_frequency: How often to generate signals (every N bars)
        random_seed: Optional seed for reproducible random signals

    Example:
        strategy = IncRandomStrategy(
            weight=1.0,
            params={
                "entry_probability": 0.1,
                "exit_probability": 0.15,
                "min_confidence": 0.7,
                "max_confidence": 0.9,
                "signal_frequency": 5,
                "random_seed": 42  # For reproducible testing
            }
        )
    """

    def __init__(self, weight: float = 1.0, params: Optional[Dict] = None):
        """Initialize the incremental random strategy."""
        super().__init__("inc_random", weight, params)

        # Strategy parameters with defaults
        self.entry_probability = self.params.get("entry_probability", 0.05)  # 5% chance per bar
        self.exit_probability = self.params.get("exit_probability", 0.1)  # 10% chance per bar
        self.min_confidence = self.params.get("min_confidence", 0.6)
        self.max_confidence = self.params.get("max_confidence", 0.9)
        self.timeframe = self.params.get("timeframe", "1min")
        self.signal_frequency = self.params.get("signal_frequency", 1)  # Every bar

        # Create separate random instance for this strategy
        self._random = random.Random()
        random_seed = self.params.get("random_seed")
        if random_seed is not None:
            self._random.seed(random_seed)
            logger.info(f"IncRandomStrategy: Set random seed to {random_seed}")

        # Internal state (minimal for random strategy)
        self._bar_count = 0
        self._last_signal_bar = -1
        self._current_price = None
        self._last_timestamp = None

        logger.info(f"IncRandomStrategy initialized with entry_prob={self.entry_probability}, "
                    f"exit_prob={self.exit_probability}, timeframe={self.timeframe}, "
                    f"aggregation_enabled={self._timeframe_aggregator is not None}")

    def get_minimum_buffer_size(self) -> Dict[str, int]:
        """
        Return minimum data points needed for each timeframe.

        Random strategy doesn't need any historical data for calculations,
        so we only need 1 data point to start generating signals.
        With the new base class timeframe aggregation, we only specify
        our primary timeframe.

        Returns:
            Dict[str, int]: Minimal buffer requirements
        """
        return {self.timeframe: 1}  # Only need current data point

    def supports_incremental_calculation(self) -> bool:
        """
        Whether strategy supports incremental calculation.

        Random strategy is ideal for incremental mode since it doesn't
        depend on historical calculations.

        Returns:
            bool: Always True for random strategy
        """
        return True

    def calculate_on_data(self, new_data_point: Dict[str, float], timestamp: pd.Timestamp) -> None:
        """
        Process a single new data point incrementally.

        For random strategy, we just update our internal state with the
        current price. The base class now handles timeframe aggregation
        automatically, so we only receive data when a complete timeframe
        bar is formed.

        Args:
            new_data_point: OHLCV data point {open, high, low, close, volume}
            timestamp: Timestamp of the data point
        """
        start_time = time.perf_counter()

        try:
            # Update internal state - base class handles timeframe aggregation
            self._current_price = new_data_point['close']
            self._last_timestamp = timestamp
            self._data_points_received += 1

            # Increment bar count for each processed timeframe bar
            self._bar_count += 1

            # Debug logging every 10 bars
            if self._bar_count % 10 == 0:
                logger.debug(f"IncRandomStrategy: Processing bar {self._bar_count}, "
                             f"price=${self._current_price:.2f}, timestamp={timestamp}")

            # Update warm-up status
            if not self._is_warmed_up and self._data_points_received >= 1:
                self._is_warmed_up = True
                self._calculation_mode = "incremental"
                logger.info(f"IncRandomStrategy: Warmed up after {self._data_points_received} data points")

            # Record performance metrics
            update_time = time.perf_counter() - start_time
            self._performance_metrics['update_times'].append(update_time)

        except Exception as e:
            logger.error(f"IncRandomStrategy: Error in calculate_on_data: {e}")
            self._performance_metrics['state_validation_failures'] += 1
            raise

    def get_entry_signal(self) -> IncStrategySignal:
        """
        Generate random entry signals based on current state.

        Returns:
            IncStrategySignal: Entry signal with confidence level
        """
        if not self._is_warmed_up:
            return IncStrategySignal("HOLD", 0.0)

        start_time = time.perf_counter()

        try:
            # Check if we should generate a signal based on frequency
            if (self._bar_count - self._last_signal_bar) < self.signal_frequency:
                return IncStrategySignal("HOLD", 0.0)

            # Generate random entry signal using strategy's random instance
            random_value = self._random.random()
            if random_value < self.entry_probability:
                confidence = self._random.uniform(self.min_confidence, self.max_confidence)
                self._last_signal_bar = self._bar_count

                logger.info(f"IncRandomStrategy: Generated ENTRY signal at bar {self._bar_count}, "
                            f"price=${self._current_price:.2f}, confidence={confidence:.2f}, "
                            f"random_value={random_value:.3f}")

                signal = IncStrategySignal(
                    "ENTRY",
                    confidence=confidence,
                    price=self._current_price,
                    metadata={
                        "strategy": "inc_random",
                        "bar_count": self._bar_count,
                        "timeframe": self.timeframe,
                        "random_value": random_value,
                        "timestamp": self._last_timestamp
                    }
                )

                # Record performance metrics
                signal_time = time.perf_counter() - start_time
                self._performance_metrics['signal_generation_times'].append(signal_time)

                return signal

            return IncStrategySignal("HOLD", 0.0)

        except Exception as e:
            logger.error(f"IncRandomStrategy: Error in get_entry_signal: {e}")
            return IncStrategySignal("HOLD", 0.0)

    def get_exit_signal(self) -> IncStrategySignal:
        """
        Generate random exit signals based on current state.

        Returns:
            IncStrategySignal: Exit signal with confidence level
        """
        if not self._is_warmed_up:
            return IncStrategySignal("HOLD", 0.0)

        start_time = time.perf_counter()

        try:
            # Generate random exit signal using strategy's random instance
            random_value = self._random.random()
            if random_value < self.exit_probability:
                confidence = self._random.uniform(self.min_confidence, self.max_confidence)

                # Randomly choose exit type
                exit_types = ["SELL_SIGNAL", "TAKE_PROFIT", "STOP_LOSS"]
                exit_type = self._random.choice(exit_types)

                logger.info(f"IncRandomStrategy: Generated EXIT signal at bar {self._bar_count}, "
                            f"price=${self._current_price:.2f}, confidence={confidence:.2f}, "
                            f"type={exit_type}, random_value={random_value:.3f}")

                signal = IncStrategySignal(
                    "EXIT",
                    confidence=confidence,
                    price=self._current_price,
                    metadata={
                        "type": exit_type,
                        "strategy": "inc_random",
                        "bar_count": self._bar_count,
                        "timeframe": self.timeframe,
                        "random_value": random_value,
                        "timestamp": self._last_timestamp
                    }
                )

                # Record performance metrics
                signal_time = time.perf_counter() - start_time
                self._performance_metrics['signal_generation_times'].append(signal_time)

                return signal

            return IncStrategySignal("HOLD", 0.0)

        except Exception as e:
            logger.error(f"IncRandomStrategy: Error in get_exit_signal: {e}")
            return IncStrategySignal("HOLD", 0.0)

    def get_confidence(self) -> float:
        """
        Return random confidence level for current market state.

        Returns:
            float: Random confidence level between min and max confidence
        """
        if not self._is_warmed_up:
            return 0.0

        return self._random.uniform(self.min_confidence, self.max_confidence)

    def reset_calculation_state(self) -> None:
        """Reset internal calculation state for reinitialization."""
        super().reset_calculation_state()

        # Reset random strategy specific state
        self._bar_count = 0
        self._last_signal_bar = -1
        self._current_price = None
        self._last_timestamp = None

        # Reset random state if seed was provided
        random_seed = self.params.get("random_seed")
        if random_seed is not None:
            self._random.seed(random_seed)

        logger.info("IncRandomStrategy: Calculation state reset")

    def _reinitialize_from_buffers(self) -> None:
        """
        Reinitialize indicators from available buffer data.

        For random strategy, we just need to restore the current price
        from the latest data point in the buffer.
        """
        try:
            # Get the latest data point from the primary-timeframe buffer.
            # (The original hardcoded the "1min" key, which only matches the
            # default timeframe; self.timeframe keeps this correct for any setting.)
            buffer = self._timeframe_buffers.get(self.timeframe)
            if buffer and len(buffer) > 0:
                latest_data = buffer[-1]
                self._current_price = latest_data['close']
                self._last_timestamp = latest_data.get('timestamp')
                self._bar_count = len(buffer)

                logger.info(f"IncRandomStrategy: Reinitialized from buffer with {self._bar_count} bars")
            else:
                logger.warning("IncRandomStrategy: No buffer data available for reinitialization")

        except Exception as e:
            logger.error(f"IncRandomStrategy: Error reinitializing from buffers: {e}")
            raise

    def get_current_state_summary(self) -> Dict[str, Any]:
        """Get summary of current calculation state for debugging."""
        base_summary = super().get_current_state_summary()
        base_summary.update({
            'entry_probability': self.entry_probability,
            'exit_probability': self.exit_probability,
            'bar_count': self._bar_count,
            'last_signal_bar': self._last_signal_bar,
            'current_price': self._current_price,
            'last_timestamp': self._last_timestamp,
            'signal_frequency': self.signal_frequency,
            'timeframe': self.timeframe
        })
        return base_summary

    def __repr__(self) -> str:
        """String representation of the strategy."""
        return (f"IncRandomStrategy(entry_prob={self.entry_probability}, "
                f"exit_prob={self.exit_probability}, timeframe={self.timeframe}, "
                f"mode={self._calculation_mode}, warmed_up={self._is_warmed_up}, "
                f"bars={self._bar_count})")

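A quick note on determinism: because the class keeps its own `random.Random` instance, a fixed `random_seed` makes runs reproducible. A minimal sketch, illustrative only:

```python
from IncrementalTrader.strategies.random import IncRandomStrategy

# Two instances seeded identically emit identical signal sequences when fed
# the same bars, which makes the strategy a deterministic test fixture.
a = IncRandomStrategy(params={"random_seed": 42})
b = IncRandomStrategy(params={"random_seed": 42})
assert a.get_confidence() == b.get_confidence() == 0.0  # both still warming up
```
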
@ -1,7 +1,7 @@
[project]
-name = "cycles"
+name = "incremental-trader"
version = "0.1.0"
-description = "Add your description here"
+description = "Incremental Trading Framework with Strategy Management and Backtesting"
readme = "README.md"
requires-python = ">=3.10"
dependencies = [

@ -1,329 +0,0 @@
# Incremental Trading Refactoring - Task Progress

## Current Phase: Phase 4 - Documentation and Examples ✅ COMPLETED

### Phase 1: Module Structure Setup ✅
- [x] **Task 1.1**: Create `IncrementalTrader/` directory structure ✅
- [x] **Task 1.2**: Create initial `__init__.py` files with proper exports ✅
- [x] **Task 1.3**: Create main `README.md` with module overview ✅
- [x] **Task 1.4**: Set up documentation structure in `docs/` ✅

### Phase 2: Core Components Migration ✅ COMPLETED
- [x] **Task 2.1**: Move and refactor base classes ✅ COMPLETED
- [x] **Task 2.2**: Move and refactor trader implementation ✅ COMPLETED
- [x] **Task 2.3**: Move and refactor backtester ✅ COMPLETED

### Phase 3: Strategy Migration ✅ COMPLETED
- [x] **Task 3.1**: Move MetaTrend strategy ✅ COMPLETED
- [x] **Task 3.2**: Move Random strategy ✅ COMPLETED
- [x] **Task 3.3**: Move BBRS strategy ✅ COMPLETED
- [x] **Task 3.4**: Move indicators ✅ COMPLETED (all needed indicators migrated)

### Phase 4: Documentation and Examples ✅ COMPLETED
- [x] **Task 4.1**: Create comprehensive documentation ✅ COMPLETED
- [x] **Task 4.2**: Create usage examples ✅ COMPLETED
- [x] **Task 4.3**: Migrate existing documentation ✅ COMPLETED
- [x] **Task 4.4**: Create detailed strategy documentation ✅ COMPLETED

### Phase 5: Integration and Testing (partially completed: Tasks 5.1 and 5.2 pending)
- [ ] **Task 5.1**: Update import statements
- [ ] **Task 5.2**: Update dependencies
- [x] **Task 5.3**: Testing and validation for indicators ✅ COMPLETED
- [x] **Task 5.4**: Testing and validation for Strategies ✅ COMPLETED

### Phase 6: Cleanup and Optimization (Pending)
- [ ] **Task 6.1**: Remove old module
- [ ] **Task 6.2**: Code optimization
- [ ] **Task 6.3**: Final documentation review

---

## Progress Log

### 2024-01-XX - Task 5.3 Completed ✅
- ✅ Successfully created comprehensive indicator comparison test framework
- ✅ Validated mathematical equivalence between original and new indicator implementations
- ✅ Created `test/test_indicators_comparison_fixed.py` with comprehensive testing suite
- ✅ Fixed interface compatibility issues and validated all indicators work correctly
- ✅ Generated detailed test reports and comparison plots
- ✅ All indicators show 0.0000000000 difference (perfect mathematical equivalence)

**Task 5.3 Results:**
- **Comprehensive Test Suite**: Complete framework for comparing original vs new indicators
- **Mathematical Validation**: All indicators show perfect equivalence (0.0 difference)
- **Test Coverage**: Moving averages, EMA, ATR, SimpleATR, Supertrend, RSI, SimpleRSI, Bollinger Bands
- **Interface Validation**: Confirmed both modules use identical `is_warmed_up()` and `get_current_value()` interface
- **Detailed Reports**: Generated markdown reports and comparison plots
- **Test Results**: 100% PASSED - All 9 indicator types are mathematically equivalent

**Indicators Validated:**
- **Moving Averages**: MA(20), MA(50) - Perfect equivalence
- **Exponential Moving Averages**: EMA(20), EMA(50) - Perfect equivalence
- **ATR Indicators**: ATR(14), SimpleATR(14) - Perfect equivalence
- **Supertrend**: Supertrend(10, 3.0) - Perfect equivalence including trend direction (100% match)
- **RSI Indicators**: RSI(14), SimpleRSI(14) - Perfect equivalence
- **Bollinger Bands**: BB(20, 2.0) - Perfect equivalence for all three bands

**Test Framework Features:**
- **Data Processing**: Uses BTCUSD minute data (3000 data points) for realistic testing
- **Statistical Analysis**: Max/mean/std difference calculations with pass/fail criteria (sketched below)
- **Visual Validation**: Detailed comparison plots showing overlays and differences
- **Report Generation**: Comprehensive markdown reports with Unicode support
- **Modular Design**: Individual test files for each indicator type
- **Interface Compatibility**: Fixed all interface calls to use correct method names

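The pass/fail criterion behind these results reduces to a max-absolute-difference check. A minimal sketch, using the thresholds that appear in the test files below:

```python
import numpy as np

def equivalence_status(original, new, strict=1e-10, loose=1e-6):
    """Classify an original/new indicator series pair."""
    diff = np.asarray(new, dtype=float) - np.asarray(original, dtype=float)
    valid = diff[~np.isnan(diff)]
    if len(valid) == 0:
        return "ERROR: no valid data points"
    max_diff = np.max(np.abs(valid))
    if max_diff < strict:
        return "PASSED"   # mathematically equivalent
    if max_diff < loose:
        return "WARNING"  # floating point precision only
    return "FAILED"       # significant differences
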
**Phase 5 Testing Summary:**
The migration validation is complete with a 100% success rate. All IncrementalTrader indicators are mathematically identical to the original implementations, confirming that the migration preserves all calculation accuracy while providing the enhanced modular architecture.

### 2024-01-XX - Task 4.4 Completed ✅
- ✅ Successfully created detailed strategy documentation for all three strategies
- ✅ Created comprehensive MetaTrend strategy documentation (`IncrementalTrader/docs/strategies/metatrend.md`)
- ✅ Created comprehensive BBRS strategy documentation (`IncrementalTrader/docs/strategies/bbrs.md`)
- ✅ Created comprehensive Random strategy documentation (`IncrementalTrader/docs/strategies/random.md`)
- ✅ Each documentation includes detailed process flow diagrams and implementation details
- ✅ Documented mathematical foundations, configuration parameters, and usage examples
- ✅ Added troubleshooting guides and advanced features for each strategy

**Task 4.4 Results:**
- **MetaTrend Documentation**: Complete guide with multi-Supertrend consensus algorithm details
- **BBRS Documentation**: Comprehensive mean-reversion strategy with market regime detection
- **Random Documentation**: Testing and benchmarking strategy with statistical validation features
- **Process Diagrams**: Visual flow diagrams showing data processing and signal generation
- **Implementation Details**: Code examples, configuration parameters, and optimization ranges
- **Performance Analysis**: Expected performance characteristics and backtesting results

**Key Documentation Features:**
- **Mathematical Foundations**: Detailed algorithms and calculations for each strategy
- **Process Flow Diagrams**: Visual representation of data flow and decision logic
- **Implementation Architecture**: Class hierarchies and component relationships
- **Configuration Management**: Parameter descriptions and optimization ranges
- **Usage Examples**: Basic, aggressive, and conservative configuration examples
- **Advanced Features**: Dynamic parameter adjustment and multi-timeframe analysis
- **Troubleshooting**: Common issues and debug information
- **Performance Metrics**: Expected results and statistical properties

**Phase 4 Summary - Documentation and Examples COMPLETED ✅:**
All documentation tasks have been successfully completed:
- ✅ **Comprehensive Documentation**: Complete API reference, guides, and examples
- ✅ **Usage Examples**: Practical examples for immediate use
- ✅ **Migration Guide**: Smooth transition path from legacy framework
- ✅ **Strategy Documentation**: Detailed documentation for all three strategies with process diagrams

**Ready for Phase 5:** Integration and testing can now begin with complete documentation.

### 2024-01-XX - Task 4.3 Completed ✅
- ✅ Successfully migrated existing documentation from legacy Cycles framework
- ✅ Created comprehensive migration guide (`IncrementalTrader/docs/migration.md`)
- ✅ Documented architectural changes and import updates
- ✅ Provided strategy migration patterns and examples
- ✅ Included compatibility layer documentation
- ✅ Added troubleshooting guide for common migration issues
- ✅ Preserved valuable timeframe system and strategy manager concepts from legacy docs

**Task 4.3 Results:**
- **Migration Guide**: Complete guide for transitioning from Cycles to IncrementalTrader
- **Architectural Mapping**: Clear mapping between old and new module structures
- **Import Updates**: Comprehensive list of import changes and compatibility aliases
- **Strategy Migration**: Detailed patterns for migrating existing strategies
- **Legacy Reference**: Preserved important concepts from original documentation
- **Troubleshooting**: Common issues and solutions for migration process

**Key Migration Features:**
- **Backward Compatibility**: Compatibility aliases for smooth transition
- **Gradual Migration**: Phased approach to minimize disruption
- **Enhanced Features**: Documentation of new capabilities and improvements
- **Performance Notes**: Memory efficiency and processing speed improvements
- **Resource Links**: Complete reference to new documentation structure

### 2024-01-XX - Task 3.3 Completed ✅
- ✅ Successfully migrated BBRS strategy with all dependencies
- ✅ Migrated Bollinger Bands indicators: `BollingerBandsState`, `BollingerBandsOHLCState`
- ✅ Migrated RSI indicators: `RSIState`, `SimpleRSIState`
- ✅ Created `IncrementalTrader/strategies/bbrs.py` with enhanced BBRS strategy
- ✅ Integrated with new IncStrategyBase framework using timeframe aggregation
- ✅ Enhanced signal generation using factory methods (`IncStrategySignal.BUY()`, `SELL()`, `HOLD()`)
- ✅ Maintained full compatibility with original strategy behavior
- ✅ Updated module exports and documentation
- ✅ Added compatibility alias `IncBBRSStrategy` for backward compatibility

**Task 3.3 Results:**
- **BBRS Strategy**: Fully functional with market regime detection and adaptive behavior
- **Bollinger Bands Framework**: Complete implementation with squeeze detection and position analysis
- **RSI Framework**: Wilder's smoothing and simple RSI implementations
- **Enhanced Features**: Improved signal generation using factory methods
- **Module Integration**: All imports working correctly with new structure
- **Compatibility**: Maintains exact behavior equivalence to original implementation

**Key Improvements Made:**
- **Market Regime Detection**: Automatic switching between trending and sideways market strategies
- **Volume Analysis**: Integrated volume spike detection and volume moving average tracking
- **Enhanced Signal Generation**: Updated to use `IncStrategySignal.BUY()` and `SELL()` factory methods
- **Comprehensive State Management**: Detailed state tracking and debugging capabilities
- **Flexible Configuration**: Configurable parameters for different market conditions
- **Compatibility**: Added `IncBBRSStrategy` alias for backward compatibility

**Task 3.4 Completed as Part of 3.3:**
All required indicators have been migrated as part of the strategy migrations:
- ✅ **Base Indicators**: `IndicatorState`, `SimpleIndicatorState`, `OHLCIndicatorState`
- ✅ **Moving Averages**: `MovingAverageState`, `ExponentialMovingAverageState`
- ✅ **Volatility**: `ATRState`, `SimpleATRState`
- ✅ **Trend**: `SupertrendState`, `SupertrendCollection`
- ✅ **Bollinger Bands**: `BollingerBandsState`, `BollingerBandsOHLCState`
- ✅ **RSI**: `RSIState`, `SimpleRSIState`

**Phase 3 Summary - Strategy Migration COMPLETED ✅:**
All major strategies have been successfully migrated:
- ✅ **MetaTrend Strategy**: Meta-trend detection using multiple Supertrend indicators
- ✅ **Random Strategy**: Testing framework for strategy validation
- ✅ **BBRS Strategy**: Bollinger Bands + RSI with market regime detection
- ✅ **Complete Indicator Framework**: All indicators needed for strategies

### 2024-01-XX - Task 3.2 Completed ✅
- ✅ Successfully migrated Random strategy for testing framework
- ✅ Created `IncrementalTrader/strategies/random.py` with enhanced Random strategy
- ✅ Updated imports to use new module structure
- ✅ Enhanced signal generation using factory methods (`IncStrategySignal.BUY()`, `SELL()`, `HOLD()`)
- ✅ Maintained full compatibility with original strategy behavior
- ✅ Updated module exports and documentation
- ✅ Added compatibility alias `IncRandomStrategy` for backward compatibility

**Task 3.2 Results:**
- **Random Strategy**: Fully functional testing strategy with enhanced signal generation
- **Enhanced Features**: Improved signal generation using factory methods
- **Module Integration**: All imports working correctly with new structure
- **Compatibility**: Maintains exact behavior equivalence to original implementation
- **Testing Framework**: Ready for use in testing incremental strategy framework

**Key Improvements Made:**
- **Enhanced Signal Generation**: Updated to use `IncStrategySignal.BUY()` and `SELL()` factory methods
- **Improved Logging**: Updated strategy name references for consistency
- **Better Documentation**: Enhanced docstrings and examples
- **Compatibility**: Added `IncRandomStrategy` alias for backward compatibility

### 2024-01-XX - Task 3.1 Completed ✅
- ✅ Successfully migrated MetaTrend strategy and all its dependencies
- ✅ Migrated complete indicator framework: base classes, moving averages, ATR, Supertrend
- ✅ Created `IncrementalTrader/strategies/indicators/` with full indicator suite
- ✅ Created `IncrementalTrader/strategies/metatrend.py` with enhanced MetaTrend strategy
- ✅ Updated all import statements to use new module structure
- ✅ Enhanced strategy with improved signal generation using factory methods
- ✅ Maintained full compatibility with original strategy behavior
- ✅ Updated module exports and documentation

**Task 3.1 Results:**
- **Indicator Framework**: Complete migration of base classes, moving averages, ATR, and Supertrend
- **MetaTrend Strategy**: Fully functional with enhanced signal generation and logging
- **Module Integration**: All imports working correctly with new structure
- **Enhanced Features**: Improved signal generation using `IncStrategySignal.BUY()`, `SELL()`, `HOLD()`
- **Compatibility**: Maintains exact mathematical equivalence to original implementation

**Key Components Migrated:**
- `IndicatorState`, `SimpleIndicatorState`, `OHLCIndicatorState`: Base indicator framework
- `MovingAverageState`, `ExponentialMovingAverageState`: Moving average indicators
- `ATRState`, `SimpleATRState`: Average True Range indicators
- `SupertrendState`, `SupertrendCollection`: Supertrend indicators for trend detection
- `MetaTrendStrategy`: Complete strategy implementation with meta-trend calculation

### 2024-01-XX - Task 2.3 Completed ✅
- ✅ Successfully moved and refactored backtester implementation
- ✅ Created `IncrementalTrader/backtester/backtester.py` with enhanced architecture
- ✅ Created `IncrementalTrader/backtester/config.py` for configuration management
- ✅ Created `IncrementalTrader/backtester/utils.py` with integrated utilities
- ✅ Separated concerns: backtesting logic, configuration, and utilities
- ✅ Removed external dependencies (self-contained DataLoader, SystemUtils, ResultsSaver)
- ✅ Enhanced configuration with validation and directory management
- ✅ Improved data loading with validation and multiple format support
- ✅ Enhanced result saving with comprehensive reporting capabilities
- ✅ Updated module imports and verified functionality

**Task 2.3 Results:**
- `IncBacktester`: Main backtesting engine with parallel execution support
- `BacktestConfig`: Enhanced configuration management with validation
- `OptimizationConfig`: Specialized configuration for parameter optimization
- `DataLoader`: Self-contained data loading with CSV/JSON support and validation
- `SystemUtils`: System resource management for optimal worker allocation
- `ResultsSaver`: Comprehensive result saving with multiple output formats
- All imports working correctly from main module

**Key Improvements Made:**
- **Modular Architecture**: Split backtester into logical components (config, utils, main)
- **Enhanced Configuration**: Robust configuration with validation and directory management
- **Self-Contained Utilities**: No external dependencies on cycles module
- **Improved Data Loading**: Support for multiple formats with comprehensive validation
- **Better Result Management**: Enhanced saving with JSON, CSV, and comprehensive reports
- **System Resource Optimization**: Intelligent worker allocation based on system resources
- **Action Logging**: Comprehensive logging of all backtesting operations

### 2024-01-XX - Task 2.2 Completed ✅
- ✅ Successfully moved and refactored trader implementation
- ✅ Created `IncrementalTrader/trader/trader.py` with improved architecture
- ✅ Created `IncrementalTrader/trader/position.py` for position management
- ✅ Separated concerns: trading logic vs position management
- ✅ Removed external dependencies (self-contained MarketFees)
- ✅ Enhanced error handling and logging throughout
- ✅ Improved API with cleaner method signatures
- ✅ Added portfolio tracking and enhanced performance metrics
- ✅ Updated module imports and verified functionality

**Task 2.2 Results:**
- `IncTrader`: Main trader class with strategy integration and risk management
- `PositionManager`: Dedicated position state and trade execution management
- `TradeRecord`: Enhanced trade record structure
- `MarketFees`: Self-contained fee calculation utilities
- All imports working correctly from main module

**Key Improvements Made:**
- **Separation of Concerns**: Split trader logic from position management
- **Enhanced Architecture**: Cleaner interfaces and better modularity
- **Self-Contained**: No external dependencies on cycles module
- **Better Error Handling**: Comprehensive exception handling and logging
- **Improved Performance Tracking**: Portfolio history and detailed metrics
- **Flexible Fee Calculation**: Support for different exchange fee structures

### 2024-01-XX - Task 2.1 Completed ✅
- ✅ Successfully moved and refactored base classes
- ✅ Created `IncrementalTrader/strategies/base.py` with improved structure
- ✅ Cleaned up imports and removed external dependencies
- ✅ Added convenience methods (BUY, SELL, HOLD) to IncStrategySignal
- ✅ Improved error handling and logging
- ✅ Simplified the API while maintaining all functionality
- ✅ Updated module imports to use new base classes

**Task 2.1 Results:**
- `IncStrategySignal`: Enhanced signal class with factory methods
- `TimeframeAggregator`: Robust timeframe aggregation for real-time data
- `IncStrategyBase`: Comprehensive base class with performance tracking
- All imports updated and working correctly

### 2024-01-XX - Phase 1 Completed ✅
- ✅ Created complete directory structure for IncrementalTrader module
- ✅ Set up all `__init__.py` files with proper module exports
- ✅ Created comprehensive main README.md with usage examples
- ✅ Established documentation structure with architecture overview
- ✅ All placeholder imports ready for Phase 2 migration

**Phase 1 Results:**
```
IncrementalTrader/
├── README.md                 # Complete module overview
├── __init__.py               # Main module exports
├── strategies/               # Strategy framework
│   ├── __init__.py           # Strategy exports
│   └── indicators/           # Indicator framework
│       └── __init__.py       # Indicator exports
├── trader/                   # Trading execution
│   └── __init__.py           # Trader exports
├── backtester/               # Backtesting framework
│   └── __init__.py           # Backtester exports
└── docs/                     # Documentation
    ├── README.md             # Documentation index
    ├── architecture.md       # System architecture
    └── strategies/           # Strategy documentation
        ├── metatrend.md      # MetaTrend strategy guide
        ├── bbrs.md           # BBRS strategy guide
        └── random.md         # Random strategy guide
```

---

*This file tracks the progress of the incremental trading module refactoring.*

@ -1,395 +0,0 @@
"""
ATR Indicators Comparison Test

Focused testing for ATR and Simple ATR implementations.
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from datetime import datetime
import sys
from pathlib import Path

# Add project root to path
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

# Import original indicators
from cycles.IncStrategies.indicators import (
    ATRState as OriginalATR,
    SimpleATRState as OriginalSimpleATR
)

# Import new indicators
from IncrementalTrader.strategies.indicators import (
    ATRState as NewATR,
    SimpleATRState as NewSimpleATR
)

class ATRComparisonTest:
    """Test framework for comparing ATR implementations."""

    def __init__(self, data_file: str = "data/btcusd_1-min_data.csv", sample_size: int = 5000):
        self.data_file = data_file
        self.sample_size = sample_size
        self.data = None
        self.results = {}

        # Create results directory
        self.results_dir = Path("test/results/atr_indicators")
        self.results_dir.mkdir(parents=True, exist_ok=True)

    def load_data(self):
        """Load and prepare the data for testing."""
        print(f"Loading data from {self.data_file}...")

        df = pd.read_csv(self.data_file)
        df['datetime'] = pd.to_datetime(df['Timestamp'], unit='s')

        if self.sample_size and len(df) > self.sample_size:
            df = df.tail(self.sample_size).reset_index(drop=True)

        self.data = df
        print(f"Loaded {len(df)} data points from {df['datetime'].iloc[0]} to {df['datetime'].iloc[-1]}")

    def test_atr(self, periods=[7, 14, 21, 28]):
        """Test ATR implementations."""
        print("\n=== Testing ATR (Wilder's Smoothing) ===")

        for period in periods:
            print(f"Testing ATR({period})...")

            # Initialize indicators
            original_atr = OriginalATR(period)
            new_atr = NewATR(period)

            original_values = []
            new_values = []
            true_ranges = []

            # Process data
            for _, row in self.data.iterrows():
                high, low, close = row['High'], row['Low'], row['Close']

                # Create OHLC dictionary for both indicators
                ohlc_data = {
                    'open': row['Open'],
                    'high': high,
                    'low': low,
                    'close': close
                }

                original_atr.update(ohlc_data)
                new_atr.update(ohlc_data)

                original_values.append(original_atr.get_current_value() if original_atr.is_warmed_up() else np.nan)
                new_values.append(new_atr.get_current_value() if new_atr.is_warmed_up() else np.nan)

                # Calculate true range for reference.
                # len(true_ranges) equals the current row index, so
                # len(true_ranges) - 1 addresses the previous row's close;
                # the first row falls back to high - low.
                if len(self.data) > 1:
                    prev_close = self.data.iloc[max(0, len(true_ranges)-1)]['Close'] if true_ranges else close
                    tr = max(high - low, abs(high - prev_close), abs(low - prev_close))
                    true_ranges.append(tr)
                else:
                    true_ranges.append(high - low)

            # Store results
            self.results[f'ATR_{period}'] = {
                'original': original_values,
                'new': new_values,
                'true_ranges': true_ranges,
                'highs': self.data['High'].tolist(),
                'lows': self.data['Low'].tolist(),
                'closes': self.data['Close'].tolist(),
                'dates': self.data['datetime'].tolist(),
                'period': period
            }

            # Calculate differences
            diff = np.array(new_values) - np.array(original_values)
            valid_diff = diff[~np.isnan(diff)]

            if len(valid_diff) > 0:
                max_diff = np.max(np.abs(valid_diff))
                mean_diff = np.mean(np.abs(valid_diff))
                std_diff = np.std(valid_diff)

                print(f"  Max difference: {max_diff:.12f}")
                print(f"  Mean difference: {mean_diff:.12f}")
                print(f"  Std difference: {std_diff:.12f}")

                # Status check
                if max_diff < 1e-10:
                    print("  ✅ PASSED: Mathematically equivalent")
                elif max_diff < 1e-6:
                    print("  ⚠️ WARNING: Small differences (floating point precision)")
                else:
                    print("  ❌ FAILED: Significant differences detected")
            else:
                print("  ❌ ERROR: No valid data points")

    def test_simple_atr(self, periods=[7, 14, 21, 28]):
        """Test Simple ATR implementations."""
        print("\n=== Testing Simple ATR (Simple Moving Average) ===")

        for period in periods:
            print(f"Testing SimpleATR({period})...")

            # Initialize indicators
            original_atr = OriginalSimpleATR(period)
            new_atr = NewSimpleATR(period)

            original_values = []
            new_values = []
            true_ranges = []

            # Process data
            for _, row in self.data.iterrows():
                high, low, close = row['High'], row['Low'], row['Close']

                # Create OHLC dictionary for both indicators
                ohlc_data = {
                    'open': row['Open'],
                    'high': high,
                    'low': low,
                    'close': close
                }

                original_atr.update(ohlc_data)
                new_atr.update(ohlc_data)

                original_values.append(original_atr.get_current_value() if original_atr.is_warmed_up() else np.nan)
                new_values.append(new_atr.get_current_value() if new_atr.is_warmed_up() else np.nan)

                # Calculate true range for reference
                if len(self.data) > 1:
                    prev_close = self.data.iloc[max(0, len(true_ranges)-1)]['Close'] if true_ranges else close
                    tr = max(high - low, abs(high - prev_close), abs(low - prev_close))
                    true_ranges.append(tr)
                else:
                    true_ranges.append(high - low)

            # Store results
            self.results[f'SimpleATR_{period}'] = {
                'original': original_values,
                'new': new_values,
                'true_ranges': true_ranges,
                'highs': self.data['High'].tolist(),
                'lows': self.data['Low'].tolist(),
                'closes': self.data['Close'].tolist(),
                'dates': self.data['datetime'].tolist(),
                'period': period
            }

            # Calculate differences
            diff = np.array(new_values) - np.array(original_values)
            valid_diff = diff[~np.isnan(diff)]

            if len(valid_diff) > 0:
                max_diff = np.max(np.abs(valid_diff))
                mean_diff = np.mean(np.abs(valid_diff))
                std_diff = np.std(valid_diff)

                print(f"  Max difference: {max_diff:.12f}")
                print(f"  Mean difference: {mean_diff:.12f}")
                print(f"  Std difference: {std_diff:.12f}")

                # Status check
                if max_diff < 1e-10:
                    print("  ✅ PASSED: Mathematically equivalent")
                elif max_diff < 1e-6:
                    print("  ⚠️ WARNING: Small differences (floating point precision)")
                else:
                    print("  ❌ FAILED: Significant differences detected")
            else:
                print("  ❌ ERROR: No valid data points")

    def plot_comparison(self, indicator_name: str):
        """Plot detailed comparison for a specific indicator."""
        if indicator_name not in self.results:
            print(f"No results found for {indicator_name}")
            return

        result = self.results[indicator_name]
        dates = pd.to_datetime(result['dates'])

        # Create figure with subplots
        fig, axes = plt.subplots(4, 1, figsize=(15, 16))
        fig.suptitle(f'{indicator_name} - Detailed Comparison Analysis', fontsize=16)

        # Plot 1: OHLC data
        ax1 = axes[0]
        ax1.plot(dates, result['highs'], label='High', alpha=0.6, color='green')
        ax1.plot(dates, result['lows'], label='Low', alpha=0.6, color='red')
        ax1.plot(dates, result['closes'], label='Close', alpha=0.8, color='blue')
        ax1.set_title('OHLC Data')
        ax1.legend()
        ax1.grid(True, alpha=0.3)

        # Plot 2: True Range
        ax2 = axes[1]
        ax2.plot(dates, result['true_ranges'], label='True Range', alpha=0.7, color='orange')
        ax2.set_title('True Range Values')
        ax2.legend()
        ax2.grid(True, alpha=0.3)

        # Plot 3: ATR comparison
        ax3 = axes[2]
        ax3.plot(dates, result['original'], label='Original', alpha=0.8, linewidth=2)
        ax3.plot(dates, result['new'], label='New', alpha=0.8, linewidth=2, linestyle='--')
        ax3.set_title(f'{indicator_name} Values Comparison')
        ax3.legend()
        ax3.grid(True, alpha=0.3)

        # Plot 4: Difference analysis
        ax4 = axes[3]
        diff = np.array(result['new']) - np.array(result['original'])
        ax4.plot(dates, diff, color='red', alpha=0.7, linewidth=1)
        ax4.set_title(f'{indicator_name} Difference (New - Original)')
        ax4.axhline(y=0, color='black', linestyle='-', alpha=0.5)
        ax4.grid(True, alpha=0.3)

        # Add statistics text
        valid_diff = diff[~np.isnan(diff)]
        if len(valid_diff) > 0:
            stats_text = f'Max: {np.max(np.abs(valid_diff)):.2e}\n'
            stats_text += f'Mean: {np.mean(np.abs(valid_diff)):.2e}\n'
            stats_text += f'Std: {np.std(valid_diff):.2e}'
            ax4.text(0.02, 0.98, stats_text, transform=ax4.transAxes,
                     verticalalignment='top', bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8))

        # Format x-axis
        for ax in axes:
            ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
            ax.xaxis.set_major_locator(mdates.DayLocator(interval=max(1, len(dates)//10)))
            plt.setp(ax.xaxis.get_majorticklabels(), rotation=45)

        plt.tight_layout()

        # Save plot
        plot_path = self.results_dir / f"{indicator_name}_detailed_comparison.png"
        plt.savefig(plot_path, dpi=300, bbox_inches='tight')
        print(f"Plot saved to {plot_path}")

        plt.show()

    def plot_all_comparisons(self):
        """Plot comparisons for all tested indicators."""
        print("\n=== Generating Detailed Comparison Plots ===")

        for indicator_name in self.results.keys():
            print(f"Plotting {indicator_name}...")
            self.plot_comparison(indicator_name)
            plt.close('all')

    def generate_report(self):
        """Generate detailed report for ATR indicators."""
        print("\n=== Generating ATR Report ===")

        report_lines = []
        report_lines.append("# ATR Indicators Comparison Report")
        report_lines.append(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        report_lines.append(f"Data file: {self.data_file}")
        report_lines.append(f"Sample size: {len(self.data)} data points")
        report_lines.append("")

        # Summary table
        report_lines.append("## Summary Table")
        report_lines.append("| Indicator | Period | Max Diff | Mean Diff | Status |")
        report_lines.append("|-----------|--------|----------|-----------|--------|")

        for indicator_name, result in self.results.items():
            diff = np.array(result['new']) - np.array(result['original'])
            valid_diff = diff[~np.isnan(diff)]

            if len(valid_diff) > 0:
                max_diff = np.max(np.abs(valid_diff))
                mean_diff = np.mean(np.abs(valid_diff))

                if max_diff < 1e-10:
                    status = "✅ PASSED"
                elif max_diff < 1e-6:
                    status = "⚠️ WARNING"
                else:
                    status = "❌ FAILED"

                report_lines.append(f"| {indicator_name} | {result['period']} | {max_diff:.2e} | {mean_diff:.2e} | {status} |")
            else:
                report_lines.append(f"| {indicator_name} | {result['period']} | N/A | N/A | ❌ ERROR |")

        report_lines.append("")

        # Methodology explanation
        report_lines.append("## Methodology")
        report_lines.append("### ATR (Average True Range)")
        report_lines.append("- Uses Wilder's smoothing method: ATR = (Previous ATR * (n-1) + Current TR) / n")
        report_lines.append("- True Range = max(High-Low, |High-PrevClose|, |Low-PrevClose|)")
        report_lines.append("")
        report_lines.append("### Simple ATR")
        report_lines.append("- Uses simple moving average of True Range values")
        report_lines.append("- More responsive to recent changes than Wilder's method")
        report_lines.append("")

        # Detailed analysis
        report_lines.append("## Detailed Analysis")

        for indicator_name, result in self.results.items():
            report_lines.append(f"### {indicator_name}")

            diff = np.array(result['new']) - np.array(result['original'])
            valid_diff = diff[~np.isnan(diff)]

            if len(valid_diff) > 0:
                report_lines.append(f"- **Period**: {result['period']}")
                report_lines.append(f"- **Valid data points**: {len(valid_diff)}")
                report_lines.append(f"- **Max absolute difference**: {np.max(np.abs(valid_diff)):.12f}")
                report_lines.append(f"- **Mean absolute difference**: {np.mean(np.abs(valid_diff)):.12f}")
                report_lines.append(f"- **Standard deviation**: {np.std(valid_diff):.12f}")

                # ATR-specific metrics
                valid_original = np.array(result['original'])[~np.isnan(result['original'])]
                if len(valid_original) > 0:
                    mean_atr = np.mean(valid_original)
                    relative_error = np.mean(np.abs(valid_diff)) / mean_atr * 100
                    report_lines.append(f"- **Mean ATR value**: {mean_atr:.6f}")
                    report_lines.append(f"- **Relative error**: {relative_error:.2e}%")

                # Percentile analysis
                percentiles = [1, 5, 25, 50, 75, 95, 99]
                perc_values = np.percentile(np.abs(valid_diff), percentiles)
                perc_str = ", ".join([f"P{p}: {v:.2e}" for p, v in zip(percentiles, perc_values)])
                report_lines.append(f"- **Percentiles**: {perc_str}")

            report_lines.append("")

        # Save report
        report_path = self.results_dir / "atr_indicators_report.md"
        with open(report_path, 'w', encoding='utf-8') as f:
            f.write('\n'.join(report_lines))

        print(f"Report saved to {report_path}")

    def run_tests(self):
        """Run all ATR tests."""
        print("Starting ATR Comparison Tests...")

        # Load data
        self.load_data()

        # Run tests
        self.test_atr()
        self.test_simple_atr()

        # Generate outputs
        self.plot_all_comparisons()
        self.generate_report()

        print("\n✅ ATR tests completed!")


if __name__ == "__main__":
    tester = ATRComparisonTest(sample_size=3000)
    tester.run_tests()

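For reference, the two smoothing schemes this file compares, written out from the methodology strings embedded in the report above:

$$\mathrm{TR}_t = \max\bigl(H_t - L_t,\ |H_t - C_{t-1}|,\ |L_t - C_{t-1}|\bigr)$$

$$\mathrm{ATR}^{\text{Wilder}}_t = \frac{(n-1)\,\mathrm{ATR}_{t-1} + \mathrm{TR}_t}{n}, \qquad \mathrm{ATR}^{\text{SMA}}_t = \frac{1}{n}\sum_{i=0}^{n-1}\mathrm{TR}_{t-i}$$
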
@ -1,487 +0,0 @@
"""
Bollinger Bands Indicators Comparison Test

Focused testing for Bollinger Bands implementations.
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from datetime import datetime
import sys
from pathlib import Path

# Add project root to path
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

# Import original indicators
from cycles.IncStrategies.indicators import (
    BollingerBandsState as OriginalBB,
    BollingerBandsOHLCState as OriginalBBOHLC
)

# Import new indicators
from IncrementalTrader.strategies.indicators import (
    BollingerBandsState as NewBB,
    BollingerBandsOHLCState as NewBBOHLC
)

class BollingerBandsComparisonTest:
    """Test framework for comparing Bollinger Bands implementations."""

    def __init__(self, data_file: str = "data/btcusd_1-min_data.csv", sample_size: int = 5000):
        self.data_file = data_file
        self.sample_size = sample_size
        self.data = None
        self.results = {}

        # Create results directory
        self.results_dir = Path("test/results/bollinger_bands")
        self.results_dir.mkdir(parents=True, exist_ok=True)

    def load_data(self):
        """Load and prepare the data for testing."""
        print(f"Loading data from {self.data_file}...")

        df = pd.read_csv(self.data_file)
        df['datetime'] = pd.to_datetime(df['Timestamp'], unit='s')

        if self.sample_size and len(df) > self.sample_size:
            df = df.tail(self.sample_size).reset_index(drop=True)

        self.data = df
        print(f"Loaded {len(df)} data points from {df['datetime'].iloc[0]} to {df['datetime'].iloc[-1]}")

    def test_bollinger_bands(self, periods=[10, 20, 30], std_devs=[1.5, 2.0, 2.5]):
        """Test Bollinger Bands implementations (Close price based)."""
        print("\n=== Testing Bollinger Bands (Close Price) ===")

        for period in periods:
            for std_dev in std_devs:
                print(f"Testing BollingerBands({period}, {std_dev})...")

                # Initialize indicators
                original_bb = OriginalBB(period, std_dev)
                new_bb = NewBB(period, std_dev)

                # Band series keyed by name (replaces the original's fragile
                # locals() lookup with an explicit dict, same values stored)
                original_bands = {'upper': [], 'middle': [], 'lower': []}
                new_bands = {'upper': [], 'middle': [], 'lower': []}
                prices = []

                # Process data
                for _, row in self.data.iterrows():
                    price = row['Close']
                    prices.append(price)

                    original_bb.update(price)
                    new_bb.update(price)

                    if original_bb.is_warmed_up():
                        value = original_bb.get_current_value()
                        original_bands['upper'].append(value['upper_band'])
                        original_bands['middle'].append(value['middle_band'])
                        original_bands['lower'].append(value['lower_band'])
                    else:
                        for series in original_bands.values():
                            series.append(np.nan)

                    if new_bb.is_warmed_up():
                        value = new_bb.get_current_value()
                        new_bands['upper'].append(value['upper_band'])
                        new_bands['middle'].append(value['middle_band'])
                        new_bands['lower'].append(value['lower_band'])
                    else:
                        for series in new_bands.values():
                            series.append(np.nan)

                # Store results
                key = f'BB_{period}_{std_dev}'
                self.results[key] = {
                    'original_upper': original_bands['upper'],
                    'original_middle': original_bands['middle'],
                    'original_lower': original_bands['lower'],
                    'new_upper': new_bands['upper'],
                    'new_middle': new_bands['middle'],
                    'new_lower': new_bands['lower'],
                    'prices': prices,
                    'dates': self.data['datetime'].tolist(),
                    'period': period,
                    'std_dev': std_dev,
                    'type': 'Close'
                }

                # Calculate differences for each band
                for band in ['upper', 'middle', 'lower']:
                    orig = np.array(original_bands[band])
                    new = np.array(new_bands[band])
                    diff = new - orig
                    valid_diff = diff[~np.isnan(diff)]

                    if len(valid_diff) > 0:
                        max_diff = np.max(np.abs(valid_diff))
                        mean_diff = np.mean(np.abs(valid_diff))

                        print(f"  {band.capitalize()} band - Max diff: {max_diff:.12f}, Mean diff: {mean_diff:.12f}")

                        # Status check for this band
                        if max_diff < 1e-10:
                            status = "✅ PASSED"
                        elif max_diff < 1e-6:
                            status = "⚠️ WARNING"
                        else:
                            status = "❌ FAILED"
                        print(f"    Status: {status}")
                    else:
                        print(f"  {band.capitalize()} band - ❌ ERROR: No valid data points")

def test_bollinger_bands_ohlc(self, periods=[10, 20, 30], std_devs=[1.5, 2.0, 2.5]):
|
||||
"""Test Bollinger Bands OHLC implementations (Typical price based)."""
|
||||
print("\n=== Testing Bollinger Bands OHLC (Typical Price) ===")
|
||||
|
||||
for period in periods:
|
||||
for std_dev in std_devs:
|
||||
print(f"Testing BollingerBandsOHLC({period}, {std_dev})...")
|
||||
|
||||
# Initialize indicators
|
||||
original_bb = OriginalBBOHLC(period, std_dev)
|
||||
new_bb = NewBBOHLC(period, std_dev)
|
||||
|
||||
original_upper = []
|
||||
original_middle = []
|
||||
original_lower = []
|
||||
new_upper = []
|
||||
new_middle = []
|
||||
new_lower = []
|
||||
typical_prices = []
|
||||
|
||||
# Process data
|
||||
for _, row in self.data.iterrows():
|
||||
high, low, close = row['High'], row['Low'], row['Close']
|
||||
typical_price = (high + low + close) / 3
|
||||
typical_prices.append(typical_price)
|
||||
|
||||
# Create OHLC dictionary for both indicators
|
||||
ohlc_data = {
|
||||
'open': row['Open'],
|
||||
'high': high,
|
||||
'low': low,
|
||||
'close': close
|
||||
}
|
||||
|
||||
original_bb.update(ohlc_data)
|
||||
new_bb.update(ohlc_data)
|
||||
|
||||
if original_bb.is_warmed_up():
|
||||
original_upper.append(original_bb.get_current_value()['upper_band'])
|
||||
original_middle.append(original_bb.get_current_value()['middle_band'])
|
||||
original_lower.append(original_bb.get_current_value()['lower_band'])
|
||||
else:
|
||||
original_upper.append(np.nan)
|
||||
original_middle.append(np.nan)
|
||||
original_lower.append(np.nan)
|
||||
|
||||
if new_bb.is_warmed_up():
|
||||
new_upper.append(new_bb.get_current_value()['upper_band'])
|
||||
new_middle.append(new_bb.get_current_value()['middle_band'])
|
||||
new_lower.append(new_bb.get_current_value()['lower_band'])
|
||||
else:
|
||||
new_upper.append(np.nan)
|
||||
new_middle.append(np.nan)
|
||||
new_lower.append(np.nan)
|
||||
|
||||
# Store results
|
||||
key = f'BBOHLC_{period}_{std_dev}'
|
||||
self.results[key] = {
|
||||
'original_upper': original_upper,
|
||||
'original_middle': original_middle,
|
||||
'original_lower': original_lower,
|
||||
'new_upper': new_upper,
|
||||
'new_middle': new_middle,
|
||||
'new_lower': new_lower,
|
||||
'prices': self.data['Close'].tolist(),
|
||||
'typical_prices': typical_prices,
|
||||
'highs': self.data['High'].tolist(),
|
||||
'lows': self.data['Low'].tolist(),
|
||||
'dates': self.data['datetime'].tolist(),
|
||||
'period': period,
|
||||
'std_dev': std_dev,
|
||||
'type': 'OHLC'
|
||||
}
|
||||
|
||||
# Calculate differences for each band
|
||||
for band in ['upper', 'middle', 'lower']:
|
||||
orig = np.array(locals()[f'original_{band}'])
|
||||
new = np.array(locals()[f'new_{band}'])
|
||||
diff = new - orig
|
||||
valid_diff = diff[~np.isnan(diff)]
|
||||
|
||||
if len(valid_diff) > 0:
|
||||
max_diff = np.max(np.abs(valid_diff))
|
||||
mean_diff = np.mean(np.abs(valid_diff))
|
||||
|
||||
print(f" {band.capitalize()} band - Max diff: {max_diff:.12f}, Mean diff: {mean_diff:.12f}")
|
||||
|
||||
# Status check for this band
|
||||
if max_diff < 1e-10:
|
||||
status = "✅ PASSED"
|
||||
elif max_diff < 1e-6:
|
||||
status = "⚠️ WARNING"
|
||||
else:
|
||||
status = "❌ FAILED"
|
||||
print(f" Status: {status}")
|
||||
else:
|
||||
print(f" {band.capitalize()} band - ❌ ERROR: No valid data points")
|
||||
|
||||
def plot_comparison(self, indicator_name: str):
|
||||
"""Plot detailed comparison for a specific indicator."""
|
||||
if indicator_name not in self.results:
|
||||
print(f"No results found for {indicator_name}")
|
||||
return
|
||||
|
||||
result = self.results[indicator_name]
|
||||
dates = pd.to_datetime(result['dates'])
|
||||
|
||||
# Create figure with subplots
|
||||
fig, axes = plt.subplots(4, 1, figsize=(15, 16))
|
||||
fig.suptitle(f'{indicator_name} - Detailed Comparison Analysis', fontsize=16)
|
||||
|
||||
# Plot 1: Price and Bollinger Bands
|
||||
ax1 = axes[0]
|
||||
if result['type'] == 'OHLC':
|
||||
ax1.plot(dates, result['typical_prices'], label='Typical Price', alpha=0.7, color='black', linewidth=1)
|
||||
else:
|
||||
ax1.plot(dates, result['prices'], label='Close Price', alpha=0.7, color='black', linewidth=1)
|
||||
|
||||
ax1.plot(dates, result['original_upper'], label='Original Upper', alpha=0.8, color='red')
|
||||
ax1.plot(dates, result['original_middle'], label='Original Middle', alpha=0.8, color='blue')
|
||||
ax1.plot(dates, result['original_lower'], label='Original Lower', alpha=0.8, color='green')
|
||||
ax1.fill_between(dates, result['original_upper'], result['original_lower'], alpha=0.1, color='gray')
|
||||
ax1.set_title(f'{indicator_name} - Original Implementation')
|
||||
ax1.legend()
|
||||
ax1.grid(True, alpha=0.3)
|
||||
|
||||
# Plot 2: New implementation
|
||||
ax2 = axes[1]
|
||||
if result['type'] == 'OHLC':
|
||||
ax2.plot(dates, result['typical_prices'], label='Typical Price', alpha=0.7, color='black', linewidth=1)
|
||||
else:
|
||||
ax2.plot(dates, result['prices'], label='Close Price', alpha=0.7, color='black', linewidth=1)
|
||||
|
||||
ax2.plot(dates, result['new_upper'], label='New Upper', alpha=0.8, color='red', linestyle='--')
|
||||
ax2.plot(dates, result['new_middle'], label='New Middle', alpha=0.8, color='blue', linestyle='--')
|
||||
ax2.plot(dates, result['new_lower'], label='New Lower', alpha=0.8, color='green', linestyle='--')
|
||||
ax2.fill_between(dates, result['new_upper'], result['new_lower'], alpha=0.1, color='gray')
|
||||
ax2.set_title(f'{indicator_name} - New Implementation')
|
||||
ax2.legend()
|
||||
ax2.grid(True, alpha=0.3)
|
||||
|
||||
# Plot 3: Overlay comparison
|
||||
ax3 = axes[2]
|
||||
ax3.plot(dates, result['original_upper'], label='Original Upper', alpha=0.8, color='red')
|
||||
ax3.plot(dates, result['original_middle'], label='Original Middle', alpha=0.8, color='blue')
|
||||
ax3.plot(dates, result['original_lower'], label='Original Lower', alpha=0.8, color='green')
|
||||
ax3.plot(dates, result['new_upper'], label='New Upper', alpha=0.8, color='red', linestyle='--')
|
||||
ax3.plot(dates, result['new_middle'], label='New Middle', alpha=0.8, color='blue', linestyle='--')
|
||||
ax3.plot(dates, result['new_lower'], label='New Lower', alpha=0.8, color='green', linestyle='--')
|
||||
ax3.set_title(f'{indicator_name} - Overlay Comparison')
|
||||
ax3.legend()
|
||||
ax3.grid(True, alpha=0.3)
|
||||
|
||||
# Plot 4: Differences for all bands
|
||||
ax4 = axes[3]
|
||||
for band, color in [('upper', 'red'), ('middle', 'blue'), ('lower', 'green')]:
|
||||
orig = np.array(result[f'original_{band}'])
|
||||
new = np.array(result[f'new_{band}'])
|
||||
diff = new - orig
|
||||
ax4.plot(dates, diff, label=f'{band.capitalize()} diff', alpha=0.7, color=color)
|
||||
|
||||
ax4.set_title(f'{indicator_name} Differences (New - Original)')
|
||||
ax4.axhline(y=0, color='black', linestyle='-', alpha=0.5)
|
||||
ax4.legend()
|
||||
ax4.grid(True, alpha=0.3)
|
||||
|
||||
# Add statistics text
|
||||
stats_lines = []
|
||||
for band in ['upper', 'middle', 'lower']:
|
||||
orig = np.array(result[f'original_{band}'])
|
||||
new = np.array(result[f'new_{band}'])
|
||||
diff = new - orig
|
||||
valid_diff = diff[~np.isnan(diff)]
|
||||
if len(valid_diff) > 0:
|
||||
stats_lines.append(f'{band.capitalize()}: Max={np.max(np.abs(valid_diff)):.2e}')
|
||||
|
||||
stats_text = '\n'.join(stats_lines)
|
||||
ax4.text(0.02, 0.98, stats_text, transform=ax4.transAxes,
|
||||
verticalalignment='top', bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8))
|
||||
|
||||
# Format x-axis
|
||||
for ax in axes:
|
||||
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
|
||||
ax.xaxis.set_major_locator(mdates.DayLocator(interval=max(1, len(dates)//10)))
|
||||
plt.setp(ax.xaxis.get_majorticklabels(), rotation=45)
|
||||
|
||||
plt.tight_layout()
|
||||
|
||||
# Save plot
|
||||
plot_path = self.results_dir / f"{indicator_name}_detailed_comparison.png"
|
||||
plt.savefig(plot_path, dpi=300, bbox_inches='tight')
|
||||
print(f"Plot saved to {plot_path}")
|
||||
|
||||
plt.show()
|
||||
|
||||
def plot_all_comparisons(self):
|
||||
"""Plot comparisons for all tested indicators."""
|
||||
print("\n=== Generating Detailed Comparison Plots ===")
|
||||
|
||||
for indicator_name in self.results.keys():
|
||||
print(f"Plotting {indicator_name}...")
|
||||
self.plot_comparison(indicator_name)
|
||||
plt.close('all')
|
||||
|
||||
def generate_report(self):
|
||||
"""Generate detailed report for Bollinger Bands indicators."""
|
||||
print("\n=== Generating Bollinger Bands Report ===")
|
||||
|
||||
report_lines = []
|
||||
report_lines.append("# Bollinger Bands Indicators Comparison Report")
|
||||
report_lines.append(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
|
||||
report_lines.append(f"Data file: {self.data_file}")
|
||||
report_lines.append(f"Sample size: {len(self.data)} data points")
|
||||
report_lines.append("")
|
||||
|
||||
# Summary table
|
||||
report_lines.append("## Summary Table")
|
||||
report_lines.append("| Indicator | Period | Std Dev | Upper Max Diff | Middle Max Diff | Lower Max Diff | Status |")
|
||||
report_lines.append("|-----------|--------|---------|----------------|-----------------|----------------|--------|")
|
||||
|
||||
for indicator_name, result in self.results.items():
|
||||
max_diffs = []
|
||||
for band in ['upper', 'middle', 'lower']:
|
||||
orig = np.array(result[f'original_{band}'])
|
||||
new = np.array(result[f'new_{band}'])
|
||||
diff = new - orig
|
||||
valid_diff = diff[~np.isnan(diff)]
|
||||
|
||||
if len(valid_diff) > 0:
|
||||
max_diff = np.max(np.abs(valid_diff))
|
||||
max_diffs.append(max_diff)
|
||||
else:
|
||||
max_diffs.append(float('inf'))
|
||||
|
||||
overall_max = max(max_diffs) if max_diffs else float('inf')
|
||||
|
||||
if overall_max < 1e-10:
|
||||
status = "✅ PASSED"
|
||||
elif overall_max < 1e-6:
|
||||
status = "⚠️ WARNING"
|
||||
else:
|
||||
status = "❌ FAILED"
|
||||
|
||||
max_diff_strs = [f"{d:.2e}" if d != float('inf') else "N/A" for d in max_diffs]
|
||||
report_lines.append(f"| {indicator_name} | {result['period']} | {result['std_dev']} | "
|
||||
f"{max_diff_strs[0]} | {max_diff_strs[1]} | {max_diff_strs[2]} | {status} |")
|
||||
|
||||
report_lines.append("")
|
||||
|
||||
# Methodology explanation
|
||||
report_lines.append("## Methodology")
|
||||
report_lines.append("### Bollinger Bands (Close Price)")
|
||||
report_lines.append("- **Middle Band**: Simple Moving Average of Close prices")
|
||||
report_lines.append("- **Upper Band**: Middle Band + (Standard Deviation × Multiplier)")
|
||||
report_lines.append("- **Lower Band**: Middle Band - (Standard Deviation × Multiplier)")
|
||||
report_lines.append("- Uses Close price for all calculations")
|
||||
report_lines.append("")
|
||||
report_lines.append("### Bollinger Bands OHLC (Typical Price)")
|
||||
report_lines.append("- **Typical Price**: (High + Low + Close) / 3")
|
||||
report_lines.append("- **Middle Band**: Simple Moving Average of Typical prices")
|
||||
report_lines.append("- **Upper Band**: Middle Band + (Standard Deviation × Multiplier)")
|
||||
report_lines.append("- **Lower Band**: Middle Band - (Standard Deviation × Multiplier)")
|
||||
report_lines.append("- Uses Typical price for all calculations")
|
||||
report_lines.append("")
|
||||
|
||||
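
        # For reference, a minimal sketch of the band arithmetic described above
        # (assuming a population standard deviation over the trailing window,
        # which is what both implementations under test appear to use):
        #
        #   window = prices[-period:]
        #   middle = sum(window) / period
        #   sigma = (sum((p - middle) ** 2 for p in window) / period) ** 0.5
        #   upper = middle + std_dev * sigma
        #   lower = middle - std_dev * sigma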

        # Detailed analysis
        report_lines.append("## Detailed Analysis")

        for indicator_name, result in self.results.items():
            report_lines.append(f"### {indicator_name}")

            report_lines.append(f"- **Type**: {result['type']}")
            report_lines.append(f"- **Period**: {result['period']}")
            report_lines.append(f"- **Standard Deviation Multiplier**: {result['std_dev']}")

            for band in ['upper', 'middle', 'lower']:
                orig = np.array(result[f'original_{band}'])
                new = np.array(result[f'new_{band}'])
                diff = new - orig
                valid_diff = diff[~np.isnan(diff)]

                if len(valid_diff) > 0:
                    report_lines.append(f"- **{band.capitalize()} Band Analysis**:")
                    report_lines.append(f"  - Valid data points: {len(valid_diff)}")
                    report_lines.append(f"  - Max absolute difference: {np.max(np.abs(valid_diff)):.12f}")
                    report_lines.append(f"  - Mean absolute difference: {np.mean(np.abs(valid_diff)):.12f}")
                    report_lines.append(f"  - Standard deviation: {np.std(valid_diff):.12f}")

                    # Band-specific metrics
                    valid_original = orig[~np.isnan(orig)]
                    if len(valid_original) > 0:
                        mean_value = np.mean(valid_original)
                        relative_error = np.mean(np.abs(valid_diff)) / mean_value * 100
                        report_lines.append(f"  - Mean {band} value: {mean_value:.6f}")
                        report_lines.append(f"  - Relative error: {relative_error:.2e}%")

            # Band width analysis
            orig_width = np.array(result['original_upper']) - np.array(result['original_lower'])
            new_width = np.array(result['new_upper']) - np.array(result['new_lower'])
            width_diff = new_width - orig_width
            valid_width_diff = width_diff[~np.isnan(width_diff)]

            if len(valid_width_diff) > 0:
                report_lines.append("- **Band Width Analysis**:")
                report_lines.append(f"  - Max width difference: {np.max(np.abs(valid_width_diff)):.12f}")
                report_lines.append(f"  - Mean width difference: {np.mean(np.abs(valid_width_diff)):.12f}")

                # Squeeze detection (when bands are narrow)
                valid_orig_width = orig_width[~np.isnan(orig_width)]
                if len(valid_orig_width) > 0:
                    width_percentile_20 = np.percentile(valid_orig_width, 20)
                    squeeze_periods = np.sum(valid_orig_width < width_percentile_20)
                    report_lines.append(f"  - Squeeze periods (width < 20th percentile): {squeeze_periods}")

            report_lines.append("")

        # Save report
        report_path = self.results_dir / "bollinger_bands_report.md"
        with open(report_path, 'w', encoding='utf-8') as f:
            f.write('\n'.join(report_lines))

        print(f"Report saved to {report_path}")

    def run_tests(self):
        """Run all Bollinger Bands tests."""
        print("Starting Bollinger Bands Comparison Tests...")

        # Load data
        self.load_data()

        # Run tests
        self.test_bollinger_bands()
        self.test_bollinger_bands_ohlc()

        # Generate outputs
        self.plot_all_comparisons()
        self.generate_report()

        print("\n✅ Bollinger Bands tests completed!")


if __name__ == "__main__":
    tester = BollingerBandsComparisonTest(sample_size=3000)
    tester.run_tests()
@ -1,610 +0,0 @@
"""
Comprehensive Indicator Comparison Test Suite

This module provides a testing framework for comparing the original indicators from the
cycles module with their new implementations in the IncrementalTrader module, to ensure
mathematical equivalence.
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from datetime import datetime
import sys
from pathlib import Path

# Add project root to path
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

# Import original indicators
from cycles.IncStrategies.indicators import (
    MovingAverageState as OriginalMA,
    ExponentialMovingAverageState as OriginalEMA,
    ATRState as OriginalATR,
    SimpleATRState as OriginalSimpleATR,
    SupertrendState as OriginalSupertrend,
    RSIState as OriginalRSI,
    SimpleRSIState as OriginalSimpleRSI,
    BollingerBandsState as OriginalBB,
    BollingerBandsOHLCState as OriginalBBOHLC
)

# Import new indicators
from IncrementalTrader.strategies.indicators import (
    MovingAverageState as NewMA,
    ExponentialMovingAverageState as NewEMA,
    ATRState as NewATR,
    SimpleATRState as NewSimpleATR,
    SupertrendState as NewSupertrend,
    RSIState as NewRSI,
    SimpleRSIState as NewSimpleRSI,
    BollingerBandsState as NewBB,
    BollingerBandsOHLCState as NewBBOHLC
)
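

# A small, self-contained sketch of the equivalence check that every compare_*
# method below repeats inline. The helper is illustrative only (it is not
# called by the suite); the 1e-10 / 1e-6 thresholds mirror the PASSED/WARNING
# cutoffs used in generate_summary_report.
def summarize_difference(original, new):
    """Return (max_abs_diff, mean_abs_diff) over non-NaN pairs, or None if empty."""
    diff = np.array(new, dtype=float) - np.array(original, dtype=float)
    valid = diff[~np.isnan(diff)]
    if len(valid) == 0:
        return None
    return float(np.max(np.abs(valid))), float(np.mean(np.abs(valid)))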


class IndicatorComparisonTester:
    """Test framework for comparing original and new indicator implementations."""

    def __init__(self, data_file: str = "data/btcusd_1-min_data.csv", sample_size: int = 10000):
        """
        Initialize the tester with data.

        Args:
            data_file: Path to the CSV data file
            sample_size: Number of data points to use for testing (None for all data)
        """
        self.data_file = data_file
        self.sample_size = sample_size
        self.data = None
        self.results = {}

        # Create results directory
        self.results_dir = Path("test/results")
        self.results_dir.mkdir(exist_ok=True)

    def load_data(self):
        """Load and prepare the data for testing."""
        print(f"Loading data from {self.data_file}...")

        # Load data
        df = pd.read_csv(self.data_file)

        # Convert timestamp to datetime
        df['datetime'] = pd.to_datetime(df['Timestamp'], unit='s')

        # Take sample if specified
        if self.sample_size and len(df) > self.sample_size:
            # Take the most recent data
            df = df.tail(self.sample_size).reset_index(drop=True)

        self.data = df
        print(f"Loaded {len(df)} data points from {df['datetime'].iloc[0]} to {df['datetime'].iloc[-1]}")

    def compare_moving_averages(self, periods=[20, 50]):
        """Compare Moving Average implementations."""
        print("\n=== Testing Moving Averages ===")

        for period in periods:
            print(f"Testing MA({period})...")

            # Initialize indicators
            original_ma = OriginalMA(period)
            new_ma = NewMA(period)

            original_values = []
            new_values = []

            # Process data
            for _, row in self.data.iterrows():
                price = row['Close']

                original_ma.update(price)
                new_ma.update(price)

                original_values.append(original_ma.get_current_value() if original_ma.is_warmed_up() else np.nan)
                new_values.append(new_ma.get_current_value() if new_ma.is_warmed_up() else np.nan)

            # Store results
            self.results[f'MA_{period}'] = {
                'original': original_values,
                'new': new_values,
                'dates': self.data['datetime'].tolist()
            }

            # Calculate differences
            diff = np.array(new_values) - np.array(original_values)
            valid_diff = diff[~np.isnan(diff)]

            print(f"  Max difference: {np.max(np.abs(valid_diff)):.10f}")
            print(f"  Mean difference: {np.mean(np.abs(valid_diff)):.10f}")
            print(f"  Std difference: {np.std(valid_diff):.10f}")

    def compare_exponential_moving_averages(self, periods=[20, 50]):
        """Compare Exponential Moving Average implementations."""
        print("\n=== Testing Exponential Moving Averages ===")

        for period in periods:
            print(f"Testing EMA({period})...")

            # Initialize indicators
            original_ema = OriginalEMA(period)
            new_ema = NewEMA(period)

            original_values = []
            new_values = []

            # Process data
            for _, row in self.data.iterrows():
                price = row['Close']

                original_ema.update(price)
                new_ema.update(price)

                original_values.append(original_ema.value if original_ema.is_ready else np.nan)
                new_values.append(new_ema.value if new_ema.is_ready else np.nan)

            # Store results
            self.results[f'EMA_{period}'] = {
                'original': original_values,
                'new': new_values,
                'dates': self.data['datetime'].tolist()
            }

            # Calculate differences
            diff = np.array(new_values) - np.array(original_values)
            valid_diff = diff[~np.isnan(diff)]

            print(f"  Max difference: {np.max(np.abs(valid_diff)):.10f}")
            print(f"  Mean difference: {np.mean(np.abs(valid_diff)):.10f}")
            print(f"  Std difference: {np.std(valid_diff):.10f}")

    def compare_atr(self, periods=[14]):
        """Compare ATR implementations."""
        print("\n=== Testing ATR ===")

        for period in periods:
            print(f"Testing ATR({period})...")

            # Initialize indicators
            original_atr = OriginalATR(period)
            new_atr = NewATR(period)

            original_values = []
            new_values = []

            # Process data
            for _, row in self.data.iterrows():
                high, low, close = row['High'], row['Low'], row['Close']

                original_atr.update(high, low, close)
                new_atr.update(high, low, close)

                original_values.append(original_atr.value if original_atr.is_ready else np.nan)
                new_values.append(new_atr.value if new_atr.is_ready else np.nan)

            # Store results
            self.results[f'ATR_{period}'] = {
                'original': original_values,
                'new': new_values,
                'dates': self.data['datetime'].tolist()
            }

            # Calculate differences
            diff = np.array(new_values) - np.array(original_values)
            valid_diff = diff[~np.isnan(diff)]

            print(f"  Max difference: {np.max(np.abs(valid_diff)):.10f}")
            print(f"  Mean difference: {np.mean(np.abs(valid_diff)):.10f}")
            print(f"  Std difference: {np.std(valid_diff):.10f}")

    def compare_simple_atr(self, periods=[14]):
        """Compare Simple ATR implementations."""
        print("\n=== Testing Simple ATR ===")

        for period in periods:
            print(f"Testing SimpleATR({period})...")

            # Initialize indicators
            original_atr = OriginalSimpleATR(period)
            new_atr = NewSimpleATR(period)

            original_values = []
            new_values = []

            # Process data
            for _, row in self.data.iterrows():
                high, low, close = row['High'], row['Low'], row['Close']

                original_atr.update(high, low, close)
                new_atr.update(high, low, close)

                original_values.append(original_atr.value if original_atr.is_ready else np.nan)
                new_values.append(new_atr.value if new_atr.is_ready else np.nan)

            # Store results
            self.results[f'SimpleATR_{period}'] = {
                'original': original_values,
                'new': new_values,
                'dates': self.data['datetime'].tolist()
            }

            # Calculate differences
            diff = np.array(new_values) - np.array(original_values)
            valid_diff = diff[~np.isnan(diff)]

            print(f"  Max difference: {np.max(np.abs(valid_diff)):.10f}")
            print(f"  Mean difference: {np.mean(np.abs(valid_diff)):.10f}")
            print(f"  Std difference: {np.std(valid_diff):.10f}")

    def compare_supertrend(self, periods=[10], multipliers=[3.0]):
        """Compare Supertrend implementations."""
        print("\n=== Testing Supertrend ===")

        for period in periods:
            for multiplier in multipliers:
                print(f"Testing Supertrend({period}, {multiplier})...")

                # Initialize indicators
                original_st = OriginalSupertrend(period, multiplier)
                new_st = NewSupertrend(period, multiplier)

                original_values = []
                new_values = []
                original_trends = []
                new_trends = []

                # Process data
                for _, row in self.data.iterrows():
                    high, low, close = row['High'], row['Low'], row['Close']

                    original_st.update(high, low, close)
                    new_st.update(high, low, close)

                    original_values.append(original_st.value if original_st.is_ready else np.nan)
                    new_values.append(new_st.value if new_st.is_ready else np.nan)
                    original_trends.append(original_st.trend if original_st.is_ready else 0)
                    new_trends.append(new_st.trend if new_st.is_ready else 0)

                # Store results
                key = f'Supertrend_{period}_{multiplier}'
                self.results[key] = {
                    'original': original_values,
                    'new': new_values,
                    'original_trend': original_trends,
                    'new_trend': new_trends,
                    'dates': self.data['datetime'].tolist()
                }

                # Calculate differences
                diff = np.array(new_values) - np.array(original_values)
                valid_diff = diff[~np.isnan(diff)]

                trend_diff = np.array(new_trends) - np.array(original_trends)
                trend_matches = np.sum(trend_diff == 0) / len(trend_diff) * 100

                print(f"  Max difference: {np.max(np.abs(valid_diff)):.10f}")
                print(f"  Mean difference: {np.mean(np.abs(valid_diff)):.10f}")
                print(f"  Trend match: {trend_matches:.2f}%")

    def compare_rsi(self, periods=[14]):
        """Compare RSI implementations."""
        print("\n=== Testing RSI ===")

        for period in periods:
            print(f"Testing RSI({period})...")

            # Initialize indicators
            original_rsi = OriginalRSI(period)
            new_rsi = NewRSI(period)

            original_values = []
            new_values = []

            # Process data
            for _, row in self.data.iterrows():
                price = row['Close']

                original_rsi.update(price)
                new_rsi.update(price)

                original_values.append(original_rsi.value if original_rsi.is_ready else np.nan)
                new_values.append(new_rsi.value if new_rsi.is_ready else np.nan)

            # Store results
            self.results[f'RSI_{period}'] = {
                'original': original_values,
                'new': new_values,
                'dates': self.data['datetime'].tolist()
            }

            # Calculate differences
            diff = np.array(new_values) - np.array(original_values)
            valid_diff = diff[~np.isnan(diff)]

            print(f"  Max difference: {np.max(np.abs(valid_diff)):.10f}")
            print(f"  Mean difference: {np.mean(np.abs(valid_diff)):.10f}")
            print(f"  Std difference: {np.std(valid_diff):.10f}")

    def compare_simple_rsi(self, periods=[14]):
        """Compare Simple RSI implementations."""
        print("\n=== Testing Simple RSI ===")

        for period in periods:
            print(f"Testing SimpleRSI({period})...")

            # Initialize indicators
            original_rsi = OriginalSimpleRSI(period)
            new_rsi = NewSimpleRSI(period)

            original_values = []
            new_values = []

            # Process data
            for _, row in self.data.iterrows():
                price = row['Close']

                original_rsi.update(price)
                new_rsi.update(price)

                original_values.append(original_rsi.value if original_rsi.is_ready else np.nan)
                new_values.append(new_rsi.value if new_rsi.is_ready else np.nan)

            # Store results
            self.results[f'SimpleRSI_{period}'] = {
                'original': original_values,
                'new': new_values,
                'dates': self.data['datetime'].tolist()
            }

            # Calculate differences
            diff = np.array(new_values) - np.array(original_values)
            valid_diff = diff[~np.isnan(diff)]

            print(f"  Max difference: {np.max(np.abs(valid_diff)):.10f}")
            print(f"  Mean difference: {np.mean(np.abs(valid_diff)):.10f}")
            print(f"  Std difference: {np.std(valid_diff):.10f}")

    def compare_bollinger_bands(self, periods=[20], std_devs=[2.0]):
        """Compare Bollinger Bands implementations."""
        print("\n=== Testing Bollinger Bands ===")

        for period in periods:
            for std_dev in std_devs:
                print(f"Testing BollingerBands({period}, {std_dev})...")

                # Initialize indicators
                original_bb = OriginalBB(period, std_dev)
                new_bb = NewBB(period, std_dev)

                original_upper = []
                original_middle = []
                original_lower = []
                new_upper = []
                new_middle = []
                new_lower = []

                # Process data
                for _, row in self.data.iterrows():
                    price = row['Close']

                    original_bb.update(price)
                    new_bb.update(price)

                    if original_bb.is_ready:
                        original_upper.append(original_bb.upper)
                        original_middle.append(original_bb.middle)
                        original_lower.append(original_bb.lower)
                    else:
                        original_upper.append(np.nan)
                        original_middle.append(np.nan)
                        original_lower.append(np.nan)

                    if new_bb.is_ready:
                        new_upper.append(new_bb.upper)
                        new_middle.append(new_bb.middle)
                        new_lower.append(new_bb.lower)
                    else:
                        new_upper.append(np.nan)
                        new_middle.append(np.nan)
                        new_lower.append(np.nan)

                # Store results
                key = f'BB_{period}_{std_dev}'
                self.results[key] = {
                    'original_upper': original_upper,
                    'original_middle': original_middle,
                    'original_lower': original_lower,
                    'new_upper': new_upper,
                    'new_middle': new_middle,
                    'new_lower': new_lower,
                    'dates': self.data['datetime'].tolist()
                }

                # Calculate differences
                for band in ['upper', 'middle', 'lower']:
                    orig = np.array(locals()[f'original_{band}'])
                    new = np.array(locals()[f'new_{band}'])
                    diff = new - orig
                    valid_diff = diff[~np.isnan(diff)]

                    print(f"  {band.capitalize()} band - Max diff: {np.max(np.abs(valid_diff)):.10f}, "
                          f"Mean diff: {np.mean(np.abs(valid_diff)):.10f}")

    def plot_comparison(self, indicator_name: str, save_plot: bool = True):
        """Plot comparison between original and new indicator implementations."""
        if indicator_name not in self.results:
            print(f"No results found for {indicator_name}")
            return

        result = self.results[indicator_name]
        dates = pd.to_datetime(result['dates'])

        # Create figure
        fig, axes = plt.subplots(2, 1, figsize=(15, 10))
        fig.suptitle(f'{indicator_name} - Original vs New Implementation Comparison', fontsize=16)

        # Plot 1: Overlay comparison
        ax1 = axes[0]

        if 'original' in result and 'new' in result:
            # Standard indicator comparison
            ax1.plot(dates, result['original'], label='Original', alpha=0.7, linewidth=1)
            ax1.plot(dates, result['new'], label='New', alpha=0.7, linewidth=1, linestyle='--')
            ax1.set_title(f'{indicator_name} Values Comparison')
            ax1.legend()
            ax1.grid(True, alpha=0.3)

            # Plot 2: Difference
            ax2 = axes[1]
            diff = np.array(result['new']) - np.array(result['original'])
            ax2.plot(dates, diff, color='red', alpha=0.7)
            ax2.set_title(f'{indicator_name} Difference (New - Original)')
            ax2.axhline(y=0, color='black', linestyle='-', alpha=0.5)
            ax2.grid(True, alpha=0.3)

        elif 'original_upper' in result:
            # Bollinger Bands comparison
            ax1.plot(dates, result['original_upper'], label='Original Upper', alpha=0.7)
            ax1.plot(dates, result['original_middle'], label='Original Middle', alpha=0.7)
            ax1.plot(dates, result['original_lower'], label='Original Lower', alpha=0.7)
            ax1.plot(dates, result['new_upper'], label='New Upper', alpha=0.7, linestyle='--')
            ax1.plot(dates, result['new_middle'], label='New Middle', alpha=0.7, linestyle='--')
            ax1.plot(dates, result['new_lower'], label='New Lower', alpha=0.7, linestyle='--')
            ax1.set_title(f'{indicator_name} Bollinger Bands Comparison')
            ax1.legend()
            ax1.grid(True, alpha=0.3)

            # Plot 2: Differences for all bands
            ax2 = axes[1]
            for band in ['upper', 'middle', 'lower']:
                orig = np.array(result[f'original_{band}'])
                new = np.array(result[f'new_{band}'])
                diff = new - orig
                ax2.plot(dates, diff, label=f'{band.capitalize()} diff', alpha=0.7)
            ax2.set_title(f'{indicator_name} Differences (New - Original)')
            ax2.axhline(y=0, color='black', linestyle='-', alpha=0.5)
            ax2.legend()
            ax2.grid(True, alpha=0.3)

        # Format x-axis
        for ax in axes:
            ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
            ax.xaxis.set_major_locator(mdates.DayLocator(interval=max(1, len(dates)//10)))
            plt.setp(ax.xaxis.get_majorticklabels(), rotation=45)

        plt.tight_layout()

        if save_plot:
            plot_path = self.results_dir / f"{indicator_name}_comparison.png"
            plt.savefig(plot_path, dpi=300, bbox_inches='tight')
            print(f"Plot saved to {plot_path}")

        plt.show()

    def plot_all_comparisons(self):
        """Plot comparisons for all tested indicators."""
        print("\n=== Generating Comparison Plots ===")

        for indicator_name in self.results.keys():
            print(f"Plotting {indicator_name}...")
            self.plot_comparison(indicator_name, save_plot=True)
            plt.close('all')  # Close plots to save memory

    def generate_summary_report(self):
        """Generate a summary report of all comparisons."""
        print("\n=== Summary Report ===")

        report_lines = []
        report_lines.append("# Indicator Comparison Summary Report")
        report_lines.append(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        report_lines.append(f"Data file: {self.data_file}")
        report_lines.append(f"Sample size: {len(self.data)} data points")
        report_lines.append("")

        for indicator_name, result in self.results.items():
            report_lines.append(f"## {indicator_name}")

            if 'original' in result and 'new' in result:
                # Standard indicator
                diff = np.array(result['new']) - np.array(result['original'])
                valid_diff = diff[~np.isnan(diff)]

                if len(valid_diff) > 0:
                    report_lines.append(f"- Max absolute difference: {np.max(np.abs(valid_diff)):.10f}")
                    report_lines.append(f"- Mean absolute difference: {np.mean(np.abs(valid_diff)):.10f}")
                    report_lines.append(f"- Standard deviation: {np.std(valid_diff):.10f}")
                    report_lines.append(f"- Valid data points: {len(valid_diff)}")

                    # Check if differences are negligible
                    if np.max(np.abs(valid_diff)) < 1e-10:
                        report_lines.append("- ✅ **PASSED**: Implementations are mathematically equivalent")
                    elif np.max(np.abs(valid_diff)) < 1e-6:
                        report_lines.append("- ⚠️ **WARNING**: Small differences detected (likely floating point precision)")
                    else:
                        report_lines.append("- ❌ **FAILED**: Significant differences detected")
                else:
                    report_lines.append("- ❌ **ERROR**: No valid data points for comparison")

            elif 'original_upper' in result:
                # Bollinger Bands
                all_passed = True
                for band in ['upper', 'middle', 'lower']:
                    orig = np.array(result[f'original_{band}'])
                    new = np.array(result[f'new_{band}'])
                    diff = new - orig
                    valid_diff = diff[~np.isnan(diff)]

                    if len(valid_diff) > 0:
                        max_diff = np.max(np.abs(valid_diff))
                        report_lines.append(f"- {band.capitalize()} band max diff: {max_diff:.10f}")
                        if max_diff >= 1e-6:
                            all_passed = False

                if all_passed:
                    report_lines.append("- ✅ **PASSED**: All bands are mathematically equivalent")
                else:
                    report_lines.append("- ❌ **FAILED**: Significant differences in one or more bands")

            report_lines.append("")

        # Save report
        report_path = self.results_dir / "comparison_summary.md"
        with open(report_path, 'w', encoding='utf-8') as f:
            f.write('\n'.join(report_lines))

        print(f"Summary report saved to {report_path}")

        # Print summary to console
        print('\n'.join(report_lines))

    def run_all_tests(self):
        """Run all indicator comparison tests."""
        print("Starting comprehensive indicator comparison tests...")

        # Load data
        self.load_data()

        # Run all comparisons
        self.compare_moving_averages()
        self.compare_exponential_moving_averages()
        self.compare_atr()
        self.compare_simple_atr()
        self.compare_supertrend()
        self.compare_rsi()
        self.compare_simple_rsi()
        self.compare_bollinger_bands()

        # Generate plots and reports
        self.plot_all_comparisons()
        self.generate_summary_report()

        print("\n✅ All tests completed! Check the test/results/ directory for detailed outputs.")


if __name__ == "__main__":
    # Run the comprehensive test suite
    tester = IndicatorComparisonTester(sample_size=5000)  # Use 5000 data points for faster testing
    tester.run_all_tests()
@ -1,549 +0,0 @@
"""
Comprehensive Indicator Comparison Test Suite (Fixed Interface)

This module provides a testing framework for comparing the original indicators from the
cycles module with their new implementations in the IncrementalTrader module, to ensure
mathematical equivalence.
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from datetime import datetime
import sys
from pathlib import Path

# Add project root to path
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

# Import original indicators
from cycles.IncStrategies.indicators import (
    MovingAverageState as OriginalMA,
    ExponentialMovingAverageState as OriginalEMA,
    ATRState as OriginalATR,
    SimpleATRState as OriginalSimpleATR,
    SupertrendState as OriginalSupertrend,
    RSIState as OriginalRSI,
    SimpleRSIState as OriginalSimpleRSI,
    BollingerBandsState as OriginalBB,
    BollingerBandsOHLCState as OriginalBBOHLC
)

# Import new indicators
from IncrementalTrader.strategies.indicators import (
    MovingAverageState as NewMA,
    ExponentialMovingAverageState as NewEMA,
    ATRState as NewATR,
    SimpleATRState as NewSimpleATR,
    SupertrendState as NewSupertrend,
    RSIState as NewRSI,
    SimpleRSIState as NewSimpleRSI,
    BollingerBandsState as NewBB,
    BollingerBandsOHLCState as NewBBOHLC
)
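

# Interface note: unlike the previous version of this suite, every indicator
# state here is driven through the same incremental protocol:
#
#   state.update(value_or_ohlc)   # feed one bar at a time
#   state.is_warmed_up()          # enough history to emit a value?
#   state.get_current_value()     # scalar, or a dict for Supertrend / Bollinger Bands
#
# OHLC-based indicators receive a dict with 'open'/'high'/'low'/'close' keys,
# built inside the compare_* methods below.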


class IndicatorComparisonTester:
    """Test framework for comparing original and new indicator implementations."""

    def __init__(self, data_file: str = "data/btcusd_1-min_data.csv", sample_size: int = 5000):
        """
        Initialize the tester with data.

        Args:
            data_file: Path to the CSV data file
            sample_size: Number of data points to use for testing (None for all data)
        """
        self.data_file = data_file
        self.sample_size = sample_size
        self.data = None
        self.results = {}

        # Create results directory
        self.results_dir = Path("test/results")
        self.results_dir.mkdir(exist_ok=True)

    def load_data(self):
        """Load and prepare the data for testing."""
        print(f"Loading data from {self.data_file}...")

        # Load data
        df = pd.read_csv(self.data_file)

        # Convert timestamp to datetime
        df['datetime'] = pd.to_datetime(df['Timestamp'], unit='s')

        # Take sample if specified
        if self.sample_size and len(df) > self.sample_size:
            # Take the most recent data
            df = df.tail(self.sample_size).reset_index(drop=True)

        self.data = df
        print(f"Loaded {len(df)} data points from {df['datetime'].iloc[0]} to {df['datetime'].iloc[-1]}")

    def compare_moving_averages(self, periods=[20, 50]):
        """Compare Moving Average implementations."""
        print("\n=== Testing Moving Averages ===")

        for period in periods:
            print(f"Testing MA({period})...")

            # Initialize indicators
            original_ma = OriginalMA(period)
            new_ma = NewMA(period)

            original_values = []
            new_values = []

            # Process data
            for _, row in self.data.iterrows():
                price = row['Close']

                original_ma.update(price)
                new_ma.update(price)

                original_values.append(original_ma.get_current_value() if original_ma.is_warmed_up() else np.nan)
                new_values.append(new_ma.get_current_value() if new_ma.is_warmed_up() else np.nan)

            # Store results
            self.results[f'MA_{period}'] = {
                'original': original_values,
                'new': new_values,
                'dates': self.data['datetime'].tolist()
            }

            # Calculate differences
            diff = np.array(new_values) - np.array(original_values)
            valid_diff = diff[~np.isnan(diff)]

            print(f"  Max difference: {np.max(np.abs(valid_diff)):.10f}")
            print(f"  Mean difference: {np.mean(np.abs(valid_diff)):.10f}")
            print(f"  Std difference: {np.std(valid_diff):.10f}")

    def compare_exponential_moving_averages(self, periods=[20, 50]):
        """Compare Exponential Moving Average implementations."""
        print("\n=== Testing Exponential Moving Averages ===")

        for period in periods:
            print(f"Testing EMA({period})...")

            # Initialize indicators
            original_ema = OriginalEMA(period)
            new_ema = NewEMA(period)

            original_values = []
            new_values = []

            # Process data
            for _, row in self.data.iterrows():
                price = row['Close']

                original_ema.update(price)
                new_ema.update(price)

                original_values.append(original_ema.get_current_value() if original_ema.is_warmed_up() else np.nan)
                new_values.append(new_ema.get_current_value() if new_ema.is_warmed_up() else np.nan)

            # Store results
            self.results[f'EMA_{period}'] = {
                'original': original_values,
                'new': new_values,
                'dates': self.data['datetime'].tolist()
            }

            # Calculate differences
            diff = np.array(new_values) - np.array(original_values)
            valid_diff = diff[~np.isnan(diff)]

            print(f"  Max difference: {np.max(np.abs(valid_diff)):.10f}")
            print(f"  Mean difference: {np.mean(np.abs(valid_diff)):.10f}")
            print(f"  Std difference: {np.std(valid_diff):.10f}")

    def compare_atr(self, periods=[14]):
        """Compare ATR implementations."""
        print("\n=== Testing ATR ===")

        for period in periods:
            print(f"Testing ATR({period})...")

            # Initialize indicators
            original_atr = OriginalATR(period)
            new_atr = NewATR(period)

            original_values = []
            new_values = []

            # Process data
            for _, row in self.data.iterrows():
                high, low, close = row['High'], row['Low'], row['Close']
                ohlc = {'open': close, 'high': high, 'low': low, 'close': close}

                original_atr.update(ohlc)
                new_atr.update(ohlc)

                original_values.append(original_atr.get_current_value() if original_atr.is_warmed_up() else np.nan)
                new_values.append(new_atr.get_current_value() if new_atr.is_warmed_up() else np.nan)

            # Store results
            self.results[f'ATR_{period}'] = {
                'original': original_values,
                'new': new_values,
                'dates': self.data['datetime'].tolist()
            }

            # Calculate differences
            diff = np.array(new_values) - np.array(original_values)
            valid_diff = diff[~np.isnan(diff)]

            print(f"  Max difference: {np.max(np.abs(valid_diff)):.10f}")
            print(f"  Mean difference: {np.mean(np.abs(valid_diff)):.10f}")
            print(f"  Std difference: {np.std(valid_diff):.10f}")

    def compare_simple_atr(self, periods=[14]):
        """Compare Simple ATR implementations."""
        print("\n=== Testing Simple ATR ===")

        for period in periods:
            print(f"Testing SimpleATR({period})...")

            # Initialize indicators
            original_atr = OriginalSimpleATR(period)
            new_atr = NewSimpleATR(period)

            original_values = []
            new_values = []

            # Process data
            for _, row in self.data.iterrows():
                high, low, close = row['High'], row['Low'], row['Close']
                ohlc = {'open': close, 'high': high, 'low': low, 'close': close}

                original_atr.update(ohlc)
                new_atr.update(ohlc)

                original_values.append(original_atr.get_current_value() if original_atr.is_warmed_up() else np.nan)
                new_values.append(new_atr.get_current_value() if new_atr.is_warmed_up() else np.nan)

            # Store results
            self.results[f'SimpleATR_{period}'] = {
                'original': original_values,
                'new': new_values,
                'dates': self.data['datetime'].tolist()
            }

            # Calculate differences
            diff = np.array(new_values) - np.array(original_values)
            valid_diff = diff[~np.isnan(diff)]

            print(f"  Max difference: {np.max(np.abs(valid_diff)):.10f}")
            print(f"  Mean difference: {np.mean(np.abs(valid_diff)):.10f}")
            print(f"  Std difference: {np.std(valid_diff):.10f}")

    def compare_supertrend(self, periods=[10], multipliers=[3.0]):
        """Compare Supertrend implementations."""
        print("\n=== Testing Supertrend ===")

        for period in periods:
            for multiplier in multipliers:
                print(f"Testing Supertrend({period}, {multiplier})...")

                # Initialize indicators
                original_st = OriginalSupertrend(period, multiplier)
                new_st = NewSupertrend(period, multiplier)

                original_values = []
                new_values = []
                original_trends = []
                new_trends = []

                # Process data
                for _, row in self.data.iterrows():
                    high, low, close = row['High'], row['Low'], row['Close']
                    ohlc = {'open': close, 'high': high, 'low': low, 'close': close}

                    original_st.update(ohlc)
                    new_st.update(ohlc)

                    # Get current values
                    orig_result = original_st.get_current_value() if original_st.is_warmed_up() else None
                    new_result = new_st.get_current_value() if new_st.is_warmed_up() else None

                    if orig_result:
                        original_values.append(orig_result['supertrend'])
                        original_trends.append(orig_result['trend'])
                    else:
                        original_values.append(np.nan)
                        original_trends.append(0)

                    if new_result:
                        new_values.append(new_result['supertrend'])
                        new_trends.append(new_result['trend'])
                    else:
                        new_values.append(np.nan)
                        new_trends.append(0)

                # Store results
                key = f'Supertrend_{period}_{multiplier}'
                self.results[key] = {
                    'original': original_values,
                    'new': new_values,
                    'original_trend': original_trends,
                    'new_trend': new_trends,
                    'dates': self.data['datetime'].tolist()
                }

                # Calculate differences
                diff = np.array(new_values) - np.array(original_values)
                valid_diff = diff[~np.isnan(diff)]

                trend_diff = np.array(new_trends) - np.array(original_trends)
                trend_matches = np.sum(trend_diff == 0) / len(trend_diff) * 100

                print(f"  Max difference: {np.max(np.abs(valid_diff)):.10f}")
                print(f"  Mean difference: {np.mean(np.abs(valid_diff)):.10f}")
                print(f"  Trend match: {trend_matches:.2f}%")
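
                # Note: the trend-match percentage counts bars where both
                # implementations report the same trend value (0 is used while
                # warming up); 100.00% is expected when the ports are equivalent.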
|
||||
def compare_rsi(self, periods=[14]):
|
||||
"""Compare RSI implementations."""
|
||||
print("\n=== Testing RSI ===")
|
||||
|
||||
for period in periods:
|
||||
print(f"Testing RSI({period})...")
|
||||
|
||||
# Initialize indicators
|
||||
original_rsi = OriginalRSI(period)
|
||||
new_rsi = NewRSI(period)
|
||||
|
||||
original_values = []
|
||||
new_values = []
|
||||
|
||||
# Process data
|
||||
for _, row in self.data.iterrows():
|
||||
price = row['Close']
|
||||
|
||||
original_rsi.update(price)
|
||||
new_rsi.update(price)
|
||||
|
||||
original_values.append(original_rsi.get_current_value() if original_rsi.is_warmed_up() else np.nan)
|
||||
new_values.append(new_rsi.get_current_value() if new_rsi.is_warmed_up() else np.nan)
|
||||
|
||||
# Store results
|
||||
self.results[f'RSI_{period}'] = {
|
||||
'original': original_values,
|
||||
'new': new_values,
|
||||
'dates': self.data['datetime'].tolist()
|
||||
}
|
||||
|
||||
# Calculate differences
|
||||
diff = np.array(new_values) - np.array(original_values)
|
||||
valid_diff = diff[~np.isnan(diff)]
|
||||
|
||||
print(f" Max difference: {np.max(np.abs(valid_diff)):.10f}")
|
||||
print(f" Mean difference: {np.mean(np.abs(valid_diff)):.10f}")
|
||||
print(f" Std difference: {np.std(valid_diff):.10f}")
|
||||
|
||||
def compare_simple_rsi(self, periods=[14]):
|
||||
"""Compare Simple RSI implementations."""
|
||||
print("\n=== Testing Simple RSI ===")
|
||||
|
||||
for period in periods:
|
||||
print(f"Testing SimpleRSI({period})...")
|
||||
|
||||
# Initialize indicators
|
||||
original_rsi = OriginalSimpleRSI(period)
|
||||
new_rsi = NewSimpleRSI(period)
|
||||
|
||||
original_values = []
|
||||
new_values = []
|
||||
|
||||
# Process data
|
||||
for _, row in self.data.iterrows():
|
||||
price = row['Close']
|
||||
|
||||
original_rsi.update(price)
|
||||
new_rsi.update(price)
|
||||
|
||||
original_values.append(original_rsi.get_current_value() if original_rsi.is_warmed_up() else np.nan)
|
||||
new_values.append(new_rsi.get_current_value() if new_rsi.is_warmed_up() else np.nan)
|
||||
|
||||
# Store results
|
||||
self.results[f'SimpleRSI_{period}'] = {
|
||||
'original': original_values,
|
||||
'new': new_values,
|
||||
'dates': self.data['datetime'].tolist()
|
||||
}
|
||||
|
||||
# Calculate differences
|
||||
diff = np.array(new_values) - np.array(original_values)
|
||||
valid_diff = diff[~np.isnan(diff)]
|
||||
|
||||
print(f" Max difference: {np.max(np.abs(valid_diff)):.10f}")
|
||||
print(f" Mean difference: {np.mean(np.abs(valid_diff)):.10f}")
|
||||
print(f" Std difference: {np.std(valid_diff):.10f}")
|
||||
|
||||
def compare_bollinger_bands(self, periods=[20], std_devs=[2.0]):
|
||||
"""Compare Bollinger Bands implementations."""
|
||||
print("\n=== Testing Bollinger Bands ===")
|
||||
|
||||
for period in periods:
|
||||
for std_dev in std_devs:
|
||||
print(f"Testing BollingerBands({period}, {std_dev})...")
|
||||
|
||||
# Initialize indicators
|
||||
original_bb = OriginalBB(period, std_dev)
|
||||
new_bb = NewBB(period, std_dev)
|
||||
|
||||
original_upper = []
|
||||
original_middle = []
|
||||
original_lower = []
|
||||
new_upper = []
|
||||
new_middle = []
|
||||
new_lower = []
|
||||
|
||||
# Process data
|
||||
for _, row in self.data.iterrows():
|
||||
price = row['Close']
|
||||
|
||||
original_bb.update(price)
|
||||
new_bb.update(price)
|
||||
|
||||
# Get current values
|
||||
orig_result = original_bb.get_current_value() if original_bb.is_warmed_up() else None
|
||||
new_result = new_bb.get_current_value() if new_bb.is_warmed_up() else None
|
||||
|
||||
if orig_result:
|
||||
original_upper.append(orig_result['upper_band'])
|
||||
original_middle.append(orig_result['middle_band'])
|
||||
original_lower.append(orig_result['lower_band'])
|
||||
else:
|
||||
original_upper.append(np.nan)
|
||||
original_middle.append(np.nan)
|
||||
original_lower.append(np.nan)
|
||||
|
||||
if new_result:
|
||||
new_upper.append(new_result['upper_band'])
|
||||
new_middle.append(new_result['middle_band'])
|
||||
new_lower.append(new_result['lower_band'])
|
||||
else:
|
||||
new_upper.append(np.nan)
|
||||
new_middle.append(np.nan)
|
||||
new_lower.append(np.nan)
|
||||
|
||||
# Store results
|
||||
key = f'BB_{period}_{std_dev}'
|
||||
self.results[key] = {
|
||||
'original_upper': original_upper,
|
||||
'original_middle': original_middle,
|
||||
'original_lower': original_lower,
|
||||
'new_upper': new_upper,
|
||||
'new_middle': new_middle,
|
||||
'new_lower': new_lower,
|
||||
'dates': self.data['datetime'].tolist()
|
||||
}
|
||||
|
||||
# Calculate differences
|
||||
for band in ['upper', 'middle', 'lower']:
|
||||
            orig = np.array(locals()[f'original_{band}'])
            new = np.array(locals()[f'new_{band}'])
            diff = new - orig
            valid_diff = diff[~np.isnan(diff)]

            print(f"  {band.capitalize()} band - Max diff: {np.max(np.abs(valid_diff)):.10f}, "
                  f"Mean diff: {np.mean(np.abs(valid_diff)):.10f}")

    def generate_summary_report(self):
        """Generate a summary report of all comparisons."""
        print("\n=== Summary Report ===")

        report_lines = []
        report_lines.append("# Indicator Comparison Summary Report")
        report_lines.append(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        report_lines.append(f"Data file: {self.data_file}")
        report_lines.append(f"Sample size: {len(self.data)} data points")
        report_lines.append("")

        for indicator_name, result in self.results.items():
            report_lines.append(f"## {indicator_name}")

            if 'original' in result and 'new' in result:
                # Standard indicator
                diff = np.array(result['new']) - np.array(result['original'])
                valid_diff = diff[~np.isnan(diff)]

                if len(valid_diff) > 0:
                    report_lines.append(f"- Max absolute difference: {np.max(np.abs(valid_diff)):.10f}")
                    report_lines.append(f"- Mean absolute difference: {np.mean(np.abs(valid_diff)):.10f}")
                    report_lines.append(f"- Standard deviation: {np.std(valid_diff):.10f}")
                    report_lines.append(f"- Valid data points: {len(valid_diff)}")

                    # Check if differences are negligible
                    if np.max(np.abs(valid_diff)) < 1e-10:
                        report_lines.append("- ✅ **PASSED**: Implementations are mathematically equivalent")
                    elif np.max(np.abs(valid_diff)) < 1e-6:
                        report_lines.append("- ⚠️ **WARNING**: Small differences detected (likely floating point precision)")
                    else:
                        report_lines.append("- ❌ **FAILED**: Significant differences detected")
                else:
                    report_lines.append("- ❌ **ERROR**: No valid data points for comparison")

            elif 'original_upper' in result:
                # Bollinger Bands
                all_passed = True
                for band in ['upper', 'middle', 'lower']:
                    orig = np.array(result[f'original_{band}'])
                    new = np.array(result[f'new_{band}'])
                    diff = new - orig
                    valid_diff = diff[~np.isnan(diff)]

                    if len(valid_diff) > 0:
                        max_diff = np.max(np.abs(valid_diff))
                        report_lines.append(f"- {band.capitalize()} band max diff: {max_diff:.10f}")
                        if max_diff >= 1e-6:
                            all_passed = False

                if all_passed:
                    report_lines.append("- ✅ **PASSED**: All bands are mathematically equivalent")
                else:
                    report_lines.append("- ❌ **FAILED**: Significant differences in one or more bands")

            report_lines.append("")

        # Save report
        report_path = self.results_dir / "comparison_summary.md"
        with open(report_path, 'w', encoding='utf-8') as f:
            f.write('\n'.join(report_lines))

        print(f"Summary report saved to {report_path}")

        # Print summary to console
        print('\n'.join(report_lines))

    def run_all_tests(self):
        """Run all indicator comparison tests."""
        print("Starting comprehensive indicator comparison tests...")

        # Load data
        self.load_data()

        # Run all comparisons
        self.compare_moving_averages()
        self.compare_exponential_moving_averages()
        self.compare_atr()
        self.compare_simple_atr()
        self.compare_supertrend()
        self.compare_rsi()
        self.compare_simple_rsi()
        self.compare_bollinger_bands()

        # Generate reports
        self.generate_summary_report()

        print("\n✅ All tests completed! Check the test/results/ directory for detailed outputs.")


if __name__ == "__main__":
    # Run the comprehensive test suite
    tester = IndicatorComparisonTester(sample_size=3000)  # Use 3000 data points for faster testing
    tester.run_all_tests()
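
# A minimal sketch (not part of the original test module) of the three-tier
# tolerance check that the comparisons above repeat inline; the thresholds
# mirror the ones used throughout this suite.
def classify_max_diff(max_diff: float) -> str:
    """Classify a maximum absolute difference against the shared thresholds."""
    if max_diff < 1e-10:
        return "PASSED"   # mathematically equivalent
    if max_diff < 1e-6:
        return "WARNING"  # likely floating point precision
    return "FAILED"       # significant disagreement between implementations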
@ -1,335 +0,0 @@
"""
Moving Average Indicators Comparison Test

Focused testing for Moving Average and Exponential Moving Average implementations.
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from datetime import datetime
import sys
from pathlib import Path

# Add project root to path
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

# Import original indicators
from cycles.IncStrategies.indicators import (
    MovingAverageState as OriginalMA,
    ExponentialMovingAverageState as OriginalEMA
)

# Import new indicators
from IncrementalTrader.strategies.indicators import (
    MovingAverageState as NewMA,
    ExponentialMovingAverageState as NewEMA
)


class MovingAverageComparisonTest:
    """Test framework for comparing moving average implementations."""

    def __init__(self, data_file: str = "data/btcusd_1-min_data.csv", sample_size: int = 5000):
        self.data_file = data_file
        self.sample_size = sample_size
        self.data = None
        self.results = {}

        # Create results directory
        self.results_dir = Path("test/results/moving_averages")
        self.results_dir.mkdir(parents=True, exist_ok=True)

    def load_data(self):
        """Load and prepare the data for testing."""
        print(f"Loading data from {self.data_file}...")

        df = pd.read_csv(self.data_file)
        df['datetime'] = pd.to_datetime(df['Timestamp'], unit='s')

        if self.sample_size and len(df) > self.sample_size:
            df = df.tail(self.sample_size).reset_index(drop=True)

        self.data = df
        print(f"Loaded {len(df)} data points from {df['datetime'].iloc[0]} to {df['datetime'].iloc[-1]}")

    def test_simple_moving_average(self, periods=[5, 10, 20, 50, 100]):
        """Test Simple Moving Average implementations."""
        print("\n=== Testing Simple Moving Average ===")

        for period in periods:
            print(f"Testing SMA({period})...")

            # Initialize indicators
            original_ma = OriginalMA(period)
            new_ma = NewMA(period)

            original_values = []
            new_values = []
            prices = []

            # Process data
            for _, row in self.data.iterrows():
                price = row['Close']
                prices.append(price)

                original_ma.update(price)
                new_ma.update(price)

                original_values.append(original_ma.get_current_value() if original_ma.is_warmed_up() else np.nan)
                new_values.append(new_ma.get_current_value() if new_ma.is_warmed_up() else np.nan)

            # Store results
            self.results[f'SMA_{period}'] = {
                'original': original_values,
                'new': new_values,
                'prices': prices,
                'dates': self.data['datetime'].tolist(),
                'period': period
            }

            # Calculate differences
            diff = np.array(new_values) - np.array(original_values)
            valid_diff = diff[~np.isnan(diff)]

            if len(valid_diff) > 0:
                max_diff = np.max(np.abs(valid_diff))
                mean_diff = np.mean(np.abs(valid_diff))
                std_diff = np.std(valid_diff)

                print(f"  Max difference: {max_diff:.12f}")
                print(f"  Mean difference: {mean_diff:.12f}")
                print(f"  Std difference: {std_diff:.12f}")

                # Status check
                if max_diff < 1e-10:
                    print(f"  ✅ PASSED: Mathematically equivalent")
                elif max_diff < 1e-6:
                    print(f"  ⚠️ WARNING: Small differences (floating point precision)")
                else:
                    print(f"  ❌ FAILED: Significant differences detected")
            else:
                print(f"  ❌ ERROR: No valid data points")

    def test_exponential_moving_average(self, periods=[5, 10, 20, 50, 100]):
        """Test Exponential Moving Average implementations."""
        print("\n=== Testing Exponential Moving Average ===")

        for period in periods:
            print(f"Testing EMA({period})...")

            # Initialize indicators
            original_ema = OriginalEMA(period)
            new_ema = NewEMA(period)

            original_values = []
            new_values = []
            prices = []

            # Process data
            for _, row in self.data.iterrows():
                price = row['Close']
                prices.append(price)

                original_ema.update(price)
                new_ema.update(price)

                original_values.append(original_ema.get_current_value() if original_ema.is_warmed_up() else np.nan)
                new_values.append(new_ema.get_current_value() if new_ema.is_warmed_up() else np.nan)

            # Store results
            self.results[f'EMA_{period}'] = {
                'original': original_values,
                'new': new_values,
                'prices': prices,
                'dates': self.data['datetime'].tolist(),
                'period': period
            }

            # Calculate differences
            diff = np.array(new_values) - np.array(original_values)
            valid_diff = diff[~np.isnan(diff)]

            if len(valid_diff) > 0:
                max_diff = np.max(np.abs(valid_diff))
                mean_diff = np.mean(np.abs(valid_diff))
                std_diff = np.std(valid_diff)

                print(f"  Max difference: {max_diff:.12f}")
                print(f"  Mean difference: {mean_diff:.12f}")
                print(f"  Std difference: {std_diff:.12f}")

                # Status check
                if max_diff < 1e-10:
                    print(f"  ✅ PASSED: Mathematically equivalent")
                elif max_diff < 1e-6:
                    print(f"  ⚠️ WARNING: Small differences (floating point precision)")
                else:
                    print(f"  ❌ FAILED: Significant differences detected")
            else:
                print(f"  ❌ ERROR: No valid data points")

    def plot_comparison(self, indicator_name: str):
        """Plot detailed comparison for a specific indicator."""
        if indicator_name not in self.results:
            print(f"No results found for {indicator_name}")
            return

        result = self.results[indicator_name]
        dates = pd.to_datetime(result['dates'])

        # Create figure with subplots
        fig, axes = plt.subplots(3, 1, figsize=(15, 12))
        fig.suptitle(f'{indicator_name} - Detailed Comparison Analysis', fontsize=16)

        # Plot 1: Price and indicators
        ax1 = axes[0]
        ax1.plot(dates, result['prices'], label='Price', alpha=0.6, color='gray')
        ax1.plot(dates, result['original'], label='Original', alpha=0.8, linewidth=2)
        ax1.plot(dates, result['new'], label='New', alpha=0.8, linewidth=2, linestyle='--')
        ax1.set_title(f'{indicator_name} vs Price')
        ax1.legend()
        ax1.grid(True, alpha=0.3)

        # Plot 2: Overlay comparison (zoomed)
        ax2 = axes[1]
        ax2.plot(dates, result['original'], label='Original', alpha=0.8, linewidth=2)
        ax2.plot(dates, result['new'], label='New', alpha=0.8, linewidth=2, linestyle='--')
        ax2.set_title(f'{indicator_name} Values Comparison (Detailed)')
        ax2.legend()
        ax2.grid(True, alpha=0.3)

        # Plot 3: Difference analysis
        ax3 = axes[2]
        diff = np.array(result['new']) - np.array(result['original'])
        ax3.plot(dates, diff, color='red', alpha=0.7, linewidth=1)
        ax3.set_title(f'{indicator_name} Difference (New - Original)')
        ax3.axhline(y=0, color='black', linestyle='-', alpha=0.5)
        ax3.grid(True, alpha=0.3)

        # Add statistics text
        valid_diff = diff[~np.isnan(diff)]
        if len(valid_diff) > 0:
            stats_text = f'Max: {np.max(np.abs(valid_diff)):.2e}\n'
            stats_text += f'Mean: {np.mean(np.abs(valid_diff)):.2e}\n'
            stats_text += f'Std: {np.std(valid_diff):.2e}'
            ax3.text(0.02, 0.98, stats_text, transform=ax3.transAxes,
                     verticalalignment='top', bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8))

        # Format x-axis
        for ax in axes:
            ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
            ax.xaxis.set_major_locator(mdates.DayLocator(interval=max(1, len(dates)//10)))
            plt.setp(ax.xaxis.get_majorticklabels(), rotation=45)

        plt.tight_layout()

        # Save plot
        plot_path = self.results_dir / f"{indicator_name}_detailed_comparison.png"
        plt.savefig(plot_path, dpi=300, bbox_inches='tight')
        print(f"Plot saved to {plot_path}")

        plt.show()

    def plot_all_comparisons(self):
        """Plot comparisons for all tested indicators."""
        print("\n=== Generating Detailed Comparison Plots ===")

        for indicator_name in self.results.keys():
            print(f"Plotting {indicator_name}...")
            self.plot_comparison(indicator_name)
            plt.close('all')

    def generate_report(self):
        """Generate detailed report for moving averages."""
        print("\n=== Generating Moving Average Report ===")

        report_lines = []
        report_lines.append("# Moving Average Indicators Comparison Report")
        report_lines.append(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        report_lines.append(f"Data file: {self.data_file}")
        report_lines.append(f"Sample size: {len(self.data)} data points")
        report_lines.append("")

        # Summary table
        report_lines.append("## Summary Table")
        report_lines.append("| Indicator | Period | Max Diff | Mean Diff | Status |")
        report_lines.append("|-----------|--------|----------|-----------|--------|")

        for indicator_name, result in self.results.items():
            diff = np.array(result['new']) - np.array(result['original'])
            valid_diff = diff[~np.isnan(diff)]

            if len(valid_diff) > 0:
                max_diff = np.max(np.abs(valid_diff))
                mean_diff = np.mean(np.abs(valid_diff))

                if max_diff < 1e-10:
                    status = "✅ PASSED"
                elif max_diff < 1e-6:
                    status = "⚠️ WARNING"
                else:
                    status = "❌ FAILED"

                report_lines.append(f"| {indicator_name} | {result['period']} | {max_diff:.2e} | {mean_diff:.2e} | {status} |")
            else:
                report_lines.append(f"| {indicator_name} | {result['period']} | N/A | N/A | ❌ ERROR |")

        report_lines.append("")

        # Detailed analysis
        report_lines.append("## Detailed Analysis")

        for indicator_name, result in self.results.items():
            report_lines.append(f"### {indicator_name}")

            diff = np.array(result['new']) - np.array(result['original'])
            valid_diff = diff[~np.isnan(diff)]

            if len(valid_diff) > 0:
                report_lines.append(f"- **Period**: {result['period']}")
                report_lines.append(f"- **Valid data points**: {len(valid_diff)}")
                report_lines.append(f"- **Max absolute difference**: {np.max(np.abs(valid_diff)):.12f}")
                report_lines.append(f"- **Mean absolute difference**: {np.mean(np.abs(valid_diff)):.12f}")
                report_lines.append(f"- **Standard deviation**: {np.std(valid_diff):.12f}")
                report_lines.append(f"- **Min difference**: {np.min(valid_diff):.12f}")
                report_lines.append(f"- **Max difference**: {np.max(valid_diff):.12f}")

                # Percentile analysis
                percentiles = [1, 5, 25, 50, 75, 95, 99]
                perc_values = np.percentile(np.abs(valid_diff), percentiles)
                perc_str = ", ".join([f"P{p}: {v:.2e}" for p, v in zip(percentiles, perc_values)])
                report_lines.append(f"- **Percentiles**: {perc_str}")

            report_lines.append("")

        # Save report
        report_path = self.results_dir / "moving_averages_report.md"
        with open(report_path, 'w', encoding='utf-8') as f:
            f.write('\n'.join(report_lines))

        print(f"Report saved to {report_path}")

    def run_tests(self):
        """Run all moving average tests."""
        print("Starting Moving Average Comparison Tests...")

        # Load data
        self.load_data()

        # Run tests
        self.test_simple_moving_average()
        self.test_exponential_moving_average()

        # Generate outputs
        self.plot_all_comparisons()
        self.generate_report()

        print("\n✅ Moving Average tests completed!")


if __name__ == "__main__":
    tester = MovingAverageComparisonTest(sample_size=3000)
    tester.run_tests()
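
# A minimal sketch of the incremental EMA recursion these tests exercise,
# assuming the standard smoothing factor alpha = 2 / (period + 1) and seeding
# with the first price; the real ExponentialMovingAverageState classes may
# handle warm-up differently.
class EmaSketch:
    def __init__(self, period: int):
        self.alpha = 2.0 / (period + 1)
        self.value = None

    def update(self, price: float) -> float:
        # Seed on the first observation, then apply the EMA recursion.
        if self.value is None:
            self.value = price
        else:
            self.value = self.alpha * price + (1.0 - self.alpha) * self.value
        return self.value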
@ -1,401 +0,0 @@
"""
RSI Indicators Comparison Test

Focused testing for RSI and Simple RSI implementations.
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from datetime import datetime
import sys
from pathlib import Path

# Add project root to path
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

# Import original indicators
from cycles.IncStrategies.indicators import (
    RSIState as OriginalRSI,
    SimpleRSIState as OriginalSimpleRSI
)

# Import new indicators
from IncrementalTrader.strategies.indicators import (
    RSIState as NewRSI,
    SimpleRSIState as NewSimpleRSI
)


class RSIComparisonTest:
    """Test framework for comparing RSI implementations."""

    def __init__(self, data_file: str = "data/btcusd_1-min_data.csv", sample_size: int = 5000):
        self.data_file = data_file
        self.sample_size = sample_size
        self.data = None
        self.results = {}

        # Create results directory
        self.results_dir = Path("test/results/rsi_indicators")
        self.results_dir.mkdir(parents=True, exist_ok=True)

    def load_data(self):
        """Load and prepare the data for testing."""
        print(f"Loading data from {self.data_file}...")

        df = pd.read_csv(self.data_file)
        df['datetime'] = pd.to_datetime(df['Timestamp'], unit='s')

        if self.sample_size and len(df) > self.sample_size:
            df = df.tail(self.sample_size).reset_index(drop=True)

        self.data = df
        print(f"Loaded {len(df)} data points from {df['datetime'].iloc[0]} to {df['datetime'].iloc[-1]}")

    def test_rsi(self, periods=[7, 14, 21, 28]):
        """Test RSI implementations (Wilder's smoothing)."""
        print("\n=== Testing RSI (Wilder's Smoothing) ===")

        for period in periods:
            print(f"Testing RSI({period})...")

            # Initialize indicators
            original_rsi = OriginalRSI(period)
            new_rsi = NewRSI(period)

            original_values = []
            new_values = []
            prices = []
            price_changes = []

            # Process data
            prev_price = None
            for _, row in self.data.iterrows():
                price = row['Close']
                prices.append(price)

                if prev_price is not None:
                    price_changes.append(price - prev_price)
                else:
                    price_changes.append(0)

                original_rsi.update(price)
                new_rsi.update(price)

                original_values.append(original_rsi.get_current_value() if original_rsi.is_warmed_up() else np.nan)
                new_values.append(new_rsi.get_current_value() if new_rsi.is_warmed_up() else np.nan)

                prev_price = price

            # Store results
            self.results[f'RSI_{period}'] = {
                'original': original_values,
                'new': new_values,
                'prices': prices,
                'price_changes': price_changes,
                'dates': self.data['datetime'].tolist(),
                'period': period
            }

            # Calculate differences
            diff = np.array(new_values) - np.array(original_values)
            valid_diff = diff[~np.isnan(diff)]

            if len(valid_diff) > 0:
                max_diff = np.max(np.abs(valid_diff))
                mean_diff = np.mean(np.abs(valid_diff))
                std_diff = np.std(valid_diff)

                print(f"  Max difference: {max_diff:.12f}")
                print(f"  Mean difference: {mean_diff:.12f}")
                print(f"  Std difference: {std_diff:.12f}")

                # Status check
                if max_diff < 1e-10:
                    print(f"  ✅ PASSED: Mathematically equivalent")
                elif max_diff < 1e-6:
                    print(f"  ⚠️ WARNING: Small differences (floating point precision)")
                else:
                    print(f"  ❌ FAILED: Significant differences detected")
            else:
                print(f"  ❌ ERROR: No valid data points")

    def test_simple_rsi(self, periods=[7, 14, 21, 28]):
        """Test Simple RSI implementations (Simple moving average)."""
        print("\n=== Testing Simple RSI (Simple Moving Average) ===")

        for period in periods:
            print(f"Testing SimpleRSI({period})...")

            # Initialize indicators
            original_rsi = OriginalSimpleRSI(period)
            new_rsi = NewSimpleRSI(period)

            original_values = []
            new_values = []
            prices = []
            price_changes = []

            # Process data
            prev_price = None
            for _, row in self.data.iterrows():
                price = row['Close']
                prices.append(price)

                if prev_price is not None:
                    price_changes.append(price - prev_price)
                else:
                    price_changes.append(0)

                original_rsi.update(price)
                new_rsi.update(price)

                original_values.append(original_rsi.get_current_value() if original_rsi.is_warmed_up() else np.nan)
                new_values.append(new_rsi.get_current_value() if new_rsi.is_warmed_up() else np.nan)

                prev_price = price

            # Store results
            self.results[f'SimpleRSI_{period}'] = {
                'original': original_values,
                'new': new_values,
                'prices': prices,
                'price_changes': price_changes,
                'dates': self.data['datetime'].tolist(),
                'period': period
            }

            # Calculate differences
            diff = np.array(new_values) - np.array(original_values)
            valid_diff = diff[~np.isnan(diff)]

            if len(valid_diff) > 0:
                max_diff = np.max(np.abs(valid_diff))
                mean_diff = np.mean(np.abs(valid_diff))
                std_diff = np.std(valid_diff)

                print(f"  Max difference: {max_diff:.12f}")
                print(f"  Mean difference: {mean_diff:.12f}")
                print(f"  Std difference: {std_diff:.12f}")

                # Status check
                if max_diff < 1e-10:
                    print(f"  ✅ PASSED: Mathematically equivalent")
                elif max_diff < 1e-6:
                    print(f"  ⚠️ WARNING: Small differences (floating point precision)")
                else:
                    print(f"  ❌ FAILED: Significant differences detected")
            else:
                print(f"  ❌ ERROR: No valid data points")

    def plot_comparison(self, indicator_name: str):
        """Plot detailed comparison for a specific indicator."""
        if indicator_name not in self.results:
            print(f"No results found for {indicator_name}")
            return

        result = self.results[indicator_name]
        dates = pd.to_datetime(result['dates'])

        # Create figure with subplots
        fig, axes = plt.subplots(4, 1, figsize=(15, 16))
        fig.suptitle(f'{indicator_name} - Detailed Comparison Analysis', fontsize=16)

        # Plot 1: Price data
        ax1 = axes[0]
        ax1.plot(dates, result['prices'], label='Close Price', alpha=0.8, color='black', linewidth=1)
        ax1.set_title('Price Data')
        ax1.legend()
        ax1.grid(True, alpha=0.3)

        # Plot 2: RSI comparison with levels
        ax2 = axes[1]
        ax2.plot(dates, result['original'], label='Original', alpha=0.8, linewidth=2, color='blue')
        ax2.plot(dates, result['new'], label='New', alpha=0.8, linewidth=2, linestyle='--', color='red')
        ax2.axhline(y=70, color='red', linestyle=':', alpha=0.7, label='Overbought (70)')
        ax2.axhline(y=30, color='green', linestyle=':', alpha=0.7, label='Oversold (30)')
        ax2.axhline(y=50, color='gray', linestyle='-', alpha=0.5, label='Midline (50)')
        ax2.set_title(f'{indicator_name} Values Comparison')
        ax2.set_ylim(0, 100)
        ax2.legend()
        ax2.grid(True, alpha=0.3)

        # Plot 3: Price changes
        ax3 = axes[2]
        positive_changes = [max(0, change) for change in result['price_changes']]
        negative_changes = [abs(min(0, change)) for change in result['price_changes']]
        ax3.plot(dates, positive_changes, label='Positive Changes', alpha=0.7, color='green')
        ax3.plot(dates, negative_changes, label='Negative Changes', alpha=0.7, color='red')
        ax3.set_title('Price Changes (Gains and Losses)')
        ax3.legend()
        ax3.grid(True, alpha=0.3)

        # Plot 4: Difference analysis
        ax4 = axes[3]
        diff = np.array(result['new']) - np.array(result['original'])
        ax4.plot(dates, diff, color='red', alpha=0.7, linewidth=1)
        ax4.set_title(f'{indicator_name} Difference (New - Original)')
        ax4.axhline(y=0, color='black', linestyle='-', alpha=0.5)
        ax4.grid(True, alpha=0.3)

        # Add statistics text
        valid_diff = diff[~np.isnan(diff)]
        if len(valid_diff) > 0:
            stats_text = f'Max: {np.max(np.abs(valid_diff)):.2e}\n'
            stats_text += f'Mean: {np.mean(np.abs(valid_diff)):.2e}\n'
            stats_text += f'Std: {np.std(valid_diff):.2e}'
            ax4.text(0.02, 0.98, stats_text, transform=ax4.transAxes,
                     verticalalignment='top', bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8))

        # Format x-axis
        for ax in axes:
            ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
            ax.xaxis.set_major_locator(mdates.DayLocator(interval=max(1, len(dates)//10)))
            plt.setp(ax.xaxis.get_majorticklabels(), rotation=45)

        plt.tight_layout()

        # Save plot
        plot_path = self.results_dir / f"{indicator_name}_detailed_comparison.png"
        plt.savefig(plot_path, dpi=300, bbox_inches='tight')
        print(f"Plot saved to {plot_path}")

        plt.show()

    def plot_all_comparisons(self):
        """Plot comparisons for all tested indicators."""
        print("\n=== Generating Detailed Comparison Plots ===")

        for indicator_name in self.results.keys():
            print(f"Plotting {indicator_name}...")
            self.plot_comparison(indicator_name)
            plt.close('all')

    def generate_report(self):
        """Generate detailed report for RSI indicators."""
        print("\n=== Generating RSI Report ===")

        report_lines = []
        report_lines.append("# RSI Indicators Comparison Report")
        report_lines.append(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        report_lines.append(f"Data file: {self.data_file}")
        report_lines.append(f"Sample size: {len(self.data)} data points")
        report_lines.append("")

        # Summary table
        report_lines.append("## Summary Table")
        report_lines.append("| Indicator | Period | Max Diff | Mean Diff | Status |")
        report_lines.append("|-----------|--------|----------|-----------|--------|")

        for indicator_name, result in self.results.items():
            diff = np.array(result['new']) - np.array(result['original'])
            valid_diff = diff[~np.isnan(diff)]

            if len(valid_diff) > 0:
                max_diff = np.max(np.abs(valid_diff))
                mean_diff = np.mean(np.abs(valid_diff))

                if max_diff < 1e-10:
                    status = "✅ PASSED"
                elif max_diff < 1e-6:
                    status = "⚠️ WARNING"
                else:
                    status = "❌ FAILED"

                report_lines.append(f"| {indicator_name} | {result['period']} | {max_diff:.2e} | {mean_diff:.2e} | {status} |")
            else:
                report_lines.append(f"| {indicator_name} | {result['period']} | N/A | N/A | ❌ ERROR |")

        report_lines.append("")

        # Methodology explanation
        report_lines.append("## Methodology")
        report_lines.append("### RSI (Relative Strength Index)")
        report_lines.append("- Uses Wilder's smoothing for average gains and losses")
        report_lines.append("- Average Gain = (Previous Average Gain × (n-1) + Current Gain) / n")
        report_lines.append("- Average Loss = (Previous Average Loss × (n-1) + Current Loss) / n")
        report_lines.append("- RS = Average Gain / Average Loss")
        report_lines.append("- RSI = 100 - (100 / (1 + RS))")
        report_lines.append("")
        report_lines.append("### Simple RSI")
        report_lines.append("- Uses simple moving average for average gains and losses")
        report_lines.append("- More responsive to recent price changes than Wilder's method")
        report_lines.append("")

        # Detailed analysis
        report_lines.append("## Detailed Analysis")

        for indicator_name, result in self.results.items():
            report_lines.append(f"### {indicator_name}")

            diff = np.array(result['new']) - np.array(result['original'])
            valid_diff = diff[~np.isnan(diff)]

            if len(valid_diff) > 0:
                report_lines.append(f"- **Period**: {result['period']}")
                report_lines.append(f"- **Valid data points**: {len(valid_diff)}")
                report_lines.append(f"- **Max absolute difference**: {np.max(np.abs(valid_diff)):.12f}")
                report_lines.append(f"- **Mean absolute difference**: {np.mean(np.abs(valid_diff)):.12f}")
                report_lines.append(f"- **Standard deviation**: {np.std(valid_diff):.12f}")

                # RSI-specific metrics
                valid_original = np.array(result['original'])[~np.isnan(result['original'])]
                if len(valid_original) > 0:
                    mean_rsi = np.mean(valid_original)
                    overbought_count = np.sum(valid_original > 70)
                    oversold_count = np.sum(valid_original < 30)

                    report_lines.append(f"- **Mean RSI value**: {mean_rsi:.2f}")
                    report_lines.append(f"- **Overbought periods (>70)**: {overbought_count} ({overbought_count/len(valid_original)*100:.1f}%)")
                    report_lines.append(f"- **Oversold periods (<30)**: {oversold_count} ({oversold_count/len(valid_original)*100:.1f}%)")

                # Price change analysis
                positive_changes = [max(0, change) for change in result['price_changes']]
                negative_changes = [abs(min(0, change)) for change in result['price_changes']]
                avg_gain = np.mean([change for change in positive_changes if change > 0]) if any(change > 0 for change in positive_changes) else 0
                avg_loss = np.mean([change for change in negative_changes if change > 0]) if any(change > 0 for change in negative_changes) else 0

                report_lines.append(f"- **Average gain**: {avg_gain:.6f}")
                report_lines.append(f"- **Average loss**: {avg_loss:.6f}")
                if avg_loss > 0:
                    report_lines.append(f"- **Gain/Loss ratio**: {avg_gain/avg_loss:.3f}")

                # Percentile analysis
                percentiles = [1, 5, 25, 50, 75, 95, 99]
                perc_values = np.percentile(np.abs(valid_diff), percentiles)
                perc_str = ", ".join([f"P{p}: {v:.2e}" for p, v in zip(percentiles, perc_values)])
                report_lines.append(f"- **Percentiles**: {perc_str}")

            report_lines.append("")

        # Save report
        report_path = self.results_dir / "rsi_indicators_report.md"
        with open(report_path, 'w', encoding='utf-8') as f:
            f.write('\n'.join(report_lines))

        print(f"Report saved to {report_path}")

    def run_tests(self):
        """Run all RSI tests."""
        print("Starting RSI Comparison Tests...")

        # Load data
        self.load_data()

        # Run tests
        self.test_rsi()
        self.test_simple_rsi()

        # Generate outputs
        self.plot_all_comparisons()
        self.generate_report()

        print("\n✅ RSI tests completed!")


if __name__ == "__main__":
    tester = RSIComparisonTest(sample_size=3000)
    tester.run_tests()
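
# A minimal sketch of Wilder's RSI recursion spelled out in the report's
# Methodology section; the warm-up seeding (simple averages over the first
# `period` changes) is an assumption, and the real RSIState classes may differ.
class WilderRsiSketch:
    def __init__(self, period: int = 14):
        self.period = period
        self.avg_gain = 0.0
        self.avg_loss = 0.0
        self.prev_price = None
        self.count = 0

    def update(self, price: float):
        if self.prev_price is None:
            self.prev_price = price
            return None
        change = price - self.prev_price
        self.prev_price = price
        gain = max(change, 0.0)
        loss = max(-change, 0.0)
        self.count += 1
        if self.count <= self.period:
            # Warm-up: incremental simple averages of gains and losses.
            self.avg_gain += (gain - self.avg_gain) / self.count
            self.avg_loss += (loss - self.avg_loss) / self.count
            if self.count < self.period:
                return None
        else:
            # Wilder's smoothing: avg = (prev_avg * (n - 1) + current) / n
            self.avg_gain = (self.avg_gain * (self.period - 1) + gain) / self.period
            self.avg_loss = (self.avg_loss * (self.period - 1) + loss) / self.period
        if self.avg_loss == 0.0:
            return 100.0
        rs = self.avg_gain / self.avg_loss
        return 100.0 - 100.0 / (1.0 + rs)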
@ -1,374 +0,0 @@
"""
Supertrend Indicators Comparison Test

Focused testing for Supertrend implementations.
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from datetime import datetime
import sys
from pathlib import Path

# Add project root to path
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

# Import original indicators
from cycles.IncStrategies.indicators import (
    SupertrendState as OriginalSupertrend
)

# Import new indicators
from IncrementalTrader.strategies.indicators import (
    SupertrendState as NewSupertrend
)


class SupertrendComparisonTest:
    """Test framework for comparing Supertrend implementations."""

    def __init__(self, data_file: str = "data/btcusd_1-min_data.csv", sample_size: int = 5000):
        self.data_file = data_file
        self.sample_size = sample_size
        self.data = None
        self.results = {}

        # Create results directory
        self.results_dir = Path("test/results/supertrend_indicators")
        self.results_dir.mkdir(parents=True, exist_ok=True)

    def load_data(self):
        """Load and prepare the data for testing."""
        print(f"Loading data from {self.data_file}...")

        df = pd.read_csv(self.data_file)
        df['datetime'] = pd.to_datetime(df['Timestamp'], unit='s')

        if self.sample_size and len(df) > self.sample_size:
            df = df.tail(self.sample_size).reset_index(drop=True)

        self.data = df
        print(f"Loaded {len(df)} data points from {df['datetime'].iloc[0]} to {df['datetime'].iloc[-1]}")

    def test_supertrend(self, periods=[7, 10, 14, 21], multipliers=[2.0, 3.0, 4.0]):
        """Test Supertrend implementations."""
        print("\n=== Testing Supertrend ===")

        for period in periods:
            for multiplier in multipliers:
                print(f"Testing Supertrend({period}, {multiplier})...")

                # Initialize indicators
                original_st = OriginalSupertrend(period, multiplier)
                new_st = NewSupertrend(period, multiplier)

                original_values = []
                new_values = []
                original_trends = []
                new_trends = []
                original_signals = []
                new_signals = []

                # Process data
                for _, row in self.data.iterrows():
                    high, low, close = row['High'], row['Low'], row['Close']

                    # Create OHLC dictionary for both indicators
                    ohlc_data = {
                        'open': row['Open'],
                        'high': high,
                        'low': low,
                        'close': close
                    }

                    original_st.update(ohlc_data)
                    new_st.update(ohlc_data)

                    original_values.append(original_st.get_current_value()['supertrend'] if original_st.is_warmed_up() else np.nan)
                    new_values.append(new_st.get_current_value()['supertrend'] if new_st.is_warmed_up() else np.nan)
                    original_trends.append(original_st.get_current_value()['trend'] if original_st.is_warmed_up() else 0)
                    new_trends.append(new_st.get_current_value()['trend'] if new_st.is_warmed_up() else 0)

                    # Check for trend changes (signals)
                    if len(original_trends) > 1:
                        original_signals.append(1 if original_trends[-1] != original_trends[-2] else 0)
                        new_signals.append(1 if new_trends[-1] != new_trends[-2] else 0)
                    else:
                        original_signals.append(0)
                        new_signals.append(0)

                # Store results
                key = f'Supertrend_{period}_{multiplier}'
                self.results[key] = {
                    'original': original_values,
                    'new': new_values,
                    'original_trend': original_trends,
                    'new_trend': new_trends,
                    'original_signals': original_signals,
                    'new_signals': new_signals,
                    'highs': self.data['High'].tolist(),
                    'lows': self.data['Low'].tolist(),
                    'closes': self.data['Close'].tolist(),
                    'dates': self.data['datetime'].tolist(),
                    'period': period,
                    'multiplier': multiplier
                }

                # Calculate differences
                diff = np.array(new_values) - np.array(original_values)
                valid_diff = diff[~np.isnan(diff)]

                # Trend comparison
                trend_diff = np.array(new_trends) - np.array(original_trends)
                trend_matches = np.sum(trend_diff == 0) / len(trend_diff) * 100

                # Signal comparison
                signal_diff = np.array(new_signals) - np.array(original_signals)
                signal_matches = np.sum(signal_diff == 0) / len(signal_diff) * 100

                if len(valid_diff) > 0:
                    max_diff = np.max(np.abs(valid_diff))
                    mean_diff = np.mean(np.abs(valid_diff))
                    std_diff = np.std(valid_diff)

                    print(f"  Max difference: {max_diff:.12f}")
                    print(f"  Mean difference: {mean_diff:.12f}")
                    print(f"  Std difference: {std_diff:.12f}")
                    print(f"  Trend match: {trend_matches:.2f}%")
                    print(f"  Signal match: {signal_matches:.2f}%")

                    # Status check
                    if max_diff < 1e-10 and trend_matches == 100:
                        print(f"  ✅ PASSED: Mathematically equivalent")
                    elif max_diff < 1e-6 and trend_matches >= 99:
                        print(f"  ⚠️ WARNING: Small differences (floating point precision)")
                    else:
                        print(f"  ❌ FAILED: Significant differences detected")
                else:
                    print(f"  ❌ ERROR: No valid data points")

    def plot_comparison(self, indicator_name: str):
        """Plot detailed comparison for a specific indicator."""
        if indicator_name not in self.results:
            print(f"No results found for {indicator_name}")
            return

        result = self.results[indicator_name]
        dates = pd.to_datetime(result['dates'])

        # Create figure with subplots
        fig, axes = plt.subplots(5, 1, figsize=(15, 20))
        fig.suptitle(f'{indicator_name} - Detailed Comparison Analysis', fontsize=16)

        # Plot 1: Price and Supertrend
        ax1 = axes[0]
        ax1.plot(dates, result['closes'], label='Close Price', alpha=0.7, color='black', linewidth=1)
        ax1.plot(dates, result['original'], label='Original Supertrend', alpha=0.8, linewidth=2, color='blue')
        ax1.plot(dates, result['new'], label='New Supertrend', alpha=0.8, linewidth=2, linestyle='--', color='red')
        ax1.set_title(f'{indicator_name} vs Price')
        ax1.legend()
        ax1.grid(True, alpha=0.3)

        # Plot 2: Trend comparison
        ax2 = axes[1]
        ax2.plot(dates, result['original_trend'], label='Original Trend', alpha=0.8, linewidth=2, color='blue')
        ax2.plot(dates, result['new_trend'], label='New Trend', alpha=0.8, linewidth=2, linestyle='--', color='red')
        ax2.set_title(f'{indicator_name} Trend Direction (1=Up, -1=Down)')
        ax2.legend()
        ax2.grid(True, alpha=0.3)
        ax2.set_ylim(-1.5, 1.5)

        # Plot 3: Supertrend values comparison
        ax3 = axes[2]
        ax3.plot(dates, result['original'], label='Original', alpha=0.8, linewidth=2)
        ax3.plot(dates, result['new'], label='New', alpha=0.8, linewidth=2, linestyle='--')
        ax3.set_title(f'{indicator_name} Values Comparison')
        ax3.legend()
        ax3.grid(True, alpha=0.3)

        # Plot 4: Difference analysis
        ax4 = axes[3]
        diff = np.array(result['new']) - np.array(result['original'])
        ax4.plot(dates, diff, color='red', alpha=0.7, linewidth=1)
        ax4.set_title(f'{indicator_name} Difference (New - Original)')
        ax4.axhline(y=0, color='black', linestyle='-', alpha=0.5)
        ax4.grid(True, alpha=0.3)

        # Plot 5: Signal comparison
        ax5 = axes[4]
        signal_dates = dates[1:]  # Signals start from second data point
        ax5.scatter(signal_dates, np.array(result['original_signals'][1:]),
                    label='Original Signals', alpha=0.7, color='blue', s=30)
        ax5.scatter(signal_dates, np.array(result['new_signals'][1:]) + 0.1,
                    label='New Signals', alpha=0.7, color='red', s=30, marker='^')
        ax5.set_title(f'{indicator_name} Trend Change Signals')
        ax5.legend()
        ax5.grid(True, alpha=0.3)
        ax5.set_ylim(-0.2, 1.3)

        # Add statistics text
        valid_diff = diff[~np.isnan(diff)]
        if len(valid_diff) > 0:
            trend_diff = np.array(result['new_trend']) - np.array(result['original_trend'])
            trend_matches = np.sum(trend_diff == 0) / len(trend_diff) * 100

            stats_text = f'Max: {np.max(np.abs(valid_diff)):.2e}\n'
            stats_text += f'Mean: {np.mean(np.abs(valid_diff)):.2e}\n'
            stats_text += f'Trend Match: {trend_matches:.1f}%'
            ax4.text(0.02, 0.98, stats_text, transform=ax4.transAxes,
                     verticalalignment='top', bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8))

        # Format x-axis
        for ax in axes:
            ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
            ax.xaxis.set_major_locator(mdates.DayLocator(interval=max(1, len(dates)//10)))
            plt.setp(ax.xaxis.get_majorticklabels(), rotation=45)

        plt.tight_layout()

        # Save plot
        plot_path = self.results_dir / f"{indicator_name}_detailed_comparison.png"
        plt.savefig(plot_path, dpi=300, bbox_inches='tight')
        print(f"Plot saved to {plot_path}")

        plt.show()

    def plot_all_comparisons(self):
        """Plot comparisons for all tested indicators."""
        print("\n=== Generating Detailed Comparison Plots ===")

        for indicator_name in self.results.keys():
            print(f"Plotting {indicator_name}...")
            self.plot_comparison(indicator_name)
            plt.close('all')

    def generate_report(self):
        """Generate detailed report for Supertrend indicators."""
        print("\n=== Generating Supertrend Report ===")

        report_lines = []
        report_lines.append("# Supertrend Indicators Comparison Report")
        report_lines.append(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        report_lines.append(f"Data file: {self.data_file}")
        report_lines.append(f"Sample size: {len(self.data)} data points")
        report_lines.append("")

        # Summary table
        report_lines.append("## Summary Table")
        report_lines.append("| Indicator | Period | Multiplier | Max Diff | Mean Diff | Trend Match | Status |")
        report_lines.append("|-----------|--------|------------|----------|-----------|-------------|--------|")

        for indicator_name, result in self.results.items():
            diff = np.array(result['new']) - np.array(result['original'])
            valid_diff = diff[~np.isnan(diff)]

            trend_diff = np.array(result['new_trend']) - np.array(result['original_trend'])
            trend_matches = np.sum(trend_diff == 0) / len(trend_diff) * 100

            if len(valid_diff) > 0:
                max_diff = np.max(np.abs(valid_diff))
                mean_diff = np.mean(np.abs(valid_diff))

                if max_diff < 1e-10 and trend_matches == 100:
                    status = "✅ PASSED"
                elif max_diff < 1e-6 and trend_matches >= 99:
                    status = "⚠️ WARNING"
                else:
                    status = "❌ FAILED"

                report_lines.append(f"| {indicator_name} | {result['period']} | {result['multiplier']} | "
                                    f"{max_diff:.2e} | {mean_diff:.2e} | {trend_matches:.1f}% | {status} |")
            else:
                report_lines.append(f"| {indicator_name} | {result['period']} | {result['multiplier']} | "
                                    f"N/A | N/A | N/A | ❌ ERROR |")

        report_lines.append("")

        # Methodology explanation
        report_lines.append("## Methodology")
        report_lines.append("### Supertrend Calculation")
        report_lines.append("1. **Basic Upper Band**: (High + Low) / 2 + (Multiplier × ATR)")
        report_lines.append("2. **Basic Lower Band**: (High + Low) / 2 - (Multiplier × ATR)")
        report_lines.append("3. **Final Upper Band**: min(Basic Upper Band, Previous Final Upper Band if Close[1] <= Previous Final Upper Band)")
        report_lines.append("4. **Final Lower Band**: max(Basic Lower Band, Previous Final Lower Band if Close[1] >= Previous Final Lower Band)")
        report_lines.append("5. **Supertrend**: Final Lower Band if trend is up, Final Upper Band if trend is down")
        report_lines.append("6. **Trend**: Up if Close > Previous Supertrend, Down if Close <= Previous Supertrend")
        report_lines.append("")

        # Detailed analysis
        report_lines.append("## Detailed Analysis")

        for indicator_name, result in self.results.items():
            report_lines.append(f"### {indicator_name}")

            diff = np.array(result['new']) - np.array(result['original'])
            valid_diff = diff[~np.isnan(diff)]

            trend_diff = np.array(result['new_trend']) - np.array(result['original_trend'])
            trend_matches = np.sum(trend_diff == 0) / len(trend_diff) * 100

            signal_diff = np.array(result['new_signals']) - np.array(result['original_signals'])
            signal_matches = np.sum(signal_diff == 0) / len(signal_diff) * 100

            if len(valid_diff) > 0:
                report_lines.append(f"- **Period**: {result['period']}")
                report_lines.append(f"- **Multiplier**: {result['multiplier']}")
                report_lines.append(f"- **Valid data points**: {len(valid_diff)}")
                report_lines.append(f"- **Max absolute difference**: {np.max(np.abs(valid_diff)):.12f}")
                report_lines.append(f"- **Mean absolute difference**: {np.mean(np.abs(valid_diff)):.12f}")
                report_lines.append(f"- **Standard deviation**: {np.std(valid_diff):.12f}")
                report_lines.append(f"- **Trend direction match**: {trend_matches:.2f}%")
                report_lines.append(f"- **Signal timing match**: {signal_matches:.2f}%")

                # Supertrend-specific metrics
                valid_original = np.array(result['original'])[~np.isnan(result['original'])]
                if len(valid_original) > 0:
                    mean_st = np.mean(valid_original)
                    relative_error = np.mean(np.abs(valid_diff)) / mean_st * 100
                    report_lines.append(f"- **Mean Supertrend value**: {mean_st:.6f}")
                    report_lines.append(f"- **Relative error**: {relative_error:.2e}%")

                # Count trend changes
                original_changes = np.sum(np.array(result['original_signals']))
                new_changes = np.sum(np.array(result['new_signals']))
                report_lines.append(f"- **Original trend changes**: {original_changes}")
                report_lines.append(f"- **New trend changes**: {new_changes}")

                # Percentile analysis
                percentiles = [1, 5, 25, 50, 75, 95, 99]
                perc_values = np.percentile(np.abs(valid_diff), percentiles)
                perc_str = ", ".join([f"P{p}: {v:.2e}" for p, v in zip(percentiles, perc_values)])
                report_lines.append(f"- **Percentiles**: {perc_str}")

            report_lines.append("")

        # Save report
        report_path = self.results_dir / "supertrend_indicators_report.md"
        with open(report_path, 'w', encoding='utf-8') as f:
            f.write('\n'.join(report_lines))

        print(f"Report saved to {report_path}")

    def run_tests(self):
        """Run all Supertrend tests."""
        print("Starting Supertrend Comparison Tests...")

        # Load data
        self.load_data()

        # Run tests
        self.test_supertrend()

        # Generate outputs
        self.plot_all_comparisons()
        self.generate_report()

        print("\n✅ Supertrend tests completed!")


if __name__ == "__main__":
    tester = SupertrendComparisonTest(sample_size=3000)
    tester.run_tests()
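
# A minimal single-bar sketch of the band logic from the report's Methodology
# section; `atr` and the prev_* values are assumed inputs here, while the real
# SupertrendState classes also maintain the ATR incrementally.
def supertrend_step(high, low, close, prev_close, atr, multiplier,
                    prev_final_upper, prev_final_lower, prev_supertrend):
    mid = (high + low) / 2.0
    basic_upper = mid + multiplier * atr
    basic_lower = mid - multiplier * atr
    # Final bands only ratchet while the previous close stays inside them.
    final_upper = min(basic_upper, prev_final_upper) if prev_close <= prev_final_upper else basic_upper
    final_lower = max(basic_lower, prev_final_lower) if prev_close >= prev_final_lower else basic_lower
    trend = 1 if close > prev_supertrend else -1
    supertrend = final_lower if trend == 1 else final_upper
    return supertrend, trend, final_upper, final_lower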
@ -1,531 +0,0 @@
"""
Strategy Comparison Test Framework

Comprehensive testing for comparing original incremental strategies from cycles/IncStrategies
with new implementations in IncrementalTrader/strategies.

This test framework validates:
1. MetaTrend Strategy: IncMetaTrendStrategy vs MetaTrendStrategy
2. Random Strategy: IncRandomStrategy vs RandomStrategy
3. BBRS Strategy: BBRSIncrementalState vs BBRSStrategy

Each test validates signal generation, mathematical equivalence, and behavioral consistency.
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from datetime import datetime
import sys
from pathlib import Path
from typing import Dict, List, Tuple, Any
import os

# Add project paths
sys.path.append(str(Path(__file__).parent.parent))
sys.path.append(str(Path(__file__).parent.parent / "cycles"))
sys.path.append(str(Path(__file__).parent.parent / "IncrementalTrader"))

# Import original strategies
from cycles.IncStrategies.metatrend_strategy import IncMetaTrendStrategy
from cycles.IncStrategies.random_strategy import IncRandomStrategy
from cycles.IncStrategies.bbrs_incremental import BBRSIncrementalState

# Import new strategies
from IncrementalTrader.strategies.metatrend import MetaTrendStrategy
from IncrementalTrader.strategies.random import RandomStrategy
from IncrementalTrader.strategies.bbrs import BBRSStrategy


class StrategyComparisonTester:
    def __init__(self, data_file: str = "data/btcusd_1-min_data.csv"):
        """Initialize the strategy comparison tester."""
        self.data_file = data_file
        self.data = None
        self.results_dir = Path("test/results/strategies")
        self.results_dir.mkdir(parents=True, exist_ok=True)

    def load_data(self, limit: int = 1000) -> bool:
        """Load and prepare test data."""
        try:
            print(f"Loading data from {self.data_file}...")
            self.data = pd.read_csv(self.data_file)

            # Limit data for testing
            if limit:
                self.data = self.data.head(limit)

            print(f"Loaded {len(self.data)} data points")
            print(f"Data columns: {list(self.data.columns)}")
            print(f"Data sample:\n{self.data.head()}")
            return True

        except Exception as e:
            print(f"Error loading data: {e}")
            return False

    def compare_metatrend_strategies(self) -> Dict[str, Any]:
        """Compare IncMetaTrendStrategy vs MetaTrendStrategy."""
        print("\n" + "="*80)
        print("COMPARING METATREND STRATEGIES")
        print("="*80)

        try:
            # Initialize strategies with same parameters
            original_strategy = IncMetaTrendStrategy()
            new_strategy = MetaTrendStrategy()

            # Track signals
            original_entry_signals = []
            new_entry_signals = []
            original_exit_signals = []
            new_exit_signals = []
            combined_original_signals = []
            combined_new_signals = []
            timestamps = []

            # Process data
            for i, row in self.data.iterrows():
                timestamp = pd.Timestamp(row['Timestamp'], unit='s')
                ohlcv_data = {
                    'open': row['Open'],
                    'high': row['High'],
                    'low': row['Low'],
                    'close': row['Close'],
                    'volume': row['Volume']
                }

                # Update original strategy (uses update_minute_data)
                original_strategy.update_minute_data(timestamp, ohlcv_data)

                # Update new strategy (uses process_data_point)
                new_strategy.process_data_point(timestamp, ohlcv_data)

                # Get signals
                orig_entry = original_strategy.get_entry_signal()
                new_entry = new_strategy.get_entry_signal()
                orig_exit = original_strategy.get_exit_signal()
                new_exit = new_strategy.get_exit_signal()

                # Store signals (both use signal_type)
                original_entry_signals.append(orig_entry.signal_type if orig_entry else "HOLD")
                new_entry_signals.append(new_entry.signal_type if new_entry else "HOLD")
                original_exit_signals.append(orig_exit.signal_type if orig_exit else "HOLD")
                new_exit_signals.append(new_exit.signal_type if new_exit else "HOLD")

                # Combined signal logic (simplified)
                orig_signal = "BUY" if orig_entry and orig_entry.signal_type == "ENTRY" else ("SELL" if orig_exit and orig_exit.signal_type == "EXIT" else "HOLD")
                new_signal = "BUY" if new_entry and new_entry.signal_type == "ENTRY" else ("SELL" if new_exit and new_exit.signal_type == "EXIT" else "HOLD")

                combined_original_signals.append(orig_signal)
                combined_new_signals.append(new_signal)
                timestamps.append(timestamp)

            # Calculate consistency metrics
            entry_matches = sum(1 for o, n in zip(original_entry_signals, new_entry_signals) if o == n)
            exit_matches = sum(1 for o, n in zip(original_exit_signals, new_exit_signals) if o == n)
            combined_matches = sum(1 for o, n in zip(combined_original_signals, combined_new_signals) if o == n)

            total_points = len(self.data)
            entry_consistency = (entry_matches / total_points) * 100
            exit_consistency = (exit_matches / total_points) * 100
            combined_consistency = (combined_matches / total_points) * 100

            results = {
                'strategy_name': 'MetaTrend',
                'total_points': total_points,
                'entry_consistency': entry_consistency,
                'exit_consistency': exit_consistency,
                'combined_consistency': combined_consistency,
                'original_entry_signals': original_entry_signals,
                'new_entry_signals': new_entry_signals,
                'original_exit_signals': original_exit_signals,
                'new_exit_signals': new_exit_signals,
                'combined_original_signals': combined_original_signals,
                'combined_new_signals': combined_new_signals,
                'timestamps': timestamps
            }

            print(f"Entry Signal Consistency: {entry_consistency:.2f}%")
            print(f"Exit Signal Consistency: {exit_consistency:.2f}%")
            print(f"Combined Signal Consistency: {combined_consistency:.2f}%")

            return results

        except Exception as e:
            print(f"Error comparing MetaTrend strategies: {e}")
            import traceback
            traceback.print_exc()
            return {}
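
    # A hypothetical helper (not in the original file) that makes the signal
    # normalization used by the comparison loops explicit; it assumes both
    # strategy APIs return signal objects exposing a .signal_type attribute.
    @staticmethod
    def _to_combined_signal(entry_signal, exit_signal) -> str:
        if entry_signal is not None and entry_signal.signal_type == "ENTRY":
            return "BUY"
        if exit_signal is not None and exit_signal.signal_type == "EXIT":
            return "SELL"
        return "HOLD"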

    def compare_random_strategies(self) -> Dict[str, Any]:
        """Compare IncRandomStrategy vs RandomStrategy."""
        print("\n" + "="*80)
        print("COMPARING RANDOM STRATEGIES")
        print("="*80)

        try:
            # Initialize strategies with same seed for reproducibility
            # Original: IncRandomStrategy(weight, params)
            # New: RandomStrategy(name, weight, params)
            original_strategy = IncRandomStrategy(weight=1.0, params={"random_seed": 42})
            new_strategy = RandomStrategy(name="random", weight=1.0, params={"random_seed": 42})

            # Track signals
            original_signals = []
            new_signals = []
            timestamps = []

            # Process data
            for i, row in self.data.iterrows():
                timestamp = pd.Timestamp(row['Timestamp'], unit='s')
                ohlcv_data = {
                    'open': row['Open'],
                    'high': row['High'],
                    'low': row['Low'],
                    'close': row['Close'],
                    'volume': row['Volume']
                }

                # Update strategies
                original_strategy.update_minute_data(timestamp, ohlcv_data)
                new_strategy.process_data_point(timestamp, ohlcv_data)

                # Get signals
                orig_signal = original_strategy.get_entry_signal()  # Random strategy uses get_entry_signal
                new_signal = new_strategy.get_entry_signal()

                # Store signals
                original_signals.append(orig_signal.signal_type if orig_signal else "HOLD")
                new_signals.append(new_signal.signal_type if new_signal else "HOLD")
                timestamps.append(timestamp)

            # Calculate consistency metrics
            matches = sum(1 for o, n in zip(original_signals, new_signals) if o == n)
            total_points = len(self.data)
            consistency = (matches / total_points) * 100

            results = {
                'strategy_name': 'Random',
                'total_points': total_points,
                'consistency': consistency,
                'original_signals': original_signals,
                'new_signals': new_signals,
                'timestamps': timestamps
            }

            print(f"Signal Consistency: {consistency:.2f}%")

            return results

        except Exception as e:
            print(f"Error comparing Random strategies: {e}")
            import traceback
            traceback.print_exc()
            return {}

    def compare_bbrs_strategies(self) -> Dict[str, Any]:
        """Compare BBRSIncrementalState vs BBRSStrategy."""
        print("\n" + "="*80)
        print("COMPARING BBRS STRATEGIES")
        print("="*80)

        try:
            # Initialize strategies with same configuration
            # Original: BBRSIncrementalState(config)
            # New: BBRSStrategy(name, weight, params)
            original_config = {
                "timeframe_minutes": 60,
                "bb_period": 20,
                "rsi_period": 14,
                "bb_width": 0.05,
                "trending": {
                    "bb_std_dev_multiplier": 2.5,
                    "rsi_threshold": [30, 70]
                },
                "sideways": {
                    "bb_std_dev_multiplier": 1.8,
                    "rsi_threshold": [40, 60]
                },
                "SqueezeStrategy": True
            }

            new_params = {
                "timeframe": "1h",
                "bb_period": 20,
                "rsi_period": 14,
                "bb_width_threshold": 0.05,
                "trending_bb_multiplier": 2.5,
                "sideways_bb_multiplier": 1.8,
                "trending_rsi_thresholds": [30, 70],
                "sideways_rsi_thresholds": [40, 60],
                "squeeze_strategy": True,
                "enable_logging": False
            }

            original_strategy = BBRSIncrementalState(original_config)
            new_strategy = BBRSStrategy(name="bbrs", weight=1.0, params=new_params)

            # Track signals
            original_signals = []
            new_signals = []
            timestamps = []

            # Process data
            for i, row in self.data.iterrows():
                timestamp = pd.Timestamp(row['Timestamp'], unit='s')
                ohlcv_data = {
                    'open': row['Open'],
                    'high': row['High'],
                    'low': row['Low'],
                    'close': row['Close'],
                    'volume': row['Volume']
                }

                # Update strategies
                orig_result = original_strategy.update_minute_data(timestamp, ohlcv_data)
                new_strategy.process_data_point(timestamp, ohlcv_data)

                # Get signals from original (returns dict with buy_signal/sell_signal)
                if orig_result and orig_result.get('buy_signal', False):
                    orig_signal = "BUY"
                elif orig_result and orig_result.get('sell_signal', False):
                    orig_signal = "SELL"
                else:
                    orig_signal = "HOLD"

                # Get signals from new strategy
                new_entry = new_strategy.get_entry_signal()
                new_exit = new_strategy.get_exit_signal()

                if new_entry and new_entry.signal_type == "ENTRY":
                    new_signal = "BUY"
                elif new_exit and new_exit.signal_type == "EXIT":
                    new_signal = "SELL"
                else:
                    new_signal = "HOLD"

                # Store signals
                original_signals.append(orig_signal)
                new_signals.append(new_signal)
                timestamps.append(timestamp)

            # Calculate consistency metrics
            matches = sum(1 for o, n in zip(original_signals, new_signals) if o == n)
            total_points = len(self.data)
            consistency = (matches / total_points) * 100

            results = {
                'strategy_name': 'BBRS',
                'total_points': total_points,
                'consistency': consistency,
                'original_signals': original_signals,
                'new_signals': new_signals,
                'timestamps': timestamps
            }

            print(f"Signal Consistency: {consistency:.2f}%")

            return results

        except Exception as e:
            print(f"Error comparing BBRS strategies: {e}")
            import traceback
            traceback.print_exc()
            return {}
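
    # A hypothetical helper (not in the original file) making the old-config to
    # new-params translation used above explicit; the key names follow the two
    # dictionaries defined in compare_bbrs_strategies and are otherwise assumed.
    @staticmethod
    def _bbrs_config_to_params(config: Dict[str, Any]) -> Dict[str, Any]:
        return {
            "timeframe": f"{config['timeframe_minutes'] // 60}h",
            "bb_period": config["bb_period"],
            "rsi_period": config["rsi_period"],
            "bb_width_threshold": config["bb_width"],
            "trending_bb_multiplier": config["trending"]["bb_std_dev_multiplier"],
            "sideways_bb_multiplier": config["sideways"]["bb_std_dev_multiplier"],
            "trending_rsi_thresholds": config["trending"]["rsi_threshold"],
            "sideways_rsi_thresholds": config["sideways"]["rsi_threshold"],
            "squeeze_strategy": config["SqueezeStrategy"],
            "enable_logging": False,
        }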
|
||||
    @staticmethod
    def _status_label(consistency: float) -> str:
        """Map a consistency percentage to a status label."""
        if consistency >= 95:
            return "✅ EXCELLENT"
        elif consistency >= 90:
            return "✅ GOOD"
        elif consistency >= 80:
            return "⚠️ ACCEPTABLE"
        return "❌ NEEDS REVIEW"

    def generate_report(self, results: List[Dict[str, Any]]) -> None:
        """Generate comprehensive comparison report."""
        print("\n" + "="*80)
        print("GENERATING STRATEGY COMPARISON REPORT")
        print("="*80)

        # Create summary report
        report_file = self.results_dir / "strategy_comparison_report.txt"

        with open(report_file, 'w', encoding='utf-8') as f:
            f.write("Strategy Comparison Report\n")
            f.write("=" * 50 + "\n\n")
            f.write(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write(f"Data points tested: {results[0]['total_points'] if results else 'N/A'}\n\n")

            for result in results:
                if not result:
                    continue

                f.write(f"Strategy: {result['strategy_name']}\n")
                f.write("-" * 30 + "\n")

                if result['strategy_name'] == 'MetaTrend':
                    f.write(f"Entry Signal Consistency: {result['entry_consistency']:.2f}%\n")
                    f.write(f"Exit Signal Consistency: {result['exit_consistency']:.2f}%\n")
                    f.write(f"Combined Signal Consistency: {result['combined_consistency']:.2f}%\n")
                    status = self._status_label(result['combined_consistency'])
                else:
                    f.write(f"Signal Consistency: {result['consistency']:.2f}%\n")
                    status = self._status_label(result['consistency'])

                f.write(f"Status: {status}\n\n")

        print(f"Report saved to: {report_file}")

        # Generate plots for each strategy
        for result in results:
            if not result:
                continue
            self.plot_strategy_comparison(result)

    def plot_strategy_comparison(self, result: Dict[str, Any]) -> None:
        """Generate comparison plots for a strategy."""
        strategy_name = result['strategy_name']

        fig, axes = plt.subplots(2, 1, figsize=(15, 10))
        fig.suptitle(f'{strategy_name} Strategy Comparison', fontsize=16, fontweight='bold')

        timestamps = result['timestamps']

        if strategy_name == 'MetaTrend':
            # Plot entry signals
            axes[0].plot(timestamps, [1 if s == "ENTRY" else 0 for s in result['original_entry_signals']],
                         label='Original Entry', alpha=0.7, linewidth=2)
            axes[0].plot(timestamps, [1 if s == "ENTRY" else 0 for s in result['new_entry_signals']],
                         label='New Entry', alpha=0.7, linewidth=2, linestyle='--')
            axes[0].set_title(f'Entry Signals - Consistency: {result["entry_consistency"]:.2f}%')
            axes[0].set_ylabel('Entry Signal')
            axes[0].legend()
            axes[0].grid(True, alpha=0.3)

            # Plot combined signals
            signal_map = {"BUY": 1, "SELL": -1, "HOLD": 0}
            orig_combined = [signal_map[s] for s in result['combined_original_signals']]
            new_combined = [signal_map[s] for s in result['combined_new_signals']]

            axes[1].plot(timestamps, orig_combined, label='Original Combined', alpha=0.7, linewidth=2)
            axes[1].plot(timestamps, new_combined, label='New Combined', alpha=0.7, linewidth=2, linestyle='--')
            axes[1].set_title(f'Combined Signals - Consistency: {result["combined_consistency"]:.2f}%')
            axes[1].set_ylabel('Signal (-1=SELL, 0=HOLD, 1=BUY)')

        else:
            # For Random and BBRS strategies
            signal_map = {"BUY": 1, "SELL": -1, "HOLD": 0}
            orig_signals = [signal_map.get(s, 0) for s in result['original_signals']]
            new_signals = [signal_map.get(s, 0) for s in result['new_signals']]

            axes[0].plot(timestamps, orig_signals, label='Original', alpha=0.7, linewidth=2)
            axes[0].plot(timestamps, new_signals, label='New', alpha=0.7, linewidth=2, linestyle='--')
            axes[0].set_title(f'Signals - Consistency: {result["consistency"]:.2f}%')
            axes[0].set_ylabel('Signal (-1=SELL, 0=HOLD, 1=BUY)')

            # Plot difference
            diff = [o - n for o, n in zip(orig_signals, new_signals)]
            axes[1].plot(timestamps, diff, label='Difference (Original - New)', color='red', alpha=0.7)
            axes[1].set_title('Signal Differences')
            axes[1].set_ylabel('Difference')
            axes[1].axhline(y=0, color='black', linestyle='-', alpha=0.3)

        # Format x-axis
        for ax in axes:
            ax.legend()
            ax.grid(True, alpha=0.3)
            ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
            ax.xaxis.set_major_locator(mdates.HourLocator(interval=2))
            plt.setp(ax.xaxis.get_majorticklabels(), rotation=45)

        # Label the shared time axis on the bottom subplot explicitly
        axes[1].set_xlabel('Time')
        plt.tight_layout()

        # Save plot
        plot_file = self.results_dir / f"{strategy_name.lower()}_strategy_comparison.png"
        plt.savefig(plot_file, dpi=300, bbox_inches='tight')
        plt.close()

        print(f"Plot saved to: {plot_file}")

def main():
    """Main test execution."""
    print("Strategy Comparison Test Framework")
    print("=" * 50)

    # Initialize tester
    tester = StrategyComparisonTester()

    # Load data
    if not tester.load_data(limit=1000):  # Use 1000 points for testing
        print("Failed to load data. Exiting.")
        return

    # Run comparisons
    results = []

    # Compare MetaTrend strategies
    metatrend_result = tester.compare_metatrend_strategies()
    if metatrend_result:
        results.append(metatrend_result)

    # Compare Random strategies
    random_result = tester.compare_random_strategies()
    if random_result:
        results.append(random_result)

    # Compare BBRS strategies
    bbrs_result = tester.compare_bbrs_strategies()
    if bbrs_result:
        results.append(bbrs_result)

    # Generate comprehensive report
    if results:
        tester.generate_report(results)

        print("\n" + "="*80)
        print("STRATEGY COMPARISON SUMMARY")
        print("="*80)

        for result in results:
            if not result:
                continue

            strategy_name = result['strategy_name']

            if strategy_name == 'MetaTrend':
                consistency = result['combined_consistency']
            else:
                consistency = result['consistency']

            print(f"{strategy_name}: {consistency:.2f}% consistency")
            print(f"  Status: {tester._status_label(consistency)}")

        print(f"\nDetailed results saved in: {tester.results_dir}")
    else:
        print("No successful comparisons completed.")


if __name__ == "__main__":
    main()
@ -1,618 +0,0 @@
"""
|
||||
Enhanced Strategy Comparison Test Framework for 2025 Data
|
||||
|
||||
Comprehensive testing for comparing original incremental strategies from cycles/IncStrategies
|
||||
with new implementations in IncrementalTrader/strategies using real 2025 data.
|
||||
|
||||
Features:
|
||||
- Interactive plots using Plotly
|
||||
- CSV export of all signals
|
||||
- Detailed signal analysis
|
||||
- Performance comparison
|
||||
- Real 2025 data (Jan-Apr)
|
||||
"""
|
||||
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
import plotly.graph_objects as go
|
||||
import plotly.subplots as sp
|
||||
from plotly.offline import plot
|
||||
from datetime import datetime
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Tuple, Any
|
||||
import warnings
|
||||
warnings.filterwarnings('ignore')
|
||||
|
||||
# Add project paths
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))
sys.path.insert(0, str(project_root / "cycles"))
sys.path.insert(0, str(project_root / "IncrementalTrader"))

# Import original strategies
from cycles.IncStrategies.metatrend_strategy import IncMetaTrendStrategy
from cycles.IncStrategies.random_strategy import IncRandomStrategy
from cycles.IncStrategies.bbrs_incremental import BBRSIncrementalState

# Import new strategies
from IncrementalTrader.strategies.metatrend import MetaTrendStrategy
from IncrementalTrader.strategies.random import RandomStrategy
from IncrementalTrader.strategies.bbrs import BBRSStrategy

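
# A minimal sketch (our helper, not part of either strategy API): each
# compare_* method below derives a combined BUY/SELL/HOLD label from an
# entry/exit signal pair with the same repeated conditional. Assuming signal
# objects are truthy when present, it can be expressed once as:
def combine_entry_exit(entry_signal, exit_signal) -> str:
    """Collapse an entry/exit signal pair into BUY, SELL, or HOLD."""
    if entry_signal is not None and entry_signal.signal_type == "ENTRY":
        return "BUY"
    if exit_signal is not None and exit_signal.signal_type == "EXIT":
        return "SELL"
    return "HOLD"
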
class Enhanced2025StrategyComparison:
    """Enhanced strategy comparison framework with interactive plots and CSV export."""

    def __init__(self, data_file: str = "data/temp_2025_data.csv"):
        """Initialize the comparison framework."""
        self.data_file = data_file
        self.data = None
        self.results = {}

        # Create results directory
        self.results_dir = Path("test/results/strategies_2025")
        self.results_dir.mkdir(parents=True, exist_ok=True)

        print("Enhanced 2025 Strategy Comparison Framework")
        print("=" * 60)

    def load_data(self) -> None:
        """Load and prepare 2025 data."""
        print(f"Loading data from {self.data_file}...")

        self.data = pd.read_csv(self.data_file)

        # Convert timestamp to datetime
        self.data['DateTime'] = pd.to_datetime(self.data['Timestamp'], unit='s')

        print(f"Data loaded: {len(self.data):,} rows")
        print(f"Date range: {self.data['DateTime'].iloc[0]} to {self.data['DateTime'].iloc[-1]}")
        print(f"Columns: {list(self.data.columns)}")

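    # Illustrative note (the sample values below are made up): load_data()
    # assumes a CSV with epoch-second timestamps and OHLCV columns, matching
    # the column accesses used throughout the compare_* methods, e.g.
    #
    #   Timestamp,Open,High,Low,Close,Volume
    #   1735689600,93425.1,93480.0,93390.5,93442.7,12.3
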
    def compare_metatrend_strategies(self) -> Dict[str, Any]:
        """Compare IncMetaTrendStrategy vs MetaTrendStrategy with detailed analysis."""
        print("\n" + "="*80)
        print("COMPARING METATREND STRATEGIES - 2025 DATA")
        print("="*80)

        try:
            # Initialize strategies
            original_strategy = IncMetaTrendStrategy(weight=1.0, params={})
            new_strategy = MetaTrendStrategy(name="metatrend", weight=1.0, params={})

            # Track all signals and data
            signals_data = []
            price_data = []

            print("Processing data points...")

            # Process data
            for i, row in self.data.iterrows():
                if i % 10000 == 0:
                    print(f"Processed {i:,} / {len(self.data):,} data points...")

                timestamp = row['DateTime']
                ohlcv_data = {
                    'open': row['Open'],
                    'high': row['High'],
                    'low': row['Low'],
                    'close': row['Close'],
                    'volume': row['Volume']
                }

                # Update strategies
                original_strategy.update_minute_data(timestamp, ohlcv_data)
                new_strategy.process_data_point(timestamp, ohlcv_data)

                # Get signals
                orig_entry = original_strategy.get_entry_signal()
                new_entry = new_strategy.get_entry_signal()
                orig_exit = original_strategy.get_exit_signal()
                new_exit = new_strategy.get_exit_signal()

                # Determine combined signals
                orig_signal = "BUY" if orig_entry and orig_entry.signal_type == "ENTRY" else (
                    "SELL" if orig_exit and orig_exit.signal_type == "EXIT" else "HOLD")
                new_signal = "BUY" if new_entry and new_entry.signal_type == "ENTRY" else (
                    "SELL" if new_exit and new_exit.signal_type == "EXIT" else "HOLD")

                # Store data
                signals_data.append({
                    'timestamp': timestamp,
                    'price': row['Close'],
                    'original_entry': orig_entry.signal_type if orig_entry else "HOLD",
                    'new_entry': new_entry.signal_type if new_entry else "HOLD",
                    'original_exit': orig_exit.signal_type if orig_exit else "HOLD",
                    'new_exit': new_exit.signal_type if new_exit else "HOLD",
                    'original_combined': orig_signal,
                    'new_combined': new_signal,
                    'signals_match': orig_signal == new_signal
                })

                price_data.append({
                    'timestamp': timestamp,
                    'open': row['Open'],
                    'high': row['High'],
                    'low': row['Low'],
                    'close': row['Close'],
                    'volume': row['Volume']
                })

            # Convert to DataFrame
            signals_df = pd.DataFrame(signals_data)
            price_df = pd.DataFrame(price_data)

            # Calculate statistics (guard against an empty dataset)
            total_signals = len(signals_df)
            matching_signals = signals_df['signals_match'].sum()
            consistency = (matching_signals / total_signals) * 100 if total_signals else 0.0

            # Signal distribution
            orig_signal_counts = signals_df['original_combined'].value_counts()
            new_signal_counts = signals_df['new_combined'].value_counts()

            # Save signals to CSV
            csv_file = self.results_dir / "metatrend_signals_2025.csv"
            signals_df.to_csv(csv_file, index=False, encoding='utf-8')

            # Create interactive plot
            self.create_interactive_plot(signals_df, price_df, "MetaTrend", "metatrend_2025")

            results = {
                'strategy': 'MetaTrend',
                'total_signals': total_signals,
                'matching_signals': matching_signals,
                'consistency_percentage': consistency,
                'original_signal_distribution': orig_signal_counts.to_dict(),
                'new_signal_distribution': new_signal_counts.to_dict(),
                'signals_dataframe': signals_df,
                'csv_file': str(csv_file)
            }

            print(f"✅ MetaTrend Strategy Comparison Complete")
            print(f"   Signal Consistency: {consistency:.2f}%")
            print(f"   Total Signals: {total_signals:,}")
            print(f"   Matching Signals: {matching_signals:,}")
            print(f"   CSV Saved: {csv_file}")

            return results

        except Exception as e:
            print(f"❌ Error in MetaTrend comparison: {str(e)}")
            import traceback
            traceback.print_exc()
            return {'error': str(e)}

    def compare_random_strategies(self) -> Dict[str, Any]:
        """Compare IncRandomStrategy vs RandomStrategy with detailed analysis."""
        print("\n" + "="*80)
        print("COMPARING RANDOM STRATEGIES - 2025 DATA")
        print("="*80)

        try:
            # Initialize strategies with same seed for reproducibility
            original_strategy = IncRandomStrategy(weight=1.0, params={"random_seed": 42})
            new_strategy = RandomStrategy(name="random", weight=1.0, params={"random_seed": 42})

            # Track all signals and data
            signals_data = []

            print("Processing data points...")

            # Process data (use subset for Random strategy to speed up)
            subset_data = self.data.iloc[::10]  # Every 10th point for Random strategy

            # Enumerate so progress reporting counts subset rows, not the
            # original DataFrame index (which advances in steps of 10)
            for count, (i, row) in enumerate(subset_data.iterrows()):
                if count % 1000 == 0:
                    print(f"Processed {count:,} / {len(subset_data):,} data points...")

                timestamp = row['DateTime']
                ohlcv_data = {
                    'open': row['Open'],
                    'high': row['High'],
                    'low': row['Low'],
                    'close': row['Close'],
                    'volume': row['Volume']
                }

                # Update strategies
                original_strategy.update_minute_data(timestamp, ohlcv_data)
                new_strategy.process_data_point(timestamp, ohlcv_data)

                # Get signals
                orig_entry = original_strategy.get_entry_signal()
                new_entry = new_strategy.get_entry_signal()
                orig_exit = original_strategy.get_exit_signal()
                new_exit = new_strategy.get_exit_signal()

                # Determine combined signals
                orig_signal = "BUY" if orig_entry and orig_entry.signal_type == "ENTRY" else (
                    "SELL" if orig_exit and orig_exit.signal_type == "EXIT" else "HOLD")
                new_signal = "BUY" if new_entry and new_entry.signal_type == "ENTRY" else (
                    "SELL" if new_exit and new_exit.signal_type == "EXIT" else "HOLD")

                # Store data
                signals_data.append({
                    'timestamp': timestamp,
                    'price': row['Close'],
                    'original_entry': orig_entry.signal_type if orig_entry else "HOLD",
                    'new_entry': new_entry.signal_type if new_entry else "HOLD",
                    'original_exit': orig_exit.signal_type if orig_exit else "HOLD",
                    'new_exit': new_exit.signal_type if new_exit else "HOLD",
                    'original_combined': orig_signal,
                    'new_combined': new_signal,
                    'signals_match': orig_signal == new_signal
                })

            # Convert to DataFrame
            signals_df = pd.DataFrame(signals_data)

            # Calculate statistics (guard against an empty dataset)
            total_signals = len(signals_df)
            matching_signals = signals_df['signals_match'].sum()
            consistency = (matching_signals / total_signals) * 100 if total_signals else 0.0

            # Save signals to CSV
            csv_file = self.results_dir / "random_signals_2025.csv"
            signals_df.to_csv(csv_file, index=False, encoding='utf-8')

            results = {
                'strategy': 'Random',
                'total_signals': total_signals,
                'matching_signals': matching_signals,
                'consistency_percentage': consistency,
                'signals_dataframe': signals_df,
                'csv_file': str(csv_file)
            }

            print(f"✅ Random Strategy Comparison Complete")
            print(f"   Signal Consistency: {consistency:.2f}%")
            print(f"   Total Signals: {total_signals:,}")
            print(f"   CSV Saved: {csv_file}")

            return results

        except Exception as e:
            print(f"❌ Error in Random comparison: {str(e)}")
            import traceback
            traceback.print_exc()
            return {'error': str(e)}

    def compare_bbrs_strategies(self) -> Dict[str, Any]:
        """Compare BBRSIncrementalState vs BBRSStrategy with detailed analysis."""
        print("\n" + "="*80)
        print("COMPARING BBRS STRATEGIES - 2025 DATA")
        print("="*80)

        try:
            # Initialize strategies
            bbrs_config = {
                "bb_period": 20,
                "bb_std": 2.0,
                "rsi_period": 14,
                "volume_ma_period": 20
            }

            original_strategy = BBRSIncrementalState(config=bbrs_config)
            new_strategy = BBRSStrategy(name="bbrs", weight=1.0, params=bbrs_config)

            # Track all signals and data
            signals_data = []

            print("Processing data points...")

            # Process data
            for i, row in self.data.iterrows():
                if i % 10000 == 0:
                    print(f"Processed {i:,} / {len(self.data):,} data points...")

                timestamp = row['DateTime']
                ohlcv_data = {
                    'open': row['Open'],
                    'high': row['High'],
                    'low': row['Low'],
                    'close': row['Close'],
                    'volume': row['Volume']
                }

                # Update strategies
                orig_result = original_strategy.update_minute_data(timestamp, ohlcv_data)
                new_strategy.process_data_point(timestamp, ohlcv_data)

                # Get signals - original returns signals in result, new uses methods
                if orig_result is not None:
                    orig_buy = orig_result.get('buy_signal', False)
                    orig_sell = orig_result.get('sell_signal', False)
                else:
                    orig_buy = False
                    orig_sell = False

                new_entry = new_strategy.get_entry_signal()
                new_exit = new_strategy.get_exit_signal()
                new_buy = bool(new_entry and new_entry.signal_type == "ENTRY")
                new_sell = bool(new_exit and new_exit.signal_type == "EXIT")

                # Determine combined signals
                orig_signal = "BUY" if orig_buy else ("SELL" if orig_sell else "HOLD")
                new_signal = "BUY" if new_buy else ("SELL" if new_sell else "HOLD")

                # Store data
                signals_data.append({
                    'timestamp': timestamp,
                    'price': row['Close'],
                    'original_entry': "ENTRY" if orig_buy else "HOLD",
                    'new_entry': new_entry.signal_type if new_entry else "HOLD",
                    'original_exit': "EXIT" if orig_sell else "HOLD",
                    'new_exit': new_exit.signal_type if new_exit else "HOLD",
                    'original_combined': orig_signal,
                    'new_combined': new_signal,
                    'signals_match': orig_signal == new_signal
                })

            # Convert to DataFrame
            signals_df = pd.DataFrame(signals_data)

            # Calculate statistics (guard against an empty dataset)
            total_signals = len(signals_df)
            matching_signals = signals_df['signals_match'].sum()
            consistency = (matching_signals / total_signals) * 100 if total_signals else 0.0

            # Save signals to CSV
            csv_file = self.results_dir / "bbrs_signals_2025.csv"
            signals_df.to_csv(csv_file, index=False, encoding='utf-8')

            # Create interactive plot; create_interactive_plot expects lowercase
            # 'timestamp'/'close' columns, so build a matching price frame
            # instead of passing self.data directly
            price_df = pd.DataFrame({
                'timestamp': self.data['DateTime'],
                'close': self.data['Close']
            })
            self.create_interactive_plot(signals_df, price_df, "BBRS", "bbrs_2025")

            results = {
                'strategy': 'BBRS',
                'total_signals': total_signals,
                'matching_signals': matching_signals,
                'consistency_percentage': consistency,
                'signals_dataframe': signals_df,
                'csv_file': str(csv_file)
            }

            print(f"✅ BBRS Strategy Comparison Complete")
            print(f"   Signal Consistency: {consistency:.2f}%")
            print(f"   Total Signals: {total_signals:,}")
            print(f"   CSV Saved: {csv_file}")

            return results

        except Exception as e:
            print(f"❌ Error in BBRS comparison: {str(e)}")
            import traceback
            traceback.print_exc()
            return {'error': str(e)}

    def create_interactive_plot(self, signals_df: pd.DataFrame, price_df: pd.DataFrame,
                                strategy_name: str, filename: str) -> None:
        """Create interactive Plotly chart with signals and price data."""
        print(f"Creating interactive plot for {strategy_name}...")

        # Create subplots
        fig = sp.make_subplots(
            rows=3, cols=1,
            shared_xaxes=True,
            vertical_spacing=0.05,
            subplot_titles=(
                f'{strategy_name} Strategy - Price & Signals',
                'Signal Comparison',
                'Signal Consistency'
            ),
            row_heights=[0.6, 0.2, 0.2]
        )

        # Price chart with signals
        fig.add_trace(
            go.Scatter(
                x=price_df['timestamp'],
                y=price_df['close'],
                mode='lines',
                name='Price',
                line=dict(color='blue', width=1)
            ),
            row=1, col=1
        )

        # Add buy signals
        buy_signals_orig = signals_df[signals_df['original_combined'] == 'BUY']
        buy_signals_new = signals_df[signals_df['new_combined'] == 'BUY']

        if len(buy_signals_orig) > 0:
            fig.add_trace(
                go.Scatter(
                    x=buy_signals_orig['timestamp'],
                    y=buy_signals_orig['price'],
                    mode='markers',
                    name='Original BUY',
                    marker=dict(color='green', size=8, symbol='triangle-up')
                ),
                row=1, col=1
            )

        if len(buy_signals_new) > 0:
            fig.add_trace(
                go.Scatter(
                    x=buy_signals_new['timestamp'],
                    y=buy_signals_new['price'],
                    mode='markers',
                    name='New BUY',
                    marker=dict(color='lightgreen', size=6, symbol='triangle-up')
                ),
                row=1, col=1
            )

        # Add sell signals
        sell_signals_orig = signals_df[signals_df['original_combined'] == 'SELL']
        sell_signals_new = signals_df[signals_df['new_combined'] == 'SELL']

        if len(sell_signals_orig) > 0:
            fig.add_trace(
                go.Scatter(
                    x=sell_signals_orig['timestamp'],
                    y=sell_signals_orig['price'],
                    mode='markers',
                    name='Original SELL',
                    marker=dict(color='red', size=8, symbol='triangle-down')
                ),
                row=1, col=1
            )

        if len(sell_signals_new) > 0:
            fig.add_trace(
                go.Scatter(
                    x=sell_signals_new['timestamp'],
                    y=sell_signals_new['price'],
                    mode='markers',
                    name='New SELL',
                    marker=dict(color='pink', size=6, symbol='triangle-down')
                ),
                row=1, col=1
            )

        # Signal comparison chart
        signal_mapping = {'HOLD': 0, 'BUY': 1, 'SELL': -1}
        signals_df['original_numeric'] = signals_df['original_combined'].map(signal_mapping)
        signals_df['new_numeric'] = signals_df['new_combined'].map(signal_mapping)

        fig.add_trace(
            go.Scatter(
                x=signals_df['timestamp'],
                y=signals_df['original_numeric'],
                mode='lines',
                name='Original Signals',
                line=dict(color='blue', width=2)
            ),
            row=2, col=1
        )

        fig.add_trace(
            go.Scatter(
                x=signals_df['timestamp'],
                y=signals_df['new_numeric'],
                mode='lines',
                name='New Signals',
                line=dict(color='red', width=1, dash='dash')
            ),
            row=2, col=1
        )

        # Signal consistency chart
        signals_df['consistency_numeric'] = signals_df['signals_match'].astype(int)

        fig.add_trace(
            go.Scatter(
                x=signals_df['timestamp'],
                y=signals_df['consistency_numeric'],
                mode='lines',
                name='Signal Match',
                line=dict(color='green', width=1),
                fill='tozeroy'  # fill down to zero; this is the only trace in row 3
            ),
            row=3, col=1
        )

        # Update layout
        fig.update_layout(
            title=f'{strategy_name} Strategy Comparison - 2025 Data',
            height=800,
            showlegend=True,
            hovermode='x unified'
        )

        # Update y-axes
        fig.update_yaxes(title_text="Price ($)", row=1, col=1)
        fig.update_yaxes(title_text="Signal", row=2, col=1, tickvals=[-1, 0, 1], ticktext=['SELL', 'HOLD', 'BUY'])
        fig.update_yaxes(title_text="Match", row=3, col=1, tickvals=[0, 1], ticktext=['No', 'Yes'])

        # Save interactive plot
        html_file = self.results_dir / f"{filename}_interactive.html"
        plot(fig, filename=str(html_file), auto_open=False)

        print(f"   Interactive plot saved: {html_file}")

    def generate_comprehensive_report(self) -> None:
        """Generate comprehensive comparison report."""
        print("\n" + "="*80)
        print("GENERATING COMPREHENSIVE REPORT")
        print("="*80)

        report_file = self.results_dir / "comprehensive_strategy_comparison_2025.md"

        with open(report_file, 'w', encoding='utf-8') as f:
            f.write("# Comprehensive Strategy Comparison Report - 2025 Data\n\n")
            f.write(f"**Generated**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n")
            f.write(f"**Data Period**: January 1, 2025 - April 30, 2025\n")
            f.write(f"**Total Data Points**: {len(self.data):,} minute-level OHLCV records\n\n")

            f.write("## Executive Summary\n\n")
            f.write("This report compares the signal generation consistency between original incremental strategies ")
            f.write("from `cycles/IncStrategies` and new implementations in `IncrementalTrader/strategies` ")
            f.write("using real market data from 2025.\n\n")

            f.write("## Strategy Comparison Results\n\n")

            for strategy_name, results in self.results.items():
                if 'error' not in results:
                    f.write(f"### {results['strategy']} Strategy\n\n")
                    f.write(f"- **Signal Consistency**: {results['consistency_percentage']:.2f}%\n")
                    f.write(f"- **Total Signals Compared**: {results['total_signals']:,}\n")
                    f.write(f"- **Matching Signals**: {results['matching_signals']:,}\n")
                    f.write(f"- **CSV Export**: `{results['csv_file']}`\n\n")

                    if 'original_signal_distribution' in results:
                        f.write("**Original Strategy Signal Distribution:**\n")
                        for signal, count in results['original_signal_distribution'].items():
                            f.write(f"- {signal}: {count:,}\n")
                        f.write("\n")

                        f.write("**New Strategy Signal Distribution:**\n")
                        for signal, count in results['new_signal_distribution'].items():
                            f.write(f"- {signal}: {count:,}\n")
                        f.write("\n")

            f.write("## Files Generated\n\n")
            f.write("### CSV Signal Exports\n")
            for csv_file in self.results_dir.glob("*_signals_2025.csv"):
                f.write(f"- `{csv_file.name}`: Complete signal history with timestamps\n")

            f.write("\n### Interactive Plots\n")
            for html_file in self.results_dir.glob("*_interactive.html"):
                f.write(f"- `{html_file.name}`: Interactive Plotly visualization\n")

            f.write("\n## Conclusion\n\n")
            f.write("The strategy comparison validates the migration accuracy by comparing signal generation ")
            f.write("between original and refactored implementations. High consistency percentages indicate ")
            f.write("successful preservation of strategy behavior during the refactoring process.\n")

        print(f"✅ Comprehensive report saved: {report_file}")

    def run_all_comparisons(self) -> None:
        """Run all strategy comparisons."""
        print("Starting comprehensive strategy comparison with 2025 data...")

        # Load data
        self.load_data()

        # Run comparisons
        self.results['metatrend'] = self.compare_metatrend_strategies()
        self.results['random'] = self.compare_random_strategies()
        self.results['bbrs'] = self.compare_bbrs_strategies()

        # Generate report
        self.generate_comprehensive_report()

        print("\n" + "="*80)
        print("ALL STRATEGY COMPARISONS COMPLETED")
        print("="*80)
        print(f"Results directory: {self.results_dir}")
        print("Files generated:")
        for file in sorted(self.results_dir.glob("*")):
            print(f"  - {file.name}")


if __name__ == "__main__":
    # Run the enhanced comparison
    comparison = Enhanced2025StrategyComparison()
    comparison.run_all_comparisons()