4.0 Implement real-time strategy execution and data integration features
- Added `realtime_execution.py` for real-time strategy execution, enabling live signal generation and integration with the dashboard's chart refresh cycle.
- Introduced `data_integration.py` to manage market data orchestration, caching, and technical indicator calculations for strategy signal generation.
- Implemented `validation.py` for comprehensive validation and quality assessment of strategy-generated signals, ensuring reliability and consistency.
- Developed `batch_processing.py` to facilitate efficient backtesting of multiple strategies across large datasets with memory management and performance optimization.
- Updated `__init__.py` files to include new modules and ensure proper exports, enhancing modularity and maintainability.
- Enhanced unit tests for the new features, ensuring robust functionality and adherence to project standards.

These changes establish a solid foundation for real-time strategy execution and data integration, aligning with project goals for modularity, performance, and maintainability.
This commit is contained in:
798
tests/strategies/test_batch_processing.py
Normal file
798
tests/strategies/test_batch_processing.py
Normal file
@@ -0,0 +1,798 @@
|
||||
"""
|
||||
Tests for Strategy Batch Processing
|
||||
|
||||
This module tests batch processing capabilities for strategy backtesting
|
||||
including memory management, parallel processing, and performance monitoring.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from unittest.mock import patch, MagicMock
|
||||
from datetime import datetime, timezone
|
||||
import pandas as pd
|
||||
|
||||
from strategies.batch_processing import BacktestingBatchProcessor, BatchProcessingConfig
|
||||
from strategies.data_types import StrategyResult, StrategySignal, SignalType
|
||||
|
||||
|
||||
class TestBatchProcessingConfig:
    """Unit tests for the BatchProcessingConfig dataclass."""

    def test_default_config(self):
        """A freshly constructed config exposes the documented defaults."""
        config = BatchProcessingConfig()

        # Expected default for every public configuration field.
        expected_defaults = {
            'max_concurrent_strategies': 4,
            'max_memory_usage_percent': 80.0,
            'chunk_size_days': 30,
            'result_cache_size': 1000,
            'progress_reporting_interval': 10,
        }
        for field_name, expected in expected_defaults.items():
            assert getattr(config, field_name) == expected
        # Boolean toggles default to enabled; check identity, not just truthiness.
        assert config.enable_memory_monitoring is True
        assert config.enable_result_validation is True

    def test_custom_config(self):
        """Every field supplied explicitly is stored unchanged."""
        overrides = {
            'max_concurrent_strategies': 8,
            'max_memory_usage_percent': 90.0,
            'chunk_size_days': 60,
            'result_cache_size': 500,
            'progress_reporting_interval': 5,
        }
        config = BatchProcessingConfig(
            enable_memory_monitoring=False,
            enable_result_validation=False,
            **overrides,
        )

        for field_name, expected in overrides.items():
            assert getattr(config, field_name) == expected
        assert config.enable_memory_monitoring is False
        assert config.enable_result_validation is False
class TestBacktestingBatchProcessor:
    """Unit tests for BacktestingBatchProcessor."""

    @pytest.fixture
    def processor(self):
        """Build a processor tuned for tests: no memory monitor, no validation."""
        test_config = BatchProcessingConfig(
            enable_memory_monitoring=False,   # keep tests free of psutil polling
            progress_reporting_interval=1,    # report after every strategy
            enable_result_validation=False,   # validation path tested separately
        )
        with patch('strategies.batch_processing.StrategyDataIntegrator'):
            return BacktestingBatchProcessor(test_config)

    @pytest.fixture
    def sample_strategy_configs(self):
        """Three representative strategy configurations."""
        return [
            {
                'name': 'ema_crossover',
                'type': 'trend_following',
                'parameters': {'fast_ema': 12, 'slow_ema': 26},
            },
            {
                'name': 'rsi_momentum',
                'type': 'momentum',
                'parameters': {'rsi_period': 14, 'oversold': 30, 'overbought': 70},
            },
            {
                'name': 'macd_trend',
                'type': 'trend_following',
                'parameters': {'fast_ema': 12, 'slow_ema': 26, 'signal': 9},
            },
        ]

    @pytest.fixture
    def sample_strategy_results(self):
        """A single StrategyResult carrying one BUY signal."""
        now = datetime.now(timezone.utc)
        buy_signal = StrategySignal(
            timestamp=now,
            symbol='BTC-USDT',
            timeframe='1h',
            signal_type=SignalType.BUY,
            price=50000.0,
            confidence=0.8,
            metadata={'rsi': 30},
        )
        return [
            StrategyResult(
                timestamp=now,
                symbol='BTC-USDT',
                timeframe='1h',
                strategy_name='test_strategy',
                signals=[buy_signal],
                indicators_used={'rsi': 30, 'ema': 49000},
                metadata={'execution_time': 0.5},
            )
        ]

    def test_initialization(self, processor):
        """A new processor has its collaborators wired and zeroed counters."""
        assert processor.config is not None
        assert processor.logger is not None
        assert processor.data_integrator is not None
        # All statistics start at zero.
        for counter in ('strategies_processed', 'total_signals_generated', 'errors_count'):
            assert processor._processing_stats[counter] == 0

    def test_initialization_with_validation_disabled(self):
        """Disabling validation in the config leaves the validator unset."""
        no_validation = BatchProcessingConfig(enable_result_validation=False)
        with patch('strategies.batch_processing.StrategyDataIntegrator'):
            built = BacktestingBatchProcessor(no_validation)
            assert built.signal_validator is None

    @patch('strategies.batch_processing.StrategyDataIntegrator')
    def test_process_strategies_batch(self, mock_integrator_class, processor, sample_strategy_configs, sample_strategy_results):
        """Batch processing returns one entry per strategy and tracks stats."""
        fake_integrator = MagicMock()
        fake_integrator.calculate_strategy_signals_orchestrated.return_value = sample_strategy_results
        processor.data_integrator = fake_integrator

        results = processor.process_strategies_batch(
            strategy_configs=sample_strategy_configs,
            symbols=['BTC-USDT', 'ETH-USDT'],
            timeframe='1h',
            days_back=30,
        )

        # Results are keyed by strategy name, one key per configured strategy.
        assert len(results) == len(sample_strategy_configs)
        for expected_name in ('ema_crossover', 'rsi_momentum', 'macd_trend'):
            assert expected_name in results

        stats = processor.get_processing_statistics()
        assert stats['strategies_processed'] == 3
        assert stats['total_signals_generated'] == 6  # 3 strategies × 2 symbols × 1 signal each
        assert stats['errors_count'] == 0

    def test_process_single_strategy_batch(self, processor, sample_strategy_results):
        """A single strategy processed across symbols yields per-symbol results."""
        fake_integrator = MagicMock()
        fake_integrator.calculate_strategy_signals_orchestrated.return_value = sample_strategy_results
        processor.data_integrator = fake_integrator

        results = processor._process_single_strategy_batch(
            {'name': 'test_strategy', 'type': 'test'},
            ['BTC-USDT', 'ETH-USDT'],
            '1h',
            30,
            'okx',
        )

        assert len(results) == 2  # one result set per symbol
        assert processor._processing_stats['total_signals_generated'] == 2

    def test_validate_strategy_results(self, processor, sample_strategy_results):
        """Validation delegates to the signal validator and keeps valid signals."""
        fake_validator = MagicMock()
        fake_validator.validate_signals_batch.return_value = (
            sample_strategy_results[0].signals,  # everything valid
            [],                                  # nothing rejected
        )
        processor.signal_validator = fake_validator

        validated = processor._validate_strategy_results(sample_strategy_results)

        assert len(validated) == 1
        assert len(validated[0].signals) == 1
        fake_validator.validate_signals_batch.assert_called_once()

    @patch('strategies.batch_processing.psutil')
    def test_check_memory_usage_normal(self, mock_psutil, processor):
        """Below the threshold only the peak-memory statistic is updated."""
        fake_process = MagicMock()
        fake_process.memory_percent.return_value = 60.0  # under the 80% threshold
        fake_process.memory_info.return_value.rss = 500 * 1024 * 1024  # 500 MB
        mock_psutil.Process.return_value = fake_process

        processor._check_memory_usage()

        assert processor._processing_stats['memory_peak_mb'] == 500.0

    @patch('strategies.batch_processing.psutil')
    def test_check_memory_usage_high(self, mock_psutil, processor):
        """Crossing the threshold triggers a memory cleanup."""
        fake_process = MagicMock()
        fake_process.memory_percent.return_value = 85.0  # over the 80% threshold
        fake_process.memory_info.return_value.rss = 1000 * 1024 * 1024  # 1000 MB
        mock_psutil.Process.return_value = fake_process

        with patch.object(processor, '_cleanup_memory') as mock_cleanup:
            processor._check_memory_usage()
            mock_cleanup.assert_called_once()

    def test_cleanup_memory(self, processor):
        """Cleanup trims the result cache and triggers downstream cleanup."""
        # Overfill the cache well past the configured limit.
        for idx in range(1500):
            processor._result_cache[f'key_{idx}'] = f'result_{idx}'
        size_before = len(processor._result_cache)

        with patch.object(processor.data_integrator, 'clear_cache') as mock_clear, \
             patch('strategies.batch_processing.gc.collect') as mock_gc:
            processor._cleanup_memory()

            # Cache shrinks to half the configured cache size.
            assert len(processor._result_cache) < size_before
            assert len(processor._result_cache) == 500
            # Integrator cache and the garbage collector are both invoked.
            mock_clear.assert_called_once()
            mock_gc.assert_called_once()

    def test_get_processing_statistics(self, processor):
        """Derived statistics are computed from the raw counters."""
        processor._processing_stats.update({
            'strategies_processed': 5,
            'total_signals_generated': 25,
            'processing_time_seconds': 10.0,
            'errors_count': 1,
            'validation_failures': 2,
        })

        stats = processor.get_processing_statistics()

        assert stats['strategies_processed'] == 5
        assert stats['total_signals_generated'] == 25
        assert stats['average_signals_per_strategy'] == 5.0
        assert stats['average_processing_time_per_strategy'] == 2.0
        assert stats['error_rate'] == 20.0              # 1/5 * 100
        assert stats['validation_failure_rate'] == 8.0  # 2/25 * 100

    def test_get_processing_statistics_zero_division(self, processor):
        """Derived ratios fall back to zero instead of raising on empty stats."""
        stats = processor.get_processing_statistics()

        assert stats['average_signals_per_strategy'] == 0
        assert stats['average_processing_time_per_strategy'] == 0
        assert stats['error_rate'] == 0.0
        assert stats['validation_failure_rate'] == 0.0

    def test_process_strategies_batch_with_error(self, processor, sample_strategy_configs):
        """Integrator failures are counted, not propagated."""
        failing_integrator = MagicMock()
        failing_integrator.calculate_strategy_signals_orchestrated.side_effect = Exception("Test error")
        processor.data_integrator = failing_integrator

        results = processor.process_strategies_batch(
            strategy_configs=sample_strategy_configs,
            symbols=['BTC-USDT'],
            timeframe='1h',
            days_back=30,
        )

        # Errors are swallowed per strategy and reflected in the stats.
        assert isinstance(results, dict)
        assert processor._processing_stats['errors_count'] > 0

    @patch('strategies.batch_processing.StrategyDataIntegrator')
    def test_process_strategies_parallel(self, mock_integrator_class, processor, sample_strategy_configs, sample_strategy_results):
        """Parallel processing mirrors the sequential result structure."""
        fake_integrator = MagicMock()
        fake_integrator.calculate_strategy_signals_orchestrated.return_value = sample_strategy_results
        processor.data_integrator = fake_integrator

        results = processor.process_strategies_parallel(
            strategy_configs=sample_strategy_configs,
            symbols=['BTC-USDT', 'ETH-USDT'],
            timeframe='1h',
            days_back=30,
        )

        # Same shape as the sequential API: one entry per strategy name.
        assert len(results) == len(sample_strategy_configs)
        for expected_name in ('ema_crossover', 'rsi_momentum', 'macd_trend'):
            assert expected_name in results

        stats = processor.get_processing_statistics()
        assert stats['strategies_processed'] == 3
        assert stats['total_signals_generated'] == 6  # 3 strategies × 2 symbols × 1 signal each
        assert stats['errors_count'] == 0

    def test_process_symbols_parallel(self, processor, sample_strategy_results):
        """One strategy fanned out over symbols returns per-symbol results."""
        fake_integrator = MagicMock()
        fake_integrator.calculate_strategy_signals_orchestrated.return_value = sample_strategy_results
        processor.data_integrator = fake_integrator

        results = processor.process_symbols_parallel(
            strategy_config={'name': 'test_strategy', 'type': 'test'},
            symbols=['BTC-USDT', 'ETH-USDT', 'BNB-USDT'],
            timeframe='1h',
            days_back=30,
        )

        assert len(results) == 3  # one result set per symbol
        assert processor._processing_stats['total_signals_generated'] == 3

    def test_process_strategy_for_symbol(self, processor, sample_strategy_results):
        """A strategy/symbol pair produces a tagged result list."""
        fake_integrator = MagicMock()
        fake_integrator.calculate_strategy_signals_orchestrated.return_value = sample_strategy_results
        processor.data_integrator = fake_integrator

        results = processor._process_strategy_for_symbol(
            strategy_config={'name': 'test_strategy', 'type': 'test'},
            symbol='BTC-USDT',
            timeframe='1h',
            days_back=30,
            exchange='okx',
        )

        assert len(results) == 1
        assert results[0].strategy_name == 'test_strategy'
        assert results[0].symbol == 'BTC-USDT'

    def test_process_strategy_for_symbol_with_error(self, processor):
        """A failing integrator yields an empty result list for the symbol."""
        failing_integrator = MagicMock()
        failing_integrator.calculate_strategy_signals_orchestrated.side_effect = Exception("Test error")
        processor.data_integrator = failing_integrator

        results = processor._process_strategy_for_symbol(
            strategy_config={'name': 'test_strategy', 'type': 'test'},
            symbol='BTC-USDT',
            timeframe='1h',
            days_back=30,
            exchange='okx',
        )

        # Errors degrade to "no results" rather than raising.
        assert results == []

    def test_process_large_dataset_streaming(self, processor, sample_strategy_configs, sample_strategy_results):
        """Streaming splits the date range into chunk-sized pieces."""
        fake_integrator = MagicMock()
        fake_integrator.calculate_strategy_signals_orchestrated.return_value = sample_strategy_results
        processor.data_integrator = fake_integrator

        # Avoid real parallel execution; each chunk returns canned results.
        with patch.object(processor, 'process_strategies_parallel') as mock_parallel:
            mock_parallel.return_value = {'test_strategy': sample_strategy_results}

            stream = processor.process_large_dataset_streaming(
                strategy_configs=sample_strategy_configs,
                symbols=['BTC-USDT'],
                timeframe='1h',
                total_days_back=90,  # 3 chunks at 30 days each
            )

            chunks = list(stream)

            assert len(chunks) == 3  # 90 days / 30-day chunks
            for chunk in chunks:
                assert 'test_strategy' in chunk

    def test_aggregate_streaming_results(self, processor, sample_strategy_results):
        """Per-chunk results are merged per strategy across the stream."""
        one_result = sample_strategy_results[:1]
        chunk_stream = iter([
            {'strategy1': one_result, 'strategy2': []},
            {'strategy1': [], 'strategy2': one_result},
            {'strategy1': one_result, 'strategy2': one_result},
        ])

        aggregated = processor.aggregate_streaming_results(chunk_stream)

        assert len(aggregated) == 2
        assert 'strategy1' in aggregated
        assert 'strategy2' in aggregated
        assert len(aggregated['strategy1']) == 2  # chunks 1 and 3
        assert len(aggregated['strategy2']) == 2  # chunks 2 and 3

    @patch('strategies.batch_processing.psutil')
    def test_process_with_memory_constraints_sufficient_memory(self, mock_psutil, processor, sample_strategy_configs):
        """Plenty of headroom selects the parallel path."""
        fake_process = MagicMock()
        fake_process.memory_info.return_value.rss = 100 * 1024 * 1024  # 100 MB used
        mock_psutil.Process.return_value = fake_process

        with patch.object(processor, 'process_strategies_parallel') as mock_parallel:
            mock_parallel.return_value = {}

            processor.process_with_memory_constraints(
                strategy_configs=sample_strategy_configs,
                symbols=['BTC-USDT'],
                timeframe='1h',
                days_back=30,
                max_memory_mb=1000.0,  # generous limit
            )

            mock_parallel.assert_called_once()

    @patch('strategies.batch_processing.psutil')
    def test_process_with_memory_constraints_moderate_constraint(self, mock_psutil, processor, sample_strategy_configs):
        """Moderate pressure falls back to sequential batch processing."""
        fake_process = MagicMock()
        fake_process.memory_info.return_value.rss = 400 * 1024 * 1024  # 400 MB used
        mock_psutil.Process.return_value = fake_process

        with patch.object(processor, 'process_strategies_batch') as mock_batch:
            mock_batch.return_value = {}

            processor.process_with_memory_constraints(
                strategy_configs=sample_strategy_configs,
                symbols=['BTC-USDT'],
                timeframe='1h',
                days_back=30,
                max_memory_mb=500.0,  # moderate limit
            )

            mock_batch.assert_called_once()

    @patch('strategies.batch_processing.psutil')
    def test_process_with_memory_constraints_severe_constraint(self, mock_psutil, processor, sample_strategy_configs):
        """Severe pressure switches to streaming with warm-up plus aggregation."""
        fake_process = MagicMock()
        fake_process.memory_info.return_value.rss = 450 * 1024 * 1024  # 450 MB used
        mock_psutil.Process.return_value = fake_process

        with patch.object(processor, 'process_large_dataset_streaming_with_warmup') as mock_streaming, \
             patch.object(processor, 'aggregate_streaming_results') as mock_aggregate:
            mock_streaming.return_value = iter([{}])
            mock_aggregate.return_value = {}

            processor.process_with_memory_constraints(
                strategy_configs=sample_strategy_configs,
                symbols=['BTC-USDT'],
                timeframe='1h',
                days_back=30,
                max_memory_mb=500.0,  # low limit relative to usage
            )

            mock_streaming.assert_called_once()
            mock_aggregate.assert_called_once()

    def test_get_performance_metrics(self, processor):
        """The metrics bundle exposes derived rates and recommendations."""
        processor._processing_stats.update({
            'strategies_processed': 5,
            'total_signals_generated': 25,
            'processing_time_seconds': 10.0,
            'memory_peak_mb': 500.0,
            'errors_count': 1,
            'validation_failures': 2,
        })

        with patch.object(processor.data_integrator, 'get_cache_stats') as mock_cache_stats:
            mock_cache_stats.return_value = {'cache_hits': 80, 'cache_misses': 20}

            metrics = processor.get_performance_metrics()

            for key in (
                'cache_hit_rate',
                'memory_efficiency',
                'throughput_signals_per_second',
                'parallel_efficiency',
                'optimization_recommendations',
            ):
                assert key in metrics

            assert metrics['cache_hit_rate'] == 80.0                 # 80/(80+20) * 100
            assert metrics['throughput_signals_per_second'] == 2.5   # 25 signals / 10 s

    def test_calculate_cache_hit_rate(self, processor):
        """Hit rate is hits over total lookups, as a percentage."""
        with patch.object(processor.data_integrator, 'get_cache_stats') as mock_cache_stats:
            mock_cache_stats.return_value = {'cache_hits': 70, 'cache_misses': 30}

            assert processor._calculate_cache_hit_rate() == 70.0  # 70/(70+30) * 100

    def test_calculate_memory_efficiency(self, processor):
        """Efficiency compares per-strategy memory against the 100 MB baseline."""
        processor._processing_stats.update({
            'memory_peak_mb': 200.0,
            'strategies_processed': 2,
        })

        # 200 MB / 2 strategies = 100 MB each → 50% against the baseline.
        assert processor._calculate_memory_efficiency() == 50.0

    def test_generate_optimization_recommendations(self, processor):
        """Poor metrics produce at least one memory-related recommendation."""
        processor._processing_stats.update({
            'strategies_processed': 1,
            'total_signals_generated': 1,
            'processing_time_seconds': 10.0,
            'memory_peak_mb': 1000.0,  # heavy memory footprint
            'errors_count': 2,         # high error rate
            'validation_failures': 0,
        })

        with patch.object(processor.data_integrator, 'get_cache_stats') as mock_cache_stats:
            mock_cache_stats.return_value = {'cache_hits': 1, 'cache_misses': 9}  # poor caching

            recommendations = processor._generate_optimization_recommendations()

            assert isinstance(recommendations, list)
            assert len(recommendations) > 0
            assert any('memory efficiency' in rec.lower() for rec in recommendations)

    def test_optimize_configuration(self, processor):
        """Poor memory efficiency dials down concurrency and chunk size."""
        processor._processing_stats.update({
            'strategies_processed': 4,
            'total_signals_generated': 20,
            'processing_time_seconds': 8.0,
            'memory_peak_mb': 2000.0,  # very heavy memory footprint
            'errors_count': 0,
            'validation_failures': 0,
        })

        with patch.object(processor.data_integrator, 'get_cache_stats') as mock_cache_stats:
            mock_cache_stats.return_value = {'cache_hits': 10, 'cache_misses': 90}

            workers_before = processor.config.max_concurrent_strategies
            chunk_before = processor.config.chunk_size_days

            tuned = processor.optimize_configuration()

            # Tuning never increases the knobs under memory pressure.
            assert tuned.max_concurrent_strategies <= workers_before
            assert tuned.chunk_size_days <= chunk_before

    def test_benchmark_processing_methods(self, processor, sample_strategy_configs):
        """Benchmarking runs both methods and emits a recommendation."""
        with patch.object(processor, 'process_strategies_batch') as mock_batch, \
             patch.object(processor, 'process_strategies_parallel') as mock_parallel:
            mock_batch.return_value = {'strategy1': []}
            mock_parallel.return_value = {'strategy1': []}

            benchmark = processor.benchmark_processing_methods(
                strategy_configs=sample_strategy_configs,
                symbols=['BTC-USDT'],
                timeframe='1h',
                days_back=7,
            )

            for key in ('sequential', 'parallel', 'recommendation'):
                assert key in benchmark

            mock_batch.assert_called_once()
            mock_parallel.assert_called_once()

    def test_reset_stats(self, processor):
        """Resetting clears both the counters and the result cache."""
        processor._processing_stats.update({
            'strategies_processed': 5,
            'total_signals_generated': 25,
            'processing_time_seconds': 10.0,
        })
        processor._result_cache['test'] = 'data'

        processor._reset_stats()

        assert processor._processing_stats['strategies_processed'] == 0
        assert processor._processing_stats['total_signals_generated'] == 0
        assert processor._processing_stats['processing_time_seconds'] == 0.0
        assert len(processor._result_cache) == 0

    def test_calculate_warmup_period_ema_strategy(self, processor):
        """EMA warm-up is the slowest period plus the safety buffer."""
        configs = [{'name': 'ema_crossover', 'fast_period': 12, 'slow_period': 26}]

        # max(12, 26) + 10 safety buffer = 36
        assert processor._calculate_warmup_period(configs) == 36

    def test_calculate_warmup_period_macd_strategy(self, processor):
        """MACD warm-up adds an extra buffer on top of the safety margin."""
        configs = [{'name': 'macd_trend', 'slow_period': 26, 'signal_period': 9}]

        # max(26, 9) + 10 MACD buffer + 10 safety buffer = 46
        assert processor._calculate_warmup_period(configs) == 46

    def test_calculate_warmup_period_rsi_strategy(self, processor):
        """RSI warm-up adds a small RSI buffer plus the safety margin."""
        configs = [{'name': 'rsi_momentum', 'period': 14}]

        # 14 + 5 RSI buffer + 10 safety buffer = 29
        assert processor._calculate_warmup_period(configs) == 29

    def test_calculate_warmup_period_multiple_strategies(self, processor):
        """With several strategies the longest warm-up wins."""
        configs = [
            {'name': 'ema_crossover', 'slow_period': 26},
            {'name': 'rsi_momentum', 'period': 14},
            {'name': 'macd_trend', 'slow_period': 26, 'signal_period': 9},
        ]

        # The MACD strategy dominates at 46 days.
        assert processor._calculate_warmup_period(configs) == 46

    def test_calculate_warmup_period_unknown_strategy(self, processor):
        """Unrecognized strategies fall back to the default warm-up."""
        configs = [{'name': 'custom_strategy', 'some_param': 100}]

        # 30 default + 10 safety buffer = 40
        assert processor._calculate_warmup_period(configs) == 40

    def test_process_large_dataset_streaming_with_warmup(self, processor, sample_strategy_configs, sample_strategy_results):
        """Later chunks request extra warm-up days and get trimmed afterwards."""
        with patch.object(processor, '_calculate_warmup_period') as mock_warmup:
            mock_warmup.return_value = 10  # 10-day warm-up

            with patch.object(processor, 'process_strategies_parallel') as mock_parallel:
                mock_parallel.return_value = {'test_strategy': sample_strategy_results}

                with patch.object(processor, '_trim_warmup_from_results') as mock_trim:
                    mock_trim.return_value = {'test_strategy': sample_strategy_results}

                    stream = processor.process_large_dataset_streaming_with_warmup(
                        strategy_configs=sample_strategy_configs,
                        symbols=['BTC-USDT'],
                        timeframe='1h',
                        total_days_back=60,  # 2 chunks at 30 days each
                    )

                    chunks = list(stream)

                    assert len(chunks) == 2  # 60 days / 30-day chunks
                    assert mock_parallel.call_count == 2

                    first_call = mock_parallel.call_args_list[0]
                    second_call = mock_parallel.call_args_list[1]

                    # First chunk needs no warm-up: plain 30 days.
                    assert first_call[1]['days_back'] == 30
                    # Second chunk carries 10 warm-up days: 30 + 10 = 40.
                    assert second_call[1]['days_back'] == 40

                    # Only the warmed-up chunk gets trimmed.
                    assert mock_trim.call_count == 1

    def test_trim_warmup_from_results(self, processor, sample_strategy_results):
        """Trimming drops warm-up data and keeps results in timestamp order."""
        long_series = sample_strategy_results * 10   # 10 results
        short_series = sample_strategy_results * 5   # 5 results
        chunk_results = {
            'strategy1': long_series,
            'strategy2': short_series,
        }

        trimmed = processor._trim_warmup_from_results(
            chunk_results=chunk_results,
            warmup_days=10,
            target_start_days=30,
            target_end_days=60,
        )

        # Nothing is added; each series can only shrink or stay the same.
        assert len(trimmed['strategy1']) <= len(long_series)
        assert len(trimmed['strategy2']) <= len(short_series)

        # Remaining results must be chronologically ordered.
        for series in trimmed.values():
            if len(series) > 1:
                stamps = [item.timestamp for item in series]
                assert stamps == sorted(stamps)

    def test_streaming_with_warmup_chunk_size_adjustment(self, processor, sample_strategy_configs):
        """A warm-up longer than the chunk forces a chunk-size adjustment."""
        processor.config.chunk_size_days = 15  # deliberately too small

        with patch.object(processor, '_calculate_warmup_period') as mock_warmup:
            mock_warmup.return_value = 30  # warm-up exceeds the chunk size

            with patch.object(processor, 'process_strategies_parallel') as mock_parallel:
                mock_parallel.return_value = {}

                stream = processor.process_large_dataset_streaming_with_warmup(
                    strategy_configs=sample_strategy_configs,
                    symbols=['BTC-USDT'],
                    timeframe='1h',
                    total_days_back=90,
                )

                # Drain the generator so the adjustment logic actually runs.
                list(stream)

                # NOTE(review): the adjustment is only logged; capturing the log
                # record (e.g. with caplog) would make this assertion explicit.
1068
tests/strategies/test_data_integration.py
Normal file
1068
tests/strategies/test_data_integration.py
Normal file
File diff suppressed because it is too large
Load Diff
558
tests/strategies/test_realtime_execution.py
Normal file
558
tests/strategies/test_realtime_execution.py
Normal file
@@ -0,0 +1,558 @@
|
||||
"""
|
||||
Tests for real-time strategy execution pipeline.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
import pandas as pd
|
||||
from datetime import datetime, timezone, timedelta
|
||||
from unittest.mock import Mock, patch, MagicMock
|
||||
import time
|
||||
from queue import Queue, Empty
|
||||
import threading
|
||||
|
||||
from strategies.realtime_execution import (
|
||||
RealTimeStrategyProcessor,
|
||||
StrategySignalBroadcaster,
|
||||
RealTimeConfig,
|
||||
StrategyExecutionContext,
|
||||
RealTimeSignal,
|
||||
get_realtime_strategy_processor,
|
||||
initialize_realtime_strategy_system,
|
||||
shutdown_realtime_strategy_system
|
||||
)
|
||||
from strategies.data_types import StrategyResult, StrategySignal, SignalType
|
||||
from data.common.data_types import OHLCVCandle
|
||||
|
||||
|
||||
class TestRealTimeConfig:
    """Tests for the RealTimeConfig dataclass."""

    def test_default_config(self):
        """Every field should carry its documented default value."""
        cfg = RealTimeConfig()

        assert cfg.refresh_interval_seconds == 30
        assert cfg.max_strategies_concurrent == 5
        assert cfg.incremental_calculation is True
        assert cfg.signal_batch_size == 100
        assert cfg.enable_signal_broadcasting is True
        assert cfg.max_signal_queue_size == 1000
        assert cfg.chart_update_throttle_ms == 1000
        assert cfg.error_retry_attempts == 3
        assert cfg.error_retry_delay_seconds == 5

    def test_custom_config(self):
        """Explicitly passed values should override the defaults."""
        overrides = {
            'refresh_interval_seconds': 15,
            'max_strategies_concurrent': 3,
            'incremental_calculation': False,
            'signal_batch_size': 50,
        }
        cfg = RealTimeConfig(**overrides)

        # Each overridden field must reflect the supplied value.
        for field_name, expected in overrides.items():
            assert getattr(cfg, field_name) == expected
|
||||
class TestStrategyExecutionContext:
    """Tests for the StrategyExecutionContext dataclass."""

    def test_context_creation(self):
        """A freshly built context exposes its inputs plus sane defaults."""
        ctx = StrategyExecutionContext(
            strategy_name="ema_crossover",
            strategy_config={"short_period": 12, "long_period": 26},
            symbol="BTC-USDT",
            timeframe="1h"
        )

        # Supplied values are stored verbatim.
        assert ctx.strategy_name == "ema_crossover"
        assert ctx.strategy_config == {"short_period": 12, "long_period": 26}
        assert ctx.symbol == "BTC-USDT"
        assert ctx.timeframe == "1h"
        # Defaults kick in for everything not provided.
        assert ctx.exchange == "okx"
        assert ctx.last_calculation_time is None
        assert ctx.consecutive_errors == 0
        assert ctx.is_active is True

    def test_context_with_custom_exchange(self):
        """The default exchange should be overridable at construction."""
        ctx = StrategyExecutionContext(
            strategy_name="rsi",
            strategy_config={"period": 14},
            symbol="ETH-USDT",
            timeframe="4h",
            exchange="binance"
        )

        assert ctx.exchange == "binance"
|
||||
class TestRealTimeSignal:
    """Tests for the RealTimeSignal dataclass."""

    def test_signal_creation(self):
        """A signal wraps its result/context and defaults to requiring a chart update."""
        # Minimal mocked strategy result.
        result = Mock(spec=StrategyResult)
        result.timestamp = datetime.now(timezone.utc)
        result.confidence = 0.8

        ctx = StrategyExecutionContext(
            strategy_name="macd",
            strategy_config={"fast_period": 12},
            symbol="BTC-USDT",
            timeframe="1d"
        )

        signal = RealTimeSignal(strategy_result=result, context=ctx)

        assert signal.strategy_result is result
        assert signal.context is ctx
        # Defaults set by the dataclass itself.
        assert signal.chart_update_required is True
        assert isinstance(signal.generation_time, datetime)
|
||||
class TestStrategySignalBroadcaster:
    """Tests for the StrategySignalBroadcaster class."""

    @staticmethod
    def _make_signal():
        """Build a minimal RealTimeSignal for queue tests."""
        result = Mock(spec=StrategyResult)
        ctx = StrategyExecutionContext(
            strategy_name="test",
            strategy_config={},
            symbol="BTC-USDT",
            timeframe="1h"
        )
        return RealTimeSignal(strategy_result=result, context=ctx)

    @pytest.fixture
    def config(self):
        """Small queue/batch configuration so tests run fast."""
        return RealTimeConfig(
            signal_batch_size=5,
            max_signal_queue_size=10,
            chart_update_throttle_ms=100
        )

    @pytest.fixture
    def mock_db_ops(self):
        """Patch database operations so no real storage is touched."""
        with patch('strategies.realtime_execution.get_database_operations') as mock:
            db_ops = Mock()
            db_ops.strategy = Mock()
            db_ops.strategy.store_signals_batch = Mock(return_value=5)
            mock.return_value = db_ops
            yield db_ops

    @pytest.fixture
    def broadcaster(self, config, mock_db_ops):
        """Broadcaster wired against the mocked database layer."""
        return StrategySignalBroadcaster(config)

    def test_broadcaster_initialization(self, broadcaster, config):
        """A new broadcaster starts idle with no chart callback."""
        assert broadcaster.config == config
        assert broadcaster._is_running is False
        assert broadcaster._chart_update_callback is None

    def test_start_stop_broadcaster(self, broadcaster):
        """start() spins up the worker thread; stop() halts it."""
        assert not broadcaster._is_running

        broadcaster.start()
        assert broadcaster._is_running
        assert broadcaster._processing_thread is not None

        broadcaster.stop()
        assert not broadcaster._is_running

    def test_broadcast_signal(self, broadcaster):
        """A broadcast signal lands on the internal queue."""
        signal = self._make_signal()

        assert broadcaster.broadcast_signal(signal) is True
        assert broadcaster._signal_queue.qsize() == 1

    def test_broadcast_signal_queue_full(self, config, mock_db_ops):
        """Broadcasting into a full queue fails gracefully."""
        # Queue of capacity one: the second broadcast must be rejected.
        tiny = StrategySignalBroadcaster(RealTimeConfig(max_signal_queue_size=1))

        first = self._make_signal()
        second = self._make_signal()

        assert tiny.broadcast_signal(first) is True    # fills the queue
        assert tiny.broadcast_signal(second) is False  # rejected: queue full

    def test_set_chart_update_callback(self, broadcaster):
        """The chart-update callback is stored for later dispatch."""
        callback = Mock()
        broadcaster.set_chart_update_callback(callback)
        assert broadcaster._chart_update_callback is callback

    def test_get_signal_stats(self, broadcaster):
        """Stats expose queue sizes, run state and chart-update history."""
        stats = broadcaster.get_signal_stats()

        for key in ('queue_size', 'chart_queue_size', 'is_running', 'last_chart_updates'):
            assert key in stats
        assert stats['is_running'] is False
|
||||
class TestRealTimeStrategyProcessor:
    """Tests for the RealTimeStrategyProcessor class."""

    @pytest.fixture
    def config(self):
        """Configuration tuned for small, fast tests."""
        return RealTimeConfig(
            max_strategies_concurrent=2,
            error_retry_attempts=2
        )

    @pytest.fixture
    def mock_dependencies(self):
        """Patch every external collaborator of the processor."""
        mocks = {}
        with patch('strategies.realtime_execution.StrategyDataIntegrator') as data_cls, \
             patch('strategies.realtime_execution.MarketDataIntegrator') as market_cls, \
             patch('strategies.realtime_execution.StrategyFactory') as factory_cls:
            mocks['data_integrator'] = Mock()
            data_cls.return_value = mocks['data_integrator']
            mocks['market_integrator'] = Mock()
            market_cls.return_value = mocks['market_integrator']
            mocks['strategy_factory'] = Mock()
            factory_cls.return_value = mocks['strategy_factory']
            yield mocks

    @pytest.fixture
    def processor(self, config, mock_dependencies):
        """Processor instance built on top of the mocked dependencies."""
        return RealTimeStrategyProcessor(config)

    def test_processor_initialization(self, processor, config):
        """A new processor starts with no contexts and zeroed stats."""
        assert processor.config == config
        assert processor._execution_contexts == {}
        assert processor._performance_stats['total_calculations'] == 0

    def test_start_stop_processor(self, processor):
        """Starting/stopping the processor toggles its broadcaster."""
        processor.start()
        assert processor.signal_broadcaster._is_running is True

        processor.stop()
        assert processor.signal_broadcaster._is_running is False

    def test_register_strategy(self, processor):
        """Registration yields a deterministic context id and stores the context."""
        context_id = processor.register_strategy(
            strategy_name="ema_crossover",
            strategy_config={"short_period": 12, "long_period": 26},
            symbol="BTC-USDT",
            timeframe="1h"
        )

        # Context id is composed of name/symbol/timeframe/exchange.
        assert context_id == "ema_crossover_BTC-USDT_1h_okx"
        assert context_id in processor._execution_contexts

        ctx = processor._execution_contexts[context_id]
        assert ctx.strategy_name == "ema_crossover"
        assert ctx.symbol == "BTC-USDT"
        assert ctx.timeframe == "1h"
        assert ctx.is_active is True

    def test_unregister_strategy(self, processor):
        """Unregistering removes the context; a second attempt reports failure."""
        context_id = processor.register_strategy(
            strategy_name="rsi",
            strategy_config={"period": 14},
            symbol="ETH-USDT",
            timeframe="4h"
        )
        assert context_id in processor._execution_contexts

        assert processor.unregister_strategy(context_id) is True
        assert context_id not in processor._execution_contexts

        # Already gone, so a repeated call must fail.
        assert processor.unregister_strategy(context_id) is False

    def test_execute_realtime_update_no_strategies(self, processor):
        """With nothing registered, an update yields no signals."""
        assert processor.execute_realtime_update("BTC-USDT", "1h") == []

    def test_execute_realtime_update_with_strategies(self, processor, mock_dependencies):
        """A registered strategy produces real-time signals on update."""
        mock_result = Mock(spec=StrategyResult)
        mock_result.timestamp = datetime.now(timezone.utc)
        mock_result.confidence = 0.8
        mock_dependencies['data_integrator'].calculate_strategy_signals.return_value = [mock_result]

        processor.register_strategy(
            strategy_name="ema_crossover",
            strategy_config={"short_period": 12, "long_period": 26},
            symbol="BTC-USDT",
            timeframe="1h"
        )

        signals = processor.execute_realtime_update("BTC-USDT", "1h")

        assert len(signals) == 1
        assert isinstance(signals[0], RealTimeSignal)
        assert signals[0].strategy_result is mock_result

    def test_get_active_strategies(self, processor):
        """Only non-paused strategies are reported as active."""
        processor.register_strategy("ema", {}, "BTC-USDT", "1h")
        processor.register_strategy("rsi", {}, "ETH-USDT", "4h")

        active = processor.get_active_strategies()
        assert len(active) == 2

        # Pausing one strategy should drop it from the active view.
        paused_id = next(iter(active))
        processor.pause_strategy(paused_id)

        assert len(processor.get_active_strategies()) == 1

    def test_pause_resume_strategy(self, processor):
        """Pause/resume flips is_active; unknown ids are rejected."""
        context_id = processor.register_strategy("macd", {}, "BTC-USDT", "1d")

        assert processor.pause_strategy(context_id) is True
        assert not processor._execution_contexts[context_id].is_active

        assert processor.resume_strategy(context_id) is True
        assert processor._execution_contexts[context_id].is_active

        # An id that was never registered cannot be paused.
        assert processor.pause_strategy("invalid_id") is False

    def test_get_performance_stats(self, processor):
        """Performance stats merge processor and broadcaster counters."""
        stats = processor.get_performance_stats()

        expected_keys = (
            'total_calculations',
            'successful_calculations',
            'failed_calculations',
            'average_calculation_time_ms',
            'signals_generated',
            'queue_size',  # contributed by the signal broadcaster
        )
        for key in expected_keys:
            assert key in stats
|
||||
class TestSingletonAndInitialization:
    """Tests for the module-level singleton accessor and lifecycle helpers.

    These tests mutate process-global state, so each one resets the global
    processor before running and — unlike the original version — guarantees
    cleanup in a ``finally`` block even when an assertion fails, preventing
    a leaked running processor from polluting later tests.
    """

    def test_get_realtime_strategy_processor_singleton(self):
        """Repeated accessor calls must hand back the same instance."""
        shutdown_realtime_strategy_system()  # reset any pre-existing global
        try:
            first = get_realtime_strategy_processor()
            second = get_realtime_strategy_processor()

            assert first is second
        finally:
            # Always tear down, even on assertion failure.
            shutdown_realtime_strategy_system()

    def test_initialize_realtime_strategy_system(self):
        """Initialization returns a started processor using the given config."""
        shutdown_realtime_strategy_system()  # reset any pre-existing global
        try:
            config = RealTimeConfig(max_strategies_concurrent=2)
            processor = initialize_realtime_strategy_system(config)

            assert processor is not None
            assert processor.signal_broadcaster._is_running is True
        finally:
            shutdown_realtime_strategy_system()

    def test_shutdown_realtime_strategy_system(self):
        """Shutdown clears the global; the next accessor builds a fresh processor."""
        processor = initialize_realtime_strategy_system()
        try:
            assert processor.signal_broadcaster._is_running is True

            shutdown_realtime_strategy_system()

            # After shutdown the global processor is reset to None, so the only
            # observable contract is that a brand-new processor is handed out.
            new_processor = get_realtime_strategy_processor()
            assert new_processor is not None
        finally:
            # Clean up whatever processor the accessor created above.
            shutdown_realtime_strategy_system()
|
||||
class TestIntegration:
    """Integration tests for the real-time execution pipeline."""

    @pytest.fixture
    def integration_config(self):
        """Small queues and throttles keep the integration tests quick."""
        return RealTimeConfig(
            signal_batch_size=2,
            max_signal_queue_size=5,
            chart_update_throttle_ms=50
        )

    def test_end_to_end_signal_flow(self, integration_config):
        """A calculated signal should flow from strategy through to storage."""
        with patch('strategies.realtime_execution.get_database_operations') as mock_db:
            # Stub out the persistence layer.
            db_ops = Mock()
            db_ops.strategy = Mock()
            db_ops.strategy.store_signals_batch = Mock(return_value=2)
            mock_db.return_value = db_ops

            processor = RealTimeStrategyProcessor(integration_config)
            processor.start()
            try:
                # Fabricate one BUY result for the mocked calculation.
                mock_result = Mock(spec=StrategyResult)
                mock_result.timestamp = datetime.now(timezone.utc)
                mock_result.confidence = 0.8
                mock_result.signal = Mock()
                mock_result.signal.signal_type = SignalType.BUY
                mock_result.price = 50000.0
                mock_result.metadata = {"test": True}

                with patch.object(processor.data_integrator, 'calculate_strategy_signals') as mock_calc:
                    mock_calc.return_value = [mock_result]

                    processor.register_strategy(
                        strategy_name="test_strategy",
                        strategy_config={"param": "value"},
                        symbol="BTC-USDT",
                        timeframe="1h"
                    )

                    signals = processor.execute_realtime_update("BTC-USDT", "1h")
                    assert len(signals) == 1

                    # Give the background thread a moment to drain the queue.
                    time.sleep(0.2)

                    mock_calc.assert_called_once()
            finally:
                processor.stop()

    def test_error_handling_and_retry(self, integration_config):
        """Repeated calculation failures must deactivate the strategy."""
        processor = RealTimeStrategyProcessor(integration_config)
        processor.start()
        try:
            with patch.object(processor.data_integrator, 'calculate_strategy_signals') as mock_calc:
                mock_calc.side_effect = Exception("Test error")

                context_id = processor.register_strategy(
                    strategy_name="error_strategy",
                    strategy_config={},
                    symbol="BTC-USDT",
                    timeframe="1h"
                )

                # Drive enough updates to exhaust the retry budget.
                for _ in range(integration_config.error_retry_attempts + 1):
                    processor.execute_realtime_update("BTC-USDT", "1h")

                ctx = processor._execution_contexts[context_id]
                assert not ctx.is_active
                assert ctx.consecutive_errors >= integration_config.error_retry_attempts
        finally:
            processor.stop()

    def test_concurrent_strategy_execution(self, integration_config):
        """Multiple strategies on one symbol/timeframe each contribute a signal."""
        processor = RealTimeStrategyProcessor(integration_config)
        processor.start()
        try:
            # One mocked result per strategy, with distinct confidences.
            results = []
            for confidence in (0.7, 0.9):
                r = Mock(spec=StrategyResult)
                r.timestamp = datetime.now(timezone.utc)
                r.confidence = confidence
                results.append(r)

            with patch.object(processor.data_integrator, 'calculate_strategy_signals') as mock_calc:
                mock_calc.side_effect = [[results[0]], [results[1]]]

                processor.register_strategy("strategy1", {}, "BTC-USDT", "1h")
                processor.register_strategy("strategy2", {}, "BTC-USDT", "1h")

                signals = processor.execute_realtime_update("BTC-USDT", "1h")

                # One signal per registered strategy.
                assert len(signals) == 2
        finally:
            processor.stop()
478
tests/strategies/test_validation.py
Normal file
478
tests/strategies/test_validation.py
Normal file
@@ -0,0 +1,478 @@
|
||||
"""
|
||||
Tests for Strategy Signal Validation Pipeline
|
||||
|
||||
This module tests signal validation, filtering, and quality assessment
|
||||
functionality for strategy-generated signals.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from datetime import datetime, timezone
|
||||
from unittest.mock import patch
|
||||
|
||||
from strategies.validation import StrategySignalValidator, ValidationConfig
|
||||
from strategies.data_types import StrategySignal, SignalType
|
||||
|
||||
|
||||
class TestValidationConfig:
    """Tests for the ValidationConfig dataclass."""

    def test_default_config(self):
        """Defaults should accept every signal type and the full confidence range."""
        cfg = ValidationConfig()

        assert cfg.min_confidence == 0.0
        assert cfg.max_confidence == 1.0
        assert cfg.required_metadata_fields == []
        assert cfg.allowed_signal_types == list(SignalType)
        assert cfg.price_tolerance_percent == 5.0

    def test_custom_config(self):
        """All fields should be overridable at construction time."""
        cfg = ValidationConfig(
            min_confidence=0.3,
            max_confidence=0.9,
            required_metadata_fields=['indicator1', 'indicator2'],
            allowed_signal_types=[SignalType.BUY, SignalType.SELL],
            price_tolerance_percent=2.0
        )

        assert cfg.min_confidence == 0.3
        assert cfg.max_confidence == 0.9
        assert cfg.required_metadata_fields == ['indicator1', 'indicator2']
        assert cfg.allowed_signal_types == [SignalType.BUY, SignalType.SELL]
        assert cfg.price_tolerance_percent == 2.0
|
||||
class TestStrategySignalValidator:
|
||||
"""Tests for StrategySignalValidator class."""
|
||||
|
||||
@pytest.fixture
|
||||
def validator(self):
|
||||
"""Create validator with default configuration."""
|
||||
return StrategySignalValidator()
|
||||
|
||||
@pytest.fixture
|
||||
def strict_validator(self):
|
||||
"""Create validator with strict configuration."""
|
||||
config = ValidationConfig(
|
||||
min_confidence=0.5,
|
||||
max_confidence=1.0,
|
||||
required_metadata_fields=['rsi', 'macd'],
|
||||
allowed_signal_types=[SignalType.BUY, SignalType.SELL]
|
||||
)
|
||||
return StrategySignalValidator(config)
|
||||
|
||||
@pytest.fixture
|
||||
def valid_signal(self):
|
||||
"""Create a valid strategy signal for testing."""
|
||||
return StrategySignal(
|
||||
timestamp=datetime.now(timezone.utc),
|
||||
symbol='BTC-USDT',
|
||||
timeframe='1h',
|
||||
signal_type=SignalType.BUY,
|
||||
price=50000.0,
|
||||
confidence=0.8,
|
||||
metadata={'rsi': 30, 'macd': 0.05}
|
||||
)
|
||||
|
||||
def test_initialization(self, validator):
|
||||
"""Test validator initialization."""
|
||||
assert validator.config is not None
|
||||
assert validator.logger is not None
|
||||
assert validator._validation_stats['total_signals_validated'] == 0
|
||||
assert validator._validation_stats['valid_signals'] == 0
|
||||
assert validator._validation_stats['invalid_signals'] == 0
|
||||
|
||||
def test_validate_valid_signal(self, validator, valid_signal):
|
||||
"""Test validation of a completely valid signal."""
|
||||
is_valid, errors = validator.validate_signal(valid_signal)
|
||||
|
||||
assert is_valid is True
|
||||
assert errors == []
|
||||
assert validator._validation_stats['total_signals_validated'] == 1
|
||||
assert validator._validation_stats['valid_signals'] == 1
|
||||
assert validator._validation_stats['invalid_signals'] == 0
|
||||
|
||||
def test_validate_invalid_confidence_low(self, validator, valid_signal):
|
||||
"""Test validation with confidence too low."""
|
||||
valid_signal.confidence = -0.1
|
||||
|
||||
is_valid, errors = validator.validate_signal(valid_signal)
|
||||
|
||||
assert is_valid is False
|
||||
assert len(errors) == 1
|
||||
assert "Invalid confidence" in errors[0]
|
||||
assert validator._validation_stats['invalid_signals'] == 1
|
||||
|
||||
def test_validate_invalid_confidence_high(self, validator, valid_signal):
|
||||
"""Test validation with confidence too high."""
|
||||
valid_signal.confidence = 1.5
|
||||
|
||||
is_valid, errors = validator.validate_signal(valid_signal)
|
||||
|
||||
assert is_valid is False
|
||||
assert len(errors) == 1
|
||||
assert "Invalid confidence" in errors[0]
|
||||
|
||||
def test_validate_invalid_signal_type(self, strict_validator, valid_signal):
|
||||
"""Test validation with disallowed signal type."""
|
||||
valid_signal.signal_type = SignalType.HOLD
|
||||
|
||||
is_valid, errors = strict_validator.validate_signal(valid_signal)
|
||||
|
||||
assert is_valid is False
|
||||
assert len(errors) == 1
|
||||
assert "Signal type" in errors[0] and "not in allowed types" in errors[0]
|
||||
|
||||
def test_validate_invalid_price(self, validator, valid_signal):
|
||||
"""Test validation with invalid price."""
|
||||
valid_signal.price = -100.0
|
||||
|
||||
is_valid, errors = validator.validate_signal(valid_signal)
|
||||
|
||||
assert is_valid is False
|
||||
assert len(errors) == 1
|
||||
assert "Invalid price" in errors[0]
|
||||
|
||||
def test_validate_missing_required_metadata(self, strict_validator, valid_signal):
|
||||
"""Test validation with missing required metadata."""
|
||||
valid_signal.metadata = {'rsi': 30} # Missing 'macd'
|
||||
|
||||
is_valid, errors = strict_validator.validate_signal(valid_signal)
|
||||
|
||||
assert is_valid is False
|
||||
assert len(errors) == 1
|
||||
assert "Missing required metadata fields" in errors[0]
|
||||
assert "macd" in errors[0]
|
||||
|
||||
def test_validate_multiple_errors(self, strict_validator, valid_signal):
|
||||
"""Test validation with multiple errors."""
|
||||
valid_signal.confidence = 1.5 # Too high
|
||||
valid_signal.price = -100.0 # Invalid
|
||||
valid_signal.signal_type = SignalType.HOLD # Not allowed
|
||||
valid_signal.metadata = {} # Missing required fields
|
||||
|
||||
is_valid, errors = strict_validator.validate_signal(valid_signal)
|
||||
|
||||
assert is_valid is False
|
||||
assert len(errors) == 4
|
||||
assert any("confidence" in error for error in errors)
|
||||
assert any("price" in error for error in errors)
|
||||
assert any("Signal type" in error for error in errors)
|
||||
assert any("Missing required metadata" in error for error in errors)
|
||||
|
||||
def test_validation_statistics_tracking(self, validator, valid_signal):
|
||||
"""Test that validation statistics are properly tracked."""
|
||||
# Validate multiple signals
|
||||
validator.validate_signal(valid_signal) # Valid
|
||||
|
||||
invalid_signal = valid_signal
|
||||
invalid_signal.confidence = 1.5 # Invalid
|
||||
validator.validate_signal(invalid_signal) # Invalid
|
||||
|
||||
stats = validator._validation_stats
|
||||
assert stats['total_signals_validated'] == 2
|
||||
assert stats['valid_signals'] == 1
|
||||
assert stats['invalid_signals'] == 1
|
||||
assert len(stats['validation_errors']) > 0
|
||||
|
||||
def test_validate_signals_batch(self, validator, valid_signal):
|
||||
"""Test batch validation of multiple signals."""
|
||||
# Create a mix of valid and invalid signals
|
||||
signals = [
|
||||
valid_signal, # Valid
|
||||
StrategySignal( # Invalid confidence
|
||||
timestamp=datetime.now(timezone.utc),
|
||||
symbol='ETH-USDT',
|
||||
timeframe='1h',
|
||||
signal_type=SignalType.SELL,
|
||||
price=3000.0,
|
||||
confidence=1.5, # Invalid
|
||||
metadata={}
|
||||
),
|
||||
StrategySignal( # Valid
|
||||
timestamp=datetime.now(timezone.utc),
|
||||
symbol='BNB-USDT',
|
||||
timeframe='1h',
|
||||
signal_type=SignalType.BUY,
|
||||
price=300.0,
|
||||
confidence=0.7,
|
||||
metadata={}
|
||||
)
|
||||
]
|
||||
|
||||
valid_signals, invalid_signals = validator.validate_signals_batch(signals)
|
||||
|
||||
assert len(valid_signals) == 2
|
||||
assert len(invalid_signals) == 1
|
||||
assert invalid_signals[0].confidence == 1.5
|
||||
|
||||
def test_filter_signals_by_confidence(self, validator, valid_signal):
|
||||
"""Test filtering signals by confidence threshold."""
|
||||
signals = [
|
||||
valid_signal, # confidence 0.8
|
||||
StrategySignal(
|
||||
timestamp=datetime.now(timezone.utc),
|
||||
symbol='ETH-USDT',
|
||||
timeframe='1h',
|
||||
signal_type=SignalType.SELL,
|
||||
price=3000.0,
|
||||
confidence=0.3, # Low confidence
|
||||
metadata={}
|
||||
),
|
||||
StrategySignal(
|
||||
timestamp=datetime.now(timezone.utc),
|
||||
symbol='BNB-USDT',
|
||||
timeframe='1h',
|
||||
signal_type=SignalType.BUY,
|
||||
price=300.0,
|
||||
confidence=0.9, # High confidence
|
||||
metadata={}
|
||||
)
|
||||
]
|
||||
|
||||
# Filter with threshold 0.5
|
||||
filtered_signals = validator.filter_signals_by_confidence(signals, min_confidence=0.5)
|
||||
|
||||
assert len(filtered_signals) == 2
|
||||
assert all(signal.confidence >= 0.5 for signal in filtered_signals)
|
||||
assert filtered_signals[0].confidence == 0.8
|
||||
assert filtered_signals[1].confidence == 0.9
|
||||
|
||||
def test_filter_signals_by_type(self, validator, valid_signal):
|
||||
"""Test filtering signals by allowed types."""
|
||||
signals = [
|
||||
valid_signal, # BUY
|
||||
StrategySignal(
|
||||
timestamp=datetime.now(timezone.utc),
|
||||
symbol='ETH-USDT',
|
||||
timeframe='1h',
|
||||
signal_type=SignalType.SELL,
|
||||
price=3000.0,
|
||||
confidence=0.8,
|
||||
metadata={}
|
||||
),
|
||||
StrategySignal(
|
||||
timestamp=datetime.now(timezone.utc),
|
||||
symbol='BNB-USDT',
|
||||
timeframe='1h',
|
||||
signal_type=SignalType.HOLD,
|
||||
price=300.0,
|
||||
confidence=0.7,
|
||||
metadata={}
|
||||
)
|
||||
]
|
||||
|
||||
# Filter to only allow BUY and SELL
|
||||
filtered_signals = validator.filter_signals_by_type(
|
||||
signals,
|
||||
allowed_types=[SignalType.BUY, SignalType.SELL]
|
||||
)
|
||||
|
||||
assert len(filtered_signals) == 2
|
||||
assert filtered_signals[0].signal_type == SignalType.BUY
|
||||
assert filtered_signals[1].signal_type == SignalType.SELL
|
||||
|
||||
def test_get_validation_statistics(self, validator, valid_signal):
|
||||
"""Test comprehensive validation statistics."""
|
||||
# Validate some signals to generate statistics
|
||||
validator.validate_signal(valid_signal) # Valid
|
||||
|
||||
invalid_signal = valid_signal
|
||||
invalid_signal.confidence = -0.1 # Invalid
|
||||
validator.validate_signal(invalid_signal) # Invalid
|
||||
|
||||
stats = validator.get_validation_statistics()
|
||||
|
||||
assert stats['total_signals_validated'] == 2
|
||||
assert stats['valid_signals'] == 1
|
||||
assert stats['invalid_signals'] == 1
|
||||
assert stats['validation_success_rate'] == 0.5
|
||||
assert stats['validation_failure_rate'] == 0.5
|
||||
assert 'validation_errors' in stats
|
||||
|
||||
def test_transform_signal_confidence(self, validator, valid_signal):
    """Confidence transformation scales the value and, when asked, caps it."""
    base_confidence = valid_signal.confidence  # 0.8 from the fixture

    # A plain multiplier scales confidence but leaves everything else alone.
    scaled = validator.transform_signal_confidence(
        valid_signal,
        confidence_multiplier=1.2,
    )
    assert scaled.confidence == base_confidence * 1.2
    for attr in ('symbol', 'signal_type', 'price'):
        assert getattr(scaled, attr) == getattr(valid_signal, attr)

    # With max_confidence supplied, an over-scaled value is clamped.
    clamped = validator.transform_signal_confidence(
        valid_signal,
        confidence_multiplier=2.0,  # Would exceed 1.0
        max_confidence=1.0,
    )
    assert clamped.confidence == 1.0  # Capped at max
def test_enrich_signal_metadata(self, validator, valid_signal):
    """Enrichment merges new metadata keys without dropping existing ones."""
    extra = {
        'validation_timestamp': datetime.now(timezone.utc).isoformat(),
        'validation_status': 'approved',
        'risk_score': 0.2,
    }

    enriched = validator.enrich_signal_metadata(valid_signal, extra)

    # Pre-existing indicator metadata from the fixture survives the merge.
    assert enriched.metadata['rsi'] == 30
    assert enriched.metadata['macd'] == 0.05

    # The supplied keys are merged in alongside the originals.
    assert enriched.metadata['validation_status'] == 'approved'
    assert enriched.metadata['risk_score'] == 0.2
    assert 'validation_timestamp' in enriched.metadata

    # Non-metadata fields are untouched by enrichment.
    assert enriched.confidence == valid_signal.confidence
    assert enriched.signal_type == valid_signal.signal_type
def test_transform_signals_batch(self, validator, valid_signal):
    """Batch transform scales every confidence and tags all metadata."""
    sell_signal = StrategySignal(
        timestamp=datetime.now(timezone.utc),
        symbol='ETH-USDT',
        timeframe='1h',
        signal_type=SignalType.SELL,
        price=3000.0,
        confidence=0.6,
        metadata={'ema': 2950},
    )
    signals = [valid_signal, sell_signal]

    result = validator.transform_signals_batch(
        signals,
        confidence_multiplier=1.1,
        additional_metadata={'batch_id': 'test_batch_001'},
    )

    assert len(result) == 2

    # Each confidence is scaled by the multiplier (fixture carries 0.8).
    assert [s.confidence for s in result] == [0.8 * 1.1, 0.6 * 1.1]

    # Every output signal carries the batch tag from additional_metadata...
    assert all(s.metadata['batch_id'] == 'test_batch_001' for s in result)

    # ...while the original per-signal metadata is preserved.
    assert result[0].metadata['rsi'] == 30
    assert result[1].metadata['ema'] == 2950
def test_calculate_signal_quality_metrics(self, validator, valid_signal):
    """Quality metrics summarise confidence, type mix, metadata and score."""
    high_conf = StrategySignal(
        timestamp=datetime.now(timezone.utc),
        symbol='ETH-USDT',
        timeframe='1h',
        signal_type=SignalType.SELL,
        price=3000.0,
        confidence=0.9,  # High confidence
        metadata={'volume_spike': True},
    )
    low_conf = StrategySignal(
        timestamp=datetime.now(timezone.utc),
        symbol='BNB-USDT',
        timeframe='1h',
        signal_type=SignalType.HOLD,
        price=300.0,
        confidence=0.4,  # Low confidence
        metadata=None,  # No metadata
    )
    # Fixture signal: confidence 0.8, has metadata.
    signals = [valid_signal, high_conf, low_conf]

    metrics = validator.calculate_signal_quality_metrics(signals)

    assert metrics['total_signals'] == 3

    confidence = metrics['confidence_metrics']
    assert confidence['average'] == round((0.8 + 0.9 + 0.4) / 3, 3)
    assert confidence['minimum'] == 0.4
    assert confidence['maximum'] == 0.9
    assert confidence['high_confidence_count'] == 2  # >= 0.7

    # Two of three signals are high-confidence / carry metadata -> 66.7%.
    assert metrics['quality_score'] == round((2 / 3) * 100, 1)
    assert metrics['metadata_completeness_percentage'] == round((2 / 3) * 100, 1)

    # Exactly one signal of each type was supplied.
    distribution = metrics['signal_type_distribution']
    assert distribution['buy'] == 1
    assert distribution['sell'] == 1
    assert distribution['hold'] == 1

    # Recommendations are always produced as a non-empty list.
    recommendations = metrics['recommendations']
    assert isinstance(recommendations, list)
    assert len(recommendations) > 0
def test_calculate_signal_quality_metrics_empty(self, validator):
    """An empty signal list yields an explanatory error payload, not metrics."""
    metrics = validator.calculate_signal_quality_metrics([])

    assert 'error' in metrics
    assert metrics['error'] == 'No signals provided for quality analysis'
def test_generate_quality_recommendations(self, validator):
    """A low-confidence, metadata-less signal triggers both recommendation kinds."""
    weak_signal = StrategySignal(
        timestamp=datetime.now(timezone.utc),
        symbol='BTC-USDT',
        timeframe='1h',
        signal_type=SignalType.BUY,
        price=50000.0,
        confidence=0.3,  # Low confidence
        metadata=None,  # No metadata
    )

    recommendations = validator._generate_quality_recommendations([weak_signal])

    # Both the confidence and the metadata shortcomings should be flagged.
    lowered = [rec.lower() for rec in recommendations]
    assert any('confidence' in text for text in lowered)
    assert any('metadata' in text for text in lowered)
def test_generate_validation_report(self, validator, valid_signal):
    """Test comprehensive validation report generation."""
    import copy

    # Generate some validation activity: one valid, one invalid signal.
    validator.validate_signal(valid_signal)  # Valid

    # Copy before mutating: a plain `invalid_signal = valid_signal` would
    # alias the fixture object, so setting confidence below would silently
    # corrupt `valid_signal` as well.
    invalid_signal = copy.copy(valid_signal)
    invalid_signal.confidence = -0.1  # Invalid: below minimum confidence
    validator.validate_signal(invalid_signal)  # Invalid

    report = validator.generate_validation_report()

    # The report exposes all top-level sections.
    assert 'report_timestamp' in report
    assert 'validation_summary' in report
    assert 'error_analysis' in report
    assert 'configuration' in report
    assert 'health_status' in report

    # Check validation summary: one of two signals passed -> 50% each way.
    summary = report['validation_summary']
    assert summary['total_validated'] == 2
    assert '50.0%' in summary['success_rate']
    assert '50.0%' in summary['failure_rate']

    # Check the reported validator configuration defaults.
    config = report['configuration']
    assert config['min_confidence'] == 0.0
    assert config['max_confidence'] == 1.0
    assert isinstance(config['allowed_signal_types'], list)

    # Health status is one of the two known states.
    assert report['health_status'] in ['good', 'needs_attention']
||||
Reference in New Issue
Block a user