TCPDashboard/tests/strategies/test_data_integration.py
Commit 8c23489ff0: Implement real-time strategy execution and data integration features
- Added `realtime_execution.py` for real-time strategy execution, enabling live signal generation and integration with the dashboard's chart refresh cycle.
- Introduced `data_integration.py` to manage market data orchestration, caching, and technical indicator calculations for strategy signal generation.
- Implemented `validation.py` for comprehensive validation and quality assessment of strategy-generated signals, ensuring reliability and consistency.
- Developed `batch_processing.py` to facilitate efficient backtesting of multiple strategies across large datasets with memory management and performance optimization.
- Updated `__init__.py` files to include new modules and ensure proper exports, enhancing modularity and maintainability.
- Enhanced unit tests for the new features, ensuring robust functionality and adherence to project standards.

These changes establish a solid foundation for real-time strategy execution and data integration, aligning with project goals for modularity, performance, and maintainability. A usage sketch reconstructed from the tests below follows.
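A minimal, hypothetical usage sketch of the new integration API, reconstructed solely from the calls exercised in the tests in this file (`StrategyDataIntegrationConfig`, `get_strategy_data_integrator`, `calculate_strategy_signals_orchestrated`); the strategy name, parameter values, and printed fields are illustrative assumptions, not documented defaults.

```python
# Sketch only: names and signatures are taken from the unit tests below;
# concrete values are placeholders.
from strategies.data_integration import (
    StrategyDataIntegrationConfig,
    get_strategy_data_integrator,
)

# Configure lookback window and cache lifetime for the integrator.
config = StrategyDataIntegrationConfig(default_days_back=60, cache_timeout_minutes=30)
integrator = get_strategy_data_integrator(config)

# Orchestrated path: fetches candles, resolves indicator calculation order,
# reuses cached indicator results, and returns a list of StrategyResult objects.
results = integrator.calculate_strategy_signals_orchestrated(
    strategy_name='ema_crossover',                      # example strategy
    strategy_config={'fast_period': 5, 'slow_period': 10},
    symbol='BTC-USDT',
    timeframe='1h',
    days_back=30,
    enable_caching=True,
)

for result in results:
    # metadata keys such as 'data_points_used' are asserted in the tests below
    print(result.strategy_name, result.metadata.get('data_points_used'))
```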

"""
Unit tests for Strategy Data Integration module.
Tests the StrategyDataIntegrator class and its data orchestration capabilities.
"""
import pytest
import pandas as pd
from datetime import datetime, timezone, timedelta
from unittest.mock import Mock, patch, MagicMock
from typing import List, Dict, Any
from decimal import Decimal
from strategies.data_integration import (
StrategyDataIntegrator,
StrategyDataIntegrationConfig,
get_strategy_data_integrator
)
from strategies.data_types import StrategyResult, StrategySignal, SignalType
from data.common.data_types import OHLCVCandle


class TestStrategyDataIntegrationConfig:
    """Test configuration class for strategy data integration."""

    def test_default_config(self):
        """Test default configuration values."""
        config = StrategyDataIntegrationConfig()
        assert config.default_days_back == 30
        assert config.min_candles_required == 100
        assert config.max_candles_limit == 5000
        assert config.cache_timeout_minutes == 15
        assert config.enable_data_validation is True
        assert config.enable_sparse_data_handling is True
        assert config.enable_indicator_caching is True
        assert config.max_cached_indicators == 50

    def test_custom_config(self):
        """Test custom configuration values."""
        config = StrategyDataIntegrationConfig(
            default_days_back=60,
            min_candles_required=200,
            cache_timeout_minutes=30,
            enable_indicator_caching=False
        )
        assert config.default_days_back == 60
        assert config.min_candles_required == 200
        assert config.cache_timeout_minutes == 30
        assert config.enable_indicator_caching is False


class TestStrategyDataIntegrator:
    """Test strategy data integrator functionality."""

    @pytest.fixture
    def mock_db_ops(self):
        """Create mock database operations."""
        mock_db_ops = Mock()
        mock_db_ops.market_data = Mock()
        return mock_db_ops

    @pytest.fixture
    def mock_technical_indicators(self):
        """Create mock technical indicators."""
        return Mock()

    @pytest.fixture
    def mock_strategy_factory(self):
        """Create mock strategy factory."""
        return Mock()

    @pytest.fixture
    def sample_candles(self):
        """Create sample OHLCV candles for testing."""
        candles = []
        base_time = datetime.now(timezone.utc) - timedelta(days=10)
        for i in range(100):
            start_time = base_time + timedelta(hours=i)
            end_time = start_time + timedelta(hours=1)
            candles.append(OHLCVCandle(
                symbol='BTC-USDT',
                timeframe='1h',
                start_time=start_time,
                end_time=end_time,
                open=Decimal(str(100.0 + i * 0.1)),
                high=Decimal(str(101.0 + i * 0.1)),
                low=Decimal(str(99.0 + i * 0.1)),
                close=Decimal(str(100.5 + i * 0.1)),
                volume=Decimal(str(1000.0 + i * 10)),
                trade_count=10 + i,
                exchange='okx'
            ))
        return candles

    @pytest.fixture
    def sample_raw_candles(self):
        """Create sample raw candles from database."""
        candles = []
        base_time = datetime.now(timezone.utc) - timedelta(days=10)
        for i in range(100):
            timestamp = base_time + timedelta(hours=i)
            candles.append({
                'timestamp': timestamp,
                'open': 100.0 + i * 0.1,
                'high': 101.0 + i * 0.1,
                'low': 99.0 + i * 0.1,
                'close': 100.5 + i * 0.1,
                'volume': 1000.0 + i * 10,
                'symbol': 'BTC-USDT',
                'timeframe': '1h',
                'exchange': 'okx'
            })
        return candles

    @pytest.fixture
    def integrator(self, mock_db_ops, mock_technical_indicators, mock_strategy_factory):
        """Create strategy data integrator with mocked dependencies."""
        config = StrategyDataIntegrationConfig()
        with patch('strategies.data_integration.get_database_operations') as mock_get_db, \
             patch('strategies.data_integration.TechnicalIndicators') as mock_ti, \
             patch('strategies.data_integration.StrategyFactory') as mock_sf:
            mock_get_db.return_value = mock_db_ops
            mock_ti.return_value = mock_technical_indicators
            mock_sf.return_value = mock_strategy_factory
            integrator = StrategyDataIntegrator(config)
            # Set the mocked objects
            integrator.db_ops = mock_db_ops
            integrator.technical_indicators = mock_technical_indicators
            integrator.strategy_factory = mock_strategy_factory
            return integrator

    def test_initialization(self):
        """Test integrator initialization."""
        config = StrategyDataIntegrationConfig(default_days_back=60)
        with patch('strategies.data_integration.get_database_operations'), \
             patch('strategies.data_integration.TechnicalIndicators'), \
             patch('strategies.data_integration.StrategyFactory'), \
             patch('pathlib.Path.exists', return_value=False):  # Mock no persistent cache file
            integrator = StrategyDataIntegrator(config)
            assert integrator.config.default_days_back == 60
            assert integrator._data_cache == {}
            assert integrator._indicator_cache == {}

    def test_prepare_dataframe_from_candles(self, integrator, sample_candles):
        """Test conversion of OHLCV candles to DataFrame."""
        df = integrator._prepare_dataframe_from_candles(sample_candles)
        assert len(df) == 100
        assert list(df.columns) == ['open', 'high', 'low', 'close', 'volume']
        assert df.index.name is None  # timestamp index name is removed for cleaner appearance
        assert df['open'].iloc[0] == 100.0
        assert df['close'].iloc[-1] == 110.4  # 100.5 + 99 * 0.1

    def test_prepare_dataframe_empty_candles(self, integrator):
        """Test DataFrame preparation with empty candles."""
        df = integrator._prepare_dataframe_from_candles([])
        assert df.empty
        assert len(df) == 0

    def test_validate_strategy_requirements_success(self, integrator):
        """Test successful strategy requirements validation."""
        # Create valid DataFrame
        data = {
            'open': [100.0] * 150,
            'high': [101.0] * 150,
            'low': [99.0] * 150,
            'close': [100.5] * 150,
            'volume': [1000.0] * 150
        }
        df = pd.DataFrame(data)
        result = integrator.validate_strategy_requirements(df, 'test_strategy')
        assert result is True

    def test_validate_strategy_requirements_insufficient_data(self, integrator):
        """Test validation failure due to insufficient data."""
        # Create DataFrame with insufficient data
        data = {
            'open': [100.0] * 50,  # Less than min_candles_required (100)
            'high': [101.0] * 50,
            'low': [99.0] * 50,
            'close': [100.5] * 50,
            'volume': [1000.0] * 50
        }
        df = pd.DataFrame(data)
        result = integrator.validate_strategy_requirements(df, 'test_strategy')
        assert result is False

    def test_validate_strategy_requirements_missing_columns(self, integrator):
        """Test validation failure due to missing columns."""
        # Create DataFrame with missing columns
        data = {
            'open': [100.0] * 150,
            'high': [101.0] * 150,
            # Missing 'low', 'close', 'volume'
        }
        df = pd.DataFrame(data)
        result = integrator.validate_strategy_requirements(df, 'test_strategy')
        assert result is False

    def test_validate_strategy_requirements_invalid_prices(self, integrator):
        """Test validation failure due to invalid price data."""
        # Create DataFrame with invalid prices
        data = {
            'open': [100.0, 0.0, 102.0] + [100.0] * 147,  # Zero price
            'high': [101.0] * 150,
            'low': [99.0] * 150,
            'close': [100.5] * 150,
            'volume': [1000.0] * 150
        }
        df = pd.DataFrame(data)
        result = integrator.validate_strategy_requirements(df, 'test_strategy')
        assert result is False

    @patch('strategies.data_integration.convert_database_candles_to_ohlcv')
    def test_get_strategy_data_success(self, mock_convert, integrator, sample_raw_candles, sample_candles):
        """Test successful strategy data retrieval."""
        # Setup mocks
        integrator.db_ops.market_data.get_candles.return_value = sample_raw_candles
        mock_convert.return_value = sample_candles
        # Call method
        result_df = integrator.get_strategy_data('BTC-USDT', '1h')
        # Verify results
        assert not result_df.empty
        assert len(result_df) == 100
        assert list(result_df.columns) == ['open', 'high', 'low', 'close', 'volume']
        # Verify database call
        integrator.db_ops.market_data.get_candles.assert_called_once()
        call_args = integrator.db_ops.market_data.get_candles.call_args
        assert call_args[1]['symbol'] == 'BTC-USDT'
        assert call_args[1]['timeframe'] == '1h'
        assert call_args[1]['exchange'] == 'okx'

    def test_get_strategy_data_no_raw_candles(self, integrator):
        """Test strategy data retrieval with no raw candles."""
        # Setup mock to return empty list
        integrator.db_ops.market_data.get_candles.return_value = []
        # Call method
        result_df = integrator.get_strategy_data('BTC-USDT', '1h')
        # Verify empty result
        assert result_df.empty

    @patch('strategies.data_integration.convert_database_candles_to_ohlcv')
    def test_get_strategy_data_no_ohlcv_candles(self, mock_convert, integrator, sample_raw_candles):
        """Test strategy data retrieval with no OHLCV candles after conversion."""
        # Setup mocks
        integrator.db_ops.market_data.get_candles.return_value = sample_raw_candles
        mock_convert.return_value = []  # Empty OHLCV candles
        # Call method
        result_df = integrator.get_strategy_data('BTC-USDT', '1h')
        # Verify empty result
        assert result_df.empty

    def test_get_strategy_data_caching(self, integrator):
        """Test data caching functionality."""
        # Create cached data
        cached_df = pd.DataFrame({
            'open': [100.0] * 10,
            'high': [101.0] * 10,
            'low': [99.0] * 10,
            'close': [100.5] * 10,
            'volume': [1000.0] * 10
        })
        cache_key = "market_data_BTC-USDT_1h_30_okx"
        integrator._data_cache[cache_key] = {
            'dataframe': cached_df,
            'timestamp': datetime.now(timezone.utc)
        }
        # Call method
        result_df = integrator.get_strategy_data('BTC-USDT', '1h')
        # Verify cached data is returned
        assert not result_df.empty
        assert len(result_df) == 10
        # Verify database was not called
        integrator.db_ops.market_data.get_candles.assert_not_called()

    def test_calculate_strategy_signals_success(self, integrator):
        """Test successful strategy signal calculation."""
        # Setup market data
        market_df = pd.DataFrame({
            'open': [100.0] * 150,
            'high': [101.0] * 150,
            'low': [99.0] * 150,
            'close': [100.5] * 150,
            'volume': [1000.0] * 150
        })
        # Mock strategy results
        mock_result = StrategyResult(
            timestamp=datetime.now(timezone.utc),
            symbol='BTC-USDT',
            timeframe='1h',
            strategy_name='test_strategy',
            signals=[],
            indicators_used={},
            metadata={}
        )
        # Setup mocks
        integrator.get_strategy_data = Mock(return_value=market_df)
        integrator.validate_strategy_requirements = Mock(return_value=True)
        integrator.strategy_factory.calculate_strategy_signals.return_value = [mock_result]
        # Call method
        results = integrator.calculate_strategy_signals(
            strategy_name='test_strategy',
            strategy_config={'param1': 'value1'},
            symbol='BTC-USDT',
            timeframe='1h'
        )
        # Verify results
        assert len(results) == 1
        assert results[0].strategy_name == 'test_strategy'
        assert 'symbol' in results[0].metadata
        assert results[0].metadata['symbol'] == 'BTC-USDT'
        assert results[0].metadata['data_points_used'] == 150

    def test_calculate_strategy_signals_no_data(self, integrator):
        """Test strategy signal calculation with no market data."""
        # Setup mocks
        integrator.get_strategy_data = Mock(return_value=pd.DataFrame())
        # Call method
        results = integrator.calculate_strategy_signals(
            strategy_name='test_strategy',
            strategy_config={},
            symbol='BTC-USDT',
            timeframe='1h'
        )
        # Verify empty results
        assert len(results) == 0
        # Note: validate_strategy_requirements is not called when get_strategy_data returns empty DataFrame

    def test_calculate_strategy_signals_insufficient_data(self, integrator):
        """Test strategy signal calculation with insufficient data."""
        # Setup market data
        market_df = pd.DataFrame({
            'open': [100.0] * 50,  # Insufficient data
            'high': [101.0] * 50,
            'low': [99.0] * 50,
            'close': [100.5] * 50,
            'volume': [1000.0] * 50
        })
        # Setup mocks
        integrator.get_strategy_data = Mock(return_value=market_df)
        integrator.validate_strategy_requirements = Mock(return_value=False)
        # Call method
        results = integrator.calculate_strategy_signals(
            strategy_name='test_strategy',
            strategy_config={},
            symbol='BTC-USDT',
            timeframe='1h'
        )
        # Verify empty results
        assert len(results) == 0
        integrator.strategy_factory.calculate_strategy_signals.assert_not_called()

    def test_cache_management(self, integrator):
        """Test cache management functionality."""
        # Test caching
        cache_key = "test_key"
        test_data = {
            'test': 'data',
            'timestamp': datetime.now(timezone.utc)
        }
        integrator._cache_data(cache_key, test_data)
        assert cache_key in integrator._data_cache
        # Test cache retrieval
        cached_data = integrator._get_cached_data(cache_key)
        assert cached_data is not None
        assert cached_data['test'] == 'data'
        # Test cache expiration
        expired_data = {
            'test': 'expired',
            'timestamp': datetime.now(timezone.utc) - timedelta(hours=1)  # Expired
        }
        integrator._cache_data("expired_key", expired_data)
        cached_expired = integrator._get_cached_data("expired_key")
        assert cached_expired is None  # Should be None due to expiration
        assert "expired_key" not in integrator._data_cache  # Should be removed

    def test_clear_cache(self, integrator):
        """Test cache clearing functionality."""
        # Add some cached data
        integrator._data_cache['key1'] = {'data': 'test1'}
        integrator._indicator_cache['key2'] = {'data': 'test2'}
        # Clear cache
        integrator.clear_cache()
        # Verify cache is cleared
        assert len(integrator._data_cache) == 0
        assert len(integrator._indicator_cache) == 0

    def test_get_cache_stats(self, integrator):
        """Test cache statistics retrieval."""
        # Add some cached data
        integrator._data_cache['key1'] = {'data': 'test1'}
        integrator._indicator_cache['key2'] = {'data': 'test2'}
        # Get stats
        stats = integrator.get_cache_stats()
        # Verify stats
        assert stats['data_cache_size'] == 1
        assert stats['indicator_cache_size'] == 1
        assert 'config' in stats
        assert stats['config']['cache_timeout_minutes'] == 15

    def test_calculate_indicators_batch(self, integrator):
        """Test batch indicator calculation functionality."""
        # Create test market data
        market_df = pd.DataFrame({
            'open': [100.0] * 150,
            'high': [101.0] * 150,
            'low': [99.0] * 150,
            'close': [100.5] * 150,
            'volume': [1000.0] * 150
        })
        # Mock indicator configurations
        indicator_configs = [
            {'type': 'sma', 'period': 20},
            {'type': 'ema', 'period': 12},
            {'type': 'rsi', 'period': 14}
        ]
        # Mock technical indicators responses
        mock_sma_result = pd.DataFrame({'sma': [100.0] * 150})
        mock_ema_result = pd.DataFrame({'ema': [100.2] * 150})
        mock_rsi_result = pd.DataFrame({'rsi': [50.0] * 150})
        integrator.technical_indicators.calculate.side_effect = [
            mock_sma_result,
            mock_ema_result,
            mock_rsi_result
        ]
        # Call method
        results = integrator.calculate_indicators_batch(market_df, indicator_configs)
        # Verify results
        assert len(results) == 3
        assert 'sma_period_20' in results
        assert 'ema_period_12' in results
        assert 'rsi_period_14' in results
        # Verify TechnicalIndicators was called correctly
        assert integrator.technical_indicators.calculate.call_count == 3

    def test_calculate_indicators_batch_with_caching(self, integrator):
        """Test batch indicator calculation with caching."""
        # Create test market data
        market_df = pd.DataFrame({
            'open': [100.0] * 150,
            'high': [101.0] * 150,
            'low': [99.0] * 150,
            'close': [100.5] * 150,
            'volume': [1000.0] * 150
        })
        # Mock indicator configuration
        indicator_configs = [{'type': 'sma', 'period': 20}]
        # Mock technical indicators response
        mock_result = pd.DataFrame({'sma': [100.0] * 150})
        integrator.technical_indicators.calculate.return_value = mock_result
        # First call - should calculate and cache
        results1 = integrator.calculate_indicators_batch(market_df, indicator_configs)
        assert len(results1) == 1
        assert integrator.technical_indicators.calculate.call_count == 1
        # Second call - should use cache
        results2 = integrator.calculate_indicators_batch(market_df, indicator_configs)
        assert len(results2) == 1
        assert integrator.technical_indicators.calculate.call_count == 1  # No additional calls
        # Verify cached result is returned
        pd.testing.assert_frame_equal(results1['sma_period_20'], results2['sma_period_20'])

    def test_create_indicator_key(self, integrator):
        """Test indicator key generation."""
        # Test with parameters
        config1 = {'type': 'sma', 'period': 20, 'price_column': 'close'}
        key1 = integrator._create_indicator_key(config1)
        assert key1 == 'sma_period_20_price_column_close'
        # Test without parameters
        config2 = {'type': 'macd'}
        key2 = integrator._create_indicator_key(config2)
        assert key2 == 'macd'
        # Test consistent key generation (order shouldn't matter)
        config3 = {'type': 'rsi', 'price_column': 'close', 'period': 14}
        config4 = {'type': 'rsi', 'period': 14, 'price_column': 'close'}
        key3 = integrator._create_indicator_key(config3)
        key4 = integrator._create_indicator_key(config4)
        assert key3 == key4

    def test_indicator_caching_functionality(self, integrator):
        """Test indicator caching mechanisms."""
        # Create test data
        market_df = pd.DataFrame({
            'open': [100.0] * 10,
            'high': [101.0] * 10,
            'low': [99.0] * 10,
            'close': [100.5] * 10,
            'volume': [1000.0] * 10
        })
        test_result = pd.DataFrame({'test': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
        # Test caching
        integrator._cache_indicator_result('test_key', test_result, market_df)
        # Test cache retrieval
        cached_result = integrator._get_cached_indicator('test_key', market_df)
        assert cached_result is not None
        pd.testing.assert_frame_equal(cached_result, test_result)
        # Test cache miss
        missing_result = integrator._get_cached_indicator('missing_key', market_df)
        assert missing_result is None
        # Test cache invalidation with different data size
        different_df = pd.DataFrame({
            'open': [100.0] * 5,  # Different size
            'high': [101.0] * 5,
            'low': [99.0] * 5,
            'close': [100.5] * 5,
            'volume': [1000.0] * 5
        })
        invalid_result = integrator._get_cached_indicator('test_key', different_df)
        assert invalid_result is None

    def test_calculate_strategy_signals_enhanced(self, integrator):
        """Test enhanced strategy signal calculation with vectorized operations."""
        # Setup market data
        market_df = pd.DataFrame({
            'open': [100.0] * 150,
            'high': [101.0] * 150,
            'low': [99.0] * 150,
            'close': [100.5] * 150,
            'volume': [1000.0] * 150
        })
        # Mock strategy
        mock_strategy = Mock()
        mock_strategy.get_required_indicators.return_value = [
            {'type': 'sma', 'period': 20}
        ]
        mock_result = StrategyResult(
            timestamp=datetime.now(timezone.utc),
            symbol='BTC-USDT',
            timeframe='1h',
            strategy_name='test_strategy',
            signals=[],
            indicators_used={},
            metadata={}
        )
        mock_strategy.calculate.return_value = [mock_result]
        # Setup mocks
        integrator.get_strategy_data = Mock(return_value=market_df)
        integrator.validate_strategy_requirements = Mock(return_value=True)
        integrator.strategy_factory.create_strategy.return_value = mock_strategy
        integrator.calculate_indicators_batch = Mock(return_value={'sma_period_20': pd.DataFrame({'sma': [100.0] * 150})})
        # Call enhanced method
        results = integrator.calculate_strategy_signals_enhanced(
            strategy_name='test_strategy',
            strategy_config={'param1': 'value1'},
            symbol='BTC-USDT',
            timeframe='1h'
        )
        # Verify results
        assert len(results) == 1
        assert results[0].strategy_name == 'test_strategy'
        assert 'enhanced_calculation' in results[0].metadata
        assert results[0].metadata['enhanced_calculation'] is True
        assert results[0].metadata['indicators_calculated'] == 1
        # Verify method calls
        integrator.calculate_indicators_batch.assert_called_once()
        mock_strategy.calculate.assert_called_once()

    def test_vectorized_dataframe_construction_performance(self, integrator, sample_candles):
        """Test that vectorized DataFrame construction works correctly."""
        # This test verifies the vectorized approach produces same results as iterative
        df = integrator._prepare_dataframe_from_candles(sample_candles)
        # Verify structure
        assert len(df) == 100
        assert list(df.columns) == ['open', 'high', 'low', 'close', 'volume']
        assert df.index.name is None
        # Verify data integrity (should be same as iterative approach)
        assert df['open'].iloc[0] == 100.0
        assert df['close'].iloc[-1] == 110.4  # 100.5 + 99 * 0.1
        # Verify all data types are numeric
        for col in ['open', 'high', 'low', 'close', 'volume']:
            assert pd.api.types.is_numeric_dtype(df[col])
        # Verify no NaN values
        assert not df.isnull().any().any()

    def test_enhanced_calculation_error_handling(self, integrator):
        """Test error handling in enhanced calculation methods."""
        # Test with invalid strategy name
        integrator.get_strategy_data = Mock(return_value=pd.DataFrame({'open': [100.0] * 150, 'high': [101.0] * 150, 'low': [99.0] * 150, 'close': [100.5] * 150, 'volume': [1000.0] * 150}))
        integrator.validate_strategy_requirements = Mock(return_value=True)
        integrator.strategy_factory.create_strategy.return_value = None  # Strategy creation fails
        results = integrator.calculate_strategy_signals_enhanced(
            strategy_name='invalid_strategy',
            strategy_config={},
            symbol='BTC-USDT',
            timeframe='1h'
        )
        assert len(results) == 0
        # Test indicator batch calculation with empty data
        empty_results = integrator.calculate_indicators_batch(
            pd.DataFrame(),  # Empty DataFrame
            [{'type': 'sma', 'period': 20}]
        )
        assert len(empty_results) == 0

    def test_cache_size_management(self, integrator):
        """Test that indicator cache properly manages its size."""
        # Create test data
        market_df = pd.DataFrame({
            'open': [100.0] * 10,
            'high': [101.0] * 10,
            'low': [99.0] * 10,
            'close': [100.5] * 10,
            'volume': [1000.0] * 10
        })
        test_result = pd.DataFrame({'test': [1] * 10})
        # Add more indicators than max_cached_indicators (50)
        for i in range(60):
            integrator._cache_indicator_result(f'test_key_{i}', test_result, market_df)
        # Verify cache size is managed
        assert len(integrator._indicator_cache) <= integrator.config.max_cached_indicators
        # Verify cache stats
        stats = integrator.get_cache_stats()
        assert stats['indicator_cache_size'] <= integrator.config.max_cached_indicators

    def test_analyze_indicator_dependencies(self, integrator):
        """Test indicator dependency analysis"""
        indicator_configs = [
            {'type': 'sma', 'period': 20},
            {'type': 'ema', 'period': 12},
            {'type': 'macd', 'fast': 12, 'slow': 26, 'signal': 9},
            {'type': 'rsi', 'period': 14},
            {'type': 'bollinger_bands', 'period': 20, 'std': 2}
        ]
        dependencies = integrator.analyze_indicator_dependencies(indicator_configs)
        # Check that dependencies are properly analyzed
        assert isinstance(dependencies, dict)
        assert len(dependencies) == len(indicator_configs)
        # All current indicators should have no external dependencies
        for deps in dependencies.values():
            assert isinstance(deps, list)
            assert len(deps) == 0  # No external dependencies currently

    def test_resolve_calculation_order(self, integrator):
        """Test calculation order resolution"""
        indicator_configs = [
            {'type': 'macd', 'fast': 12, 'slow': 26, 'signal': 9},
            {'type': 'sma', 'period': 20},
            {'type': 'bollinger_bands', 'period': 20, 'std': 2},
            {'type': 'ema', 'period': 12},
            {'type': 'rsi', 'period': 14},
            {'type': 'sma', 'period': 10}  # Another SMA with different period
        ]
        ordered_configs = integrator.resolve_calculation_order(indicator_configs)
        # Check that all indicators are included
        assert len(ordered_configs) == len(indicator_configs)
        # Check that SMA comes before more complex indicators
        sma_indices = [i for i, config in enumerate(ordered_configs) if config['type'] == 'sma']
        macd_indices = [i for i, config in enumerate(ordered_configs) if config['type'] == 'macd']
        # SMA should come before MACD
        if sma_indices and macd_indices:
            assert max(sma_indices) < min(macd_indices)
        # Within SMA group, smaller periods should come first
        sma_configs = [config for config in ordered_configs if config['type'] == 'sma']
        if len(sma_configs) > 1:
            periods = [config['period'] for config in sma_configs]
            assert periods == sorted(periods)

    def test_calculate_indicators_orchestrated(self, integrator):
        """Test orchestrated indicator calculation"""
        # Create test data
        test_data = pd.DataFrame({
            'open': [100.0 + i * 0.1 for i in range(150)],
            'high': [101.0 + i * 0.1 for i in range(150)],
            'low': [99.0 + i * 0.1 for i in range(150)],
            'close': [100.5 + i * 0.1 for i in range(150)],
            'volume': [1000.0 + i * 10 for i in range(150)]
        })
        indicator_configs = [
            {'type': 'sma', 'period': 5},
            {'type': 'ema', 'period': 10},
            {'type': 'rsi', 'period': 14}
        ]

        # Mock technical indicators to return proper data
        def mock_calculate(indicator_type, df, **kwargs):
            if indicator_type == 'sma':
                return pd.DataFrame({'sma': [100.0] * len(df)})
            elif indicator_type == 'ema':
                return pd.DataFrame({'ema': [101.0] * len(df)})
            elif indicator_type == 'rsi':
                return pd.DataFrame({'rsi': [50.0] * len(df)})
            return pd.DataFrame()

        integrator.technical_indicators.calculate.side_effect = mock_calculate
        # Test with caching enabled
        indicators_data = integrator.calculate_indicators_orchestrated(
            market_df=test_data,
            indicator_configs=indicator_configs,
            enable_caching=True
        )
        # Verify results
        assert isinstance(indicators_data, dict)
        assert len(indicators_data) == 3
        # Check that each indicator has data
        for indicator_key, data in indicators_data.items():
            assert isinstance(data, pd.DataFrame)
            assert not data.empty
        # Test second call to verify caching
        indicators_data_cached = integrator.calculate_indicators_orchestrated(
            market_df=test_data,
            indicator_configs=indicator_configs,
            enable_caching=True
        )
        # Results should be identical
        assert len(indicators_data_cached) == len(indicators_data)
        # Test with caching disabled
        indicators_data_no_cache = integrator.calculate_indicators_orchestrated(
            market_df=test_data,
            indicator_configs=indicator_configs,
            enable_caching=False
        )
        assert len(indicators_data_no_cache) == len(indicators_data)

    def test_calculate_indicators_orchestrated_empty_data(self, integrator):
        """Test orchestrated calculation with empty data"""
        empty_df = pd.DataFrame()
        indicator_configs = [{'type': 'sma', 'period': 5}]
        result = integrator.calculate_indicators_orchestrated(
            market_df=empty_df,
            indicator_configs=indicator_configs
        )
        assert isinstance(result, dict)
        assert len(result) == 0

    def test_calculate_indicators_orchestrated_error_handling(self, integrator):
        """Test orchestrated calculation error handling"""
        test_data = pd.DataFrame({
            'open': [100.0 + i * 0.1 for i in range(150)],
            'high': [101.0 + i * 0.1 for i in range(150)],
            'low': [99.0 + i * 0.1 for i in range(150)],
            'close': [100.5 + i * 0.1 for i in range(150)],
            'volume': [1000.0 + i * 10 for i in range(150)]
        })
        # Include invalid indicator type
        indicator_configs = [
            {'type': 'sma', 'period': 5},
            {'type': 'invalid_indicator', 'period': 10}
        ]
        indicators_data = integrator.calculate_indicators_orchestrated(
            market_df=test_data,
            indicator_configs=indicator_configs,
            enable_caching=True
        )
        # Should handle errors gracefully
        assert isinstance(indicators_data, dict)
        # Valid indicator should still be calculated
        valid_keys = [k for k in indicators_data.keys() if 'sma' in k.lower()]
        assert len(valid_keys) > 0

    def test_calculate_strategy_signals_orchestrated(self, integrator):
        """Test fully orchestrated strategy signal calculation"""
        # Mock database operations to return test data
        test_data = pd.DataFrame({
            'timestamp': pd.date_range(start='2023-01-01', periods=150, freq='1h'),
            'open': [100.0 + i * 0.1 for i in range(150)],
            'high': [101.0 + i * 0.1 for i in range(150)],
            'low': [99.0 + i * 0.1 for i in range(150)],
            'close': [100.5 + i * 0.1 for i in range(150)],
            'volume': [1000.0 + i * 10 for i in range(150)]
        })

        def mock_get_candles(*args, **kwargs):
            # Build lightweight OHLCVCandle stand-ins from the test DataFrame rows
            return [
                type('OHLCVCandle', (), {
                    'start_time': row['timestamp'],
                    'end_time': row['timestamp'] + pd.Timedelta(minutes=1),
                    'open': row['open'],
                    'high': row['high'],
                    'low': row['low'],
                    'close': row['close'],
                    'volume': row['volume']
                })()
                for _, row in test_data.iterrows()
            ]

        with patch.object(integrator.db_ops.market_data, 'get_candles', side_effect=mock_get_candles):
            results = integrator.calculate_strategy_signals_orchestrated(
                strategy_name='ema_crossover',
                strategy_config={'fast_period': 5, 'slow_period': 10},
                symbol='BTC/USDT',
                timeframe='1m',
                days_back=1,
                enable_caching=True
            )
            # Verify results
            assert isinstance(results, list)
            if results:  # Only check if we have results
                for result in results:
                    assert isinstance(result, StrategyResult)
                    # Check metadata includes orchestration info
                    assert result.metadata is not None
                    assert result.metadata.get('calculation_method') == 'orchestrated'
                    assert result.metadata.get('orchestrated_calculation') is True
                    assert 'symbol' in result.metadata
                    assert 'timeframe' in result.metadata
                    assert 'data_points_used' in result.metadata
                    assert 'indicators_calculated' in result.metadata

    def test_calculate_strategy_signals_orchestrated_no_data(self, integrator):
        """Test orchestrated calculation with no market data"""
        def mock_get_candles_empty(*args, **kwargs):
            return []

        with patch.object(integrator.db_ops.market_data, 'get_candles', side_effect=mock_get_candles_empty):
            results = integrator.calculate_strategy_signals_orchestrated(
                strategy_name='ema_crossover',
                strategy_config={'fast_period': 5, 'slow_period': 10},
                symbol='BTC/USDT',
                timeframe='1m'
            )
            assert isinstance(results, list)
            assert len(results) == 0

    def test_calculate_strategy_signals_orchestrated_invalid_strategy(self, integrator):
        """Test orchestrated calculation with invalid strategy"""
        test_data = pd.DataFrame({
            'timestamp': pd.date_range(start='2023-01-01', periods=150, freq='1h'),
            'open': [100.0 + i * 0.1 for i in range(150)],
            'high': [101.0 + i * 0.1 for i in range(150)],
            'low': [99.0 + i * 0.1 for i in range(150)],
            'close': [100.5 + i * 0.1 for i in range(150)],
            'volume': [1000.0 + i * 10 for i in range(150)]
        })

        def mock_get_candles(*args, **kwargs):
            # Same lightweight candle stand-ins as above
            return [
                type('OHLCVCandle', (), {
                    'start_time': row['timestamp'],
                    'end_time': row['timestamp'] + pd.Timedelta(minutes=1),
                    'open': row['open'],
                    'high': row['high'],
                    'low': row['low'],
                    'close': row['close'],
                    'volume': row['volume']
                })()
                for _, row in test_data.iterrows()
            ]

        with patch.object(integrator.db_ops.market_data, 'get_candles', side_effect=mock_get_candles):
            results = integrator.calculate_strategy_signals_orchestrated(
                strategy_name='nonexistent_strategy',
                strategy_config={},
                symbol='BTC/USDT',
                timeframe='1m'
            )
            assert isinstance(results, list)
            assert len(results) == 0

    def test_get_calculation_performance_stats(self, integrator):
        """Test calculation performance statistics retrieval"""
        stats = integrator.get_calculation_performance_stats()
        # Should return performance statistics structure
        assert 'cache_performance' in stats
        assert 'available_methods' in stats
        assert 'recommended_method' in stats
        assert 'performance_tips' in stats
        # Check available methods
        available_methods = stats['available_methods']
        assert 'calculate_strategy_signals' in available_methods
        assert 'calculate_strategy_signals_enhanced' in available_methods
        assert 'calculate_strategy_signals_orchestrated' in available_methods
        # Check recommended method
        assert stats['recommended_method'] == 'calculate_strategy_signals_orchestrated'

    def test_cache_persistence(self, integrator):
        """Test cache persistence functionality"""
        # Clear any existing cache
        integrator.clear_cache()
        # Add some data to cache
        test_df = pd.DataFrame({
            'sma': [1.0, 2.0, 3.0],
            'timestamp': pd.date_range('2023-01-01', periods=3, freq='1h')
        })
        # Cache an indicator result
        integrator._cache_indicator_result('test_sma', test_df, test_df)
        # Verify it's in memory cache
        assert len(integrator._indicator_cache) == 1
        # Save to persistent storage
        integrator._save_persistent_cache()
        # Verify cache file exists
        assert integrator._persistent_cache_file.exists()
        # Clear memory cache
        integrator._indicator_cache.clear()
        assert len(integrator._indicator_cache) == 0
        # Load from persistent storage
        integrator._load_persistent_cache()
        # Verify data was restored
        assert len(integrator._indicator_cache) == 1
        assert 'test_sma' in integrator._indicator_cache

    def test_cross_strategy_cache_sharing(self, integrator):
        """Test cross-strategy cache sharing functionality"""
        # Clear any existing cache
        integrator.clear_cache()
        # Create test indicator data
        test_df = pd.DataFrame({
            'sma': [10.0, 11.0, 12.0],
            'value': [100.0, 101.0, 102.0]
        })
        # Share an indicator result
        integrator.share_indicator_result('shared_sma_20', test_df, len(test_df))
        # Verify it was shared
        cached_result = integrator.get_shared_indicator_cache('shared_sma_20', len(test_df))
        assert cached_result is not None
        assert len(cached_result) == len(test_df)
        assert 'sma' in cached_result.columns
        # Check sharing statistics
        stats = integrator.get_cache_sharing_stats()
        assert stats['shared_cache_entries'] == 1
        assert stats['total_cached_indicators'] == 1
        assert stats['sharing_efficiency'] == 1.0
        # Test cache miss
        missing_result = integrator.get_shared_indicator_cache('nonexistent_indicator')
        assert missing_result is None


class TestFactoryFunction:
    """Test factory function for strategy data integrator."""

    def test_get_strategy_data_integrator_default(self):
        """Test factory function with default configuration."""
        with patch('strategies.data_integration.get_database_operations'), \
             patch('strategies.data_integration.TechnicalIndicators'), \
             patch('strategies.data_integration.StrategyFactory'):
            integrator = get_strategy_data_integrator()
            assert isinstance(integrator, StrategyDataIntegrator)
            assert integrator.config.default_days_back == 30

    def test_get_strategy_data_integrator_custom_config(self):
        """Test factory function with custom configuration."""
        config = StrategyDataIntegrationConfig(default_days_back=60)
        with patch('strategies.data_integration.get_database_operations'), \
             patch('strategies.data_integration.TechnicalIndicators'), \
             patch('strategies.data_integration.StrategyFactory'):
            integrator = get_strategy_data_integrator(config)
            assert isinstance(integrator, StrategyDataIntegrator)
            assert integrator.config.default_days_back == 60