Add initial implementation of the Orderflow Backtest System with OBI and CVD metrics integration, including core modules for storage, strategies, and visualization. Introduced persistent metrics storage in SQLite, optimized memory usage, and enhanced documentation.

This commit is contained in:
Simon Moisy
2025-08-26 17:22:07 +08:00
parent 63f723820a
commit fa6df78c1e
52 changed files with 7039 additions and 1 deletion

View File

@@ -0,0 +1,93 @@
"""Tests for main.py integration with metrics system."""
import sys
import sqlite3
import tempfile
from pathlib import Path
from unittest.mock import patch, MagicMock
sys.path.append(str(Path(__file__).resolve().parents[1]))
# Mock typer to avoid import issues in tests
sys.modules['typer'] = MagicMock()
from storage import Storage
from strategies import DefaultStrategy
def test_strategy_database_integration():
    """Strategy should receive the database path and read back stored metrics."""
    with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as handle:
        db_path = Path(handle.name)
    try:
        # Seed the smallest schema the workflow needs: one book row, one trade.
        with sqlite3.connect(str(db_path)) as conn:
            conn.execute("""
                CREATE TABLE book (
                    id INTEGER PRIMARY KEY,
                    bids TEXT NOT NULL,
                    asks TEXT NOT NULL,
                    timestamp INTEGER NOT NULL
                )
            """)
            conn.execute("""
                CREATE TABLE trades (
                    id INTEGER PRIMARY KEY,
                    trade_id REAL NOT NULL,
                    price REAL NOT NULL,
                    size REAL NOT NULL,
                    side TEXT NOT NULL,
                    timestamp INTEGER NOT NULL
                )
            """)
            conn.execute(
                "INSERT INTO book (id, bids, asks, timestamp) VALUES (?, ?, ?, ?)",
                (1, "[(50000.0, 10.0, 0, 1)]", "[(50001.0, 5.0, 0, 1)]", 1000),
            )
            conn.execute(
                "INSERT INTO trades (id, trade_id, price, size, side, timestamp) VALUES (?, ?, ?, ?, ?, ?)",
                (1, 1.0, 50000.0, 3.0, "buy", 1000),
            )
            conn.commit()

        storage = Storage("BTC-USDT")
        strategy = DefaultStrategy("BTC-USDT")

        # Mirror the main.py workflow: wire the db path, then build and persist.
        strategy.set_db_path(db_path)
        storage.build_booktick_from_db(db_path, None)

        assert strategy._db_path == db_path

        # The single snapshot at ts=1000 must have produced one stored metric.
        stored = strategy.load_stored_metrics(1000, 1000)
        assert len(stored) == 1
        assert stored[0].timestamp == 1000

        # The strategy entry point must consume the book without error.
        strategy.on_booktick(storage.book)
    finally:
        db_path.unlink(missing_ok=True)
def test_strategy_backwards_compatibility():
    """Without a db path the strategy must fall back to real-time calculation."""
    storage = Storage("BTC-USDT")
    strategy = DefaultStrategy("BTC-USDT")

    # No set_db_path() call here: legacy callers that never configure a
    # database must keep working unchanged.
    assert len(storage.book.snapshots) == 0

    # An empty book must be handled without raising.
    strategy.on_booktick(storage.book)

    # OBI over an empty book is simply an empty series.
    assert strategy.compute_OBI(storage.book) == []

View File

@@ -0,0 +1,108 @@
"""Tests for main.py visualization workflow."""
import sys
import sqlite3
import tempfile
from pathlib import Path
from unittest.mock import patch, MagicMock
sys.path.append(str(Path(__file__).resolve().parents[1]))
# Mock typer to avoid import issues in tests
sys.modules['typer'] = MagicMock()
from storage import Storage
from strategies import DefaultStrategy
from visualizer import Visualizer
def test_main_workflow_separation():
    """Test that main.py workflow properly separates strategy and visualization.

    Fix: the previous fake fig/axes stored zero-argument ``lambda: None``
    objects in ad-hoc class dicts; attribute access turned them into bound
    methods, so any real call to ``ax.clear()`` or ``fig.canvas.draw_idle()``
    would raise TypeError. MagicMock accepts any call signature.
    """
    with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as tmp_file:
        db_path = Path(tmp_file.name)
    try:
        # Create minimal test database: one book snapshot plus one trade.
        with sqlite3.connect(str(db_path)) as conn:
            conn.execute("""
                CREATE TABLE book (
                    id INTEGER PRIMARY KEY,
                    bids TEXT NOT NULL,
                    asks TEXT NOT NULL,
                    timestamp INTEGER NOT NULL
                )
            """)
            conn.execute("""
                CREATE TABLE trades (
                    id INTEGER PRIMARY KEY,
                    trade_id REAL NOT NULL,
                    price REAL NOT NULL,
                    size REAL NOT NULL,
                    side TEXT NOT NULL,
                    timestamp INTEGER NOT NULL
                )
            """)
            conn.execute(
                "INSERT INTO book (id, bids, asks, timestamp) VALUES (?, ?, ?, ?)",
                (1, "[(50000.0, 10.0, 0, 1)]", "[(50001.0, 5.0, 0, 1)]", 1000),
            )
            conn.execute(
                "INSERT INTO trades (id, trade_id, price, size, side, timestamp) VALUES (?, ?, ?, ?, ?, ?)",
                (1, 1.0, 50000.0, 3.0, "buy", 1000),
            )
            conn.commit()

        storage = Storage("BTC-USDT")
        strategy = DefaultStrategy("BTC-USDT")  # No visualization parameter

        # Mock matplotlib so no GUI backend is touched during the test.
        with patch('matplotlib.pyplot.subplots') as mock_subplots:
            mock_fig = MagicMock()
            mock_axes = tuple(MagicMock() for _ in range(4))
            mock_subplots.return_value = (mock_fig, mock_axes)
            visualizer = Visualizer(window_seconds=60, max_bars=500)

            # This simulates the new main.py workflow.
            strategy.set_db_path(db_path)
            visualizer.set_db_path(db_path)
            storage.build_booktick_from_db(db_path, None)

            # Strategy analyzes metrics without any visualization coupling.
            strategy.on_booktick(storage.book)
            assert strategy._db_path == db_path
            assert not hasattr(strategy, 'visualizer') or strategy.visualizer is None

            # Visualizer independently reads the persisted metrics.
            assert visualizer._db_path == db_path
            metrics = visualizer._load_stored_metrics(1000, 1000)
            assert len(metrics) == 1

            # Visualization update works on its own.
            with patch.object(visualizer, '_draw') as mock_draw:
                visualizer.update_from_book(storage.book)
                mock_draw.assert_called_once()
    finally:
        db_path.unlink(missing_ok=True)
def test_strategy_has_no_visualization_dependency():
    """Strategy must be constructible and runnable with no visualization at all."""
    strategy = DefaultStrategy("BTC-USDT")

    # Either the attribute is gone entirely or it is explicitly None.
    assert not hasattr(strategy, 'visualizer') or strategy.visualizer is None

    # A bare Book must be processed without any visualization setup.
    from models import Book
    strategy.on_booktick(Book())

View File

@@ -0,0 +1,142 @@
"""Tests for MetricCalculator OBI calculation and best bid/ask extraction."""
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parents[1]))
from models import MetricCalculator, BookSnapshot, OrderbookLevel, Trade
def test_calculate_obi_normal_case():
    """A bid-heavy book yields a positive order-book imbalance."""
    snap = BookSnapshot(
        id=1,
        timestamp=1000,
        bids={
            50000.0: OrderbookLevel(price=50000.0, size=10.0, liquidation_count=0, order_count=1),
            49999.0: OrderbookLevel(price=49999.0, size=5.0, liquidation_count=0, order_count=1),
        },
        asks={
            50001.0: OrderbookLevel(price=50001.0, size=3.0, liquidation_count=0, order_count=1),
            50002.0: OrderbookLevel(price=50002.0, size=2.0, liquidation_count=0, order_count=1),
        },
    )
    # Bids total 15.0, asks total 5.0 -> OBI = (15 - 5) / (15 + 5) = 0.5.
    assert MetricCalculator.calculate_obi(snap) == 0.5
def test_calculate_obi_zero_volume():
    """An empty book has no volume on either side, so OBI degrades to 0.0."""
    empty_snap = BookSnapshot(id=1, timestamp=1000, bids={}, asks={})
    assert MetricCalculator.calculate_obi(empty_snap) == 0.0
def test_calculate_obi_ask_heavy():
    """An ask-dominated book yields a negative imbalance."""
    snap = BookSnapshot(
        id=1,
        timestamp=1000,
        bids={50000.0: OrderbookLevel(price=50000.0, size=2.0, liquidation_count=0, order_count=1)},
        asks={50001.0: OrderbookLevel(price=50001.0, size=8.0, liquidation_count=0, order_count=1)},
    )
    # Bids total 2.0, asks total 8.0 -> OBI = (2 - 8) / (2 + 8) = -0.6.
    assert MetricCalculator.calculate_obi(snap) == -0.6
def test_get_best_bid_ask_normal():
    """Best bid is the highest bid price; best ask is the lowest ask price."""
    def unit_level(price):
        # 1.0-size placeholder level; only the price matters for this test.
        return OrderbookLevel(price=price, size=1.0, liquidation_count=0, order_count=1)

    snap = BookSnapshot(
        id=1,
        timestamp=1000,
        bids={p: unit_level(p) for p in (50000.0, 49999.0, 49998.0)},
        asks={p: unit_level(p) for p in (50001.0, 50002.0, 50003.0)},
    )
    best_bid, best_ask = MetricCalculator.get_best_bid_ask(snap)
    assert best_bid == 50000.0  # highest of the bids
    assert best_ask == 50001.0  # lowest of the asks
def test_get_best_bid_ask_empty():
    """With no levels on either side, both best prices come back as None."""
    empty_snap = BookSnapshot(id=1, timestamp=1000, bids={}, asks={})
    bid, ask = MetricCalculator.get_best_bid_ask(empty_snap)
    assert bid is None
    assert ask is None
def test_calculate_volume_delta_buy_heavy():
    """Volume delta is signed: buys add, sells subtract."""
    tape = [
        Trade(id=1, trade_id=1.0, price=50000.0, size=10.0, side="buy", timestamp=1000),
        Trade(id=2, trade_id=2.0, price=50001.0, size=5.0, side="buy", timestamp=1000),
        Trade(id=3, trade_id=3.0, price=49999.0, size=3.0, side="sell", timestamp=1000),
    ]
    # 15.0 bought minus 3.0 sold -> +12.0.
    assert MetricCalculator.calculate_volume_delta(tape) == 12.0
def test_calculate_volume_delta_sell_heavy():
    """When sells outweigh buys the delta goes negative."""
    tape = [
        Trade(id=1, trade_id=1.0, price=50000.0, size=2.0, side="buy", timestamp=1000),
        Trade(id=2, trade_id=2.0, price=49999.0, size=8.0, side="sell", timestamp=1000),
    ]
    # 2.0 bought minus 8.0 sold -> -6.0.
    assert MetricCalculator.calculate_volume_delta(tape) == -6.0
def test_calculate_volume_delta_no_trades():
    """No trades means no delta."""
    assert MetricCalculator.calculate_volume_delta([]) == 0.0
def test_calculate_cvd_incremental():
    """CVD accumulates successive volume deltas onto the running total."""
    running = MetricCalculator.calculate_cvd(0.0, 10.0)
    assert running == 10.0
    running = MetricCalculator.calculate_cvd(running, -5.0)
    assert running == 5.0
    running = MetricCalculator.calculate_cvd(running, 15.0)
    assert running == 20.0
def test_calculate_cvd_reset_functionality():
    """Passing 0.0 as the previous value models a CVD reset."""
    assert MetricCalculator.calculate_cvd(0.0, 25.0) == 25.0

View File

@@ -0,0 +1,126 @@
"""Tests for SQLiteMetricsRepository table creation and schema validation."""
import sys
import sqlite3
import tempfile
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parents[1]))
from repositories.sqlite_metrics_repository import SQLiteMetricsRepository
from models import Metric
def test_create_metrics_table():
    """Metrics table must be created with the expected columns and indexes."""
    with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as handle:
        db_path = Path(handle.name)
    try:
        repo = SQLiteMetricsRepository(db_path)
        with repo.connect() as conn:
            repo.create_metrics_table(conn)
            assert repo.table_exists(conn, "metrics")

            # Inspect the schema directly via PRAGMA.
            cursor = conn.cursor()
            cursor.execute("PRAGMA table_info(metrics)")
            present = {row[1] for row in cursor.fetchall()}
            for col in ("id", "snapshot_id", "timestamp", "obi", "cvd", "best_bid", "best_ask"):
                assert col in present, f"Column {col} missing from metrics table"

            # Both lookup indexes must exist.
            cursor.execute("PRAGMA index_list(metrics)")
            index_names = {row[1] for row in cursor.fetchall()}
            assert "idx_metrics_timestamp" in index_names
            assert "idx_metrics_snapshot_id" in index_names
    finally:
        db_path.unlink(missing_ok=True)
def test_insert_metrics_batch():
    """Batch insert must persist every Metric row, including NULL bid/ask."""
    with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as handle:
        db_path = Path(handle.name)
    try:
        repo = SQLiteMetricsRepository(db_path)
        with repo.connect() as conn:
            repo.create_metrics_table(conn)
            batch = [
                Metric(snapshot_id=1, timestamp=1000, obi=0.5, cvd=100.0, best_bid=50000.0, best_ask=50001.0),
                Metric(snapshot_id=2, timestamp=1001, obi=-0.2, cvd=150.0, best_bid=50002.0, best_ask=50003.0),
                Metric(snapshot_id=3, timestamp=1002, obi=0.0, cvd=125.0),  # best bid/ask left unset
            ]
            repo.insert_metrics_batch(conn, batch)
            conn.commit()

            cursor = conn.cursor()
            cursor.execute("SELECT COUNT(*) FROM metrics")
            assert cursor.fetchone()[0] == 3

            cursor.execute("SELECT snapshot_id, timestamp, obi, cvd, best_bid, best_ask FROM metrics ORDER BY timestamp")
            rows = cursor.fetchall()
            # NOTE(review): timestamps come back as TEXT ("1000") even though
            # Metric.timestamp is an int — presumably the metrics schema
            # declares the column as TEXT; worth confirming against the repo.
            assert rows[0] == (1, "1000", 0.5, 100.0, 50000.0, 50001.0)
            assert rows[1] == (2, "1001", -0.2, 150.0, 50002.0, 50003.0)
            assert rows[2] == (3, "1002", 0.0, 125.0, None, None)
    finally:
        db_path.unlink(missing_ok=True)
def test_load_metrics_by_timerange():
    """Range query is inclusive at both ends and ordered by timestamp."""
    with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as handle:
        db_path = Path(handle.name)
    try:
        repo = SQLiteMetricsRepository(db_path)
        with repo.connect() as conn:
            repo.create_metrics_table(conn)
            seed = [
                Metric(snapshot_id=1, timestamp=1000, obi=0.1, cvd=10.0, best_bid=50000.0, best_ask=50001.0),
                Metric(snapshot_id=2, timestamp=1005, obi=0.2, cvd=20.0, best_bid=50002.0, best_ask=50003.0),
                Metric(snapshot_id=3, timestamp=1010, obi=0.3, cvd=30.0, best_bid=50004.0, best_ask=50005.0),
                Metric(snapshot_id=4, timestamp=1015, obi=0.4, cvd=40.0, best_bid=50006.0, best_ask=50007.0),
            ]
            repo.insert_metrics_batch(conn, seed)
            conn.commit()

            # An interior window picks up exactly the two middle rows.
            window = repo.load_metrics_by_timerange(conn, 1003, 1012)
            assert [(m.timestamp, m.obi) for m in window] == [(1005, 0.2), (1010, 0.3)]

            # Out-of-range window is empty; a degenerate window still hits one row.
            assert len(repo.load_metrics_by_timerange(conn, 2000, 3000)) == 0
            assert len(repo.load_metrics_by_timerange(conn, 1000, 1000)) == 1
    finally:
        db_path.unlink(missing_ok=True)

View File

@@ -0,0 +1,17 @@
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parents[1]))
from parsers.orderbook_parser import OrderbookParser
def test_parse_side_malformed_text_does_not_raise():
    """A malformed orderbook string is logged and ignored, never raised."""
    parser = OrderbookParser(debug=False)
    result = {}
    # Truncated tuple list that literal_eval cannot parse.
    parser.parse_side("[[100.0, 'missing tuple closing'", result)
    # The target dict must be left untouched.
    assert result == {}

View File

@@ -0,0 +1,53 @@
import sys
from pathlib import Path
import sqlite3
sys.path.append(str(Path(__file__).resolve().parents[1]))
from repositories.sqlite_repository import SQLiteOrderflowRepository
def test_iterate_book_rows_batches(tmp_path):
    """iterate_book_rows must yield every row exactly once, ordered by timestamp."""
    db_path = tmp_path / "iter.db"
    with sqlite3.connect(str(db_path)) as conn:
        cur = conn.cursor()
        cur.execute(
            """
            CREATE TABLE book (
                id INTEGER PRIMARY KEY,
                bids TEXT NOT NULL,
                asks TEXT NOT NULL,
                timestamp INTEGER NOT NULL
            )
            """
        )
        cur.execute(
            """
            CREATE TABLE trades (
                id INTEGER PRIMARY KEY,
                trade_id REAL NOT NULL,
                price REAL NOT NULL,
                size REAL NOT NULL,
                side TEXT NOT NULL,
                timestamp INTEGER NOT NULL
            )
            """
        )
        # 12 rows: fewer than the repo's fetchmany batch size (5000), but the
        # iteration must still be complete and ordered.
        bids_text = str([(100.0, 1.0, 0, 1)])
        asks_text = str([(101.0, 1.0, 0, 1)])
        for offset in range(12):
            cur.execute(
                "INSERT INTO book (id, bids, asks, timestamp) VALUES (?, ?, ?, ?)",
                (offset + 1, bids_text, asks_text, 1000 + offset),
            )
        conn.commit()

    repo = SQLiteOrderflowRepository(db_path)
    with repo.connect() as conn:
        rows = list(repo.iterate_book_rows(conn))
        assert len(rows) == 12
        # Rows arrive in ascending timestamp order.
        stamps = [row[3] for row in rows]
        assert stamps == sorted(stamps)
        # The table-name allowlist in count_rows accepts "book".
        assert repo.count_rows(conn, "book") == 12

View File

@@ -0,0 +1,83 @@
from pathlib import Path
from datetime import datetime, timezone
import sqlite3
import sys
# Ensure project root is on sys.path for direct module imports
sys.path.append(str(Path(__file__).resolve().parents[1]))
from storage import Storage
def _init_db(path: Path) -> None:
    """Create the empty ``book`` and ``trades`` tables used by these tests."""
    book_ddl = """
        CREATE TABLE IF NOT EXISTS book (
            id INTEGER PRIMARY KEY,
            bids TEXT NOT NULL,
            asks TEXT NOT NULL,
            timestamp INTEGER NOT NULL
        )
    """
    trades_ddl = """
        CREATE TABLE IF NOT EXISTS trades (
            id INTEGER PRIMARY KEY,
            trade_id REAL NOT NULL,
            price REAL NOT NULL,
            size REAL NOT NULL,
            side TEXT NOT NULL,
            timestamp INTEGER NOT NULL
        )
    """
    # IF NOT EXISTS makes repeated initialisation a no-op.
    with sqlite3.connect(str(path)) as conn:
        for ddl in (book_ddl, trades_ddl):
            conn.execute(ddl)
        conn.commit()
def test_storage_builds_snapshots_and_attaches_trades(tmp_path):
    """Invalid snapshots (empty asks) are dropped; trades attach by timestamp."""
    db_path = tmp_path / "test.db"
    _init_db(db_path)

    ts = 1_725_000_000
    valid_bids = str([(100.0, 1.0, 0, 1), (99.5, 2.0, 0, 1)])
    valid_asks = str([(100.5, 1.5, 0, 1), (101.0, 1.0, 0, 1)])
    empty_asks = str([])

    with sqlite3.connect(str(db_path)) as conn:
        cur = conn.cursor()
        # Row 1 is complete; row 2 has empty asks and must be skipped.
        cur.execute("INSERT INTO book (id, bids, asks, timestamp) VALUES (?, ?, ?, ?)",
                    (1, valid_bids, valid_asks, ts))
        cur.execute("INSERT INTO book (id, bids, asks, timestamp) VALUES (?, ?, ?, ?)",
                    (2, valid_bids, empty_asks, ts + 1))
        # Two trades share the valid snapshot's timestamp.
        cur.execute(
            "INSERT INTO trades (id, trade_id, price, size, side, timestamp) VALUES (?, ?, ?, ?, ?, ?)",
            (1, 1.0, 100.25, 0.5, "buy", ts),
        )
        cur.execute(
            "INSERT INTO trades (id, trade_id, price, size, side, timestamp) VALUES (?, ?, ?, ?, ?, ?)",
            (2, 2.0, 100.75, 0.75, "sell", ts),
        )
        conn.commit()

    storage = Storage("BTC-USDT")
    storage.build_booktick_from_db(db_path, datetime.fromtimestamp(ts, tz=timezone.utc))

    # Only the complete snapshot survives.
    assert len(storage.book.snapshots) == 1
    snapshot = storage.book.snapshots[0]
    assert snapshot.timestamp == ts
    assert len(snapshot.bids) == 2
    assert len(snapshot.asks) == 2

    # Both trades are attached to the matching snapshot.
    assert len(snapshot.trades) == 2
    assert sorted(trade.side for trade in snapshot.trades) == ["buy", "sell"]

View File

@@ -0,0 +1,88 @@
"""Tests for Storage metrics integration."""
import sys
import sqlite3
import tempfile
from pathlib import Path
from datetime import datetime
sys.path.append(str(Path(__file__).resolve().parents[1]))
from storage import Storage
from repositories.sqlite_metrics_repository import SQLiteMetricsRepository
def test_storage_calculates_and_stores_metrics():
    """build_booktick_from_db must compute OBI/CVD and persist them to SQLite."""
    with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as handle:
        db_path = Path(handle.name)
    try:
        with sqlite3.connect(str(db_path)) as conn:
            conn.execute("""
                CREATE TABLE book (
                    id INTEGER PRIMARY KEY,
                    bids TEXT NOT NULL,
                    asks TEXT NOT NULL,
                    timestamp INTEGER NOT NULL
                )
            """)
            conn.execute("""
                CREATE TABLE trades (
                    id INTEGER PRIMARY KEY,
                    trade_id REAL NOT NULL,
                    price REAL NOT NULL,
                    size REAL NOT NULL,
                    side TEXT NOT NULL,
                    timestamp INTEGER NOT NULL
                )
            """)
            # One snapshot: 15.0 total bid volume vs 5.0 total ask volume.
            conn.execute(
                "INSERT INTO book (id, bids, asks, timestamp) VALUES (?, ?, ?, ?)",
                (1,
                 "[(50000.0, 10.0, 0, 1), (49999.0, 5.0, 0, 1)]",
                 "[(50001.0, 3.0, 0, 1), (50002.0, 2.0, 0, 1)]",
                 1000),
            )
            # Trades at the same timestamp: 8.0 bought vs 3.0 sold.
            conn.execute(
                "INSERT INTO trades (id, trade_id, price, size, side, timestamp) VALUES (?, ?, ?, ?, ?, ?)",
                (1, 1.0, 50000.0, 8.0, "buy", 1000),
            )
            conn.execute(
                "INSERT INTO trades (id, trade_id, price, size, side, timestamp) VALUES (?, ?, ?, ?, ?, ?)",
                (2, 2.0, 50001.0, 3.0, "sell", 1000),
            )
            conn.commit()

        storage = Storage("BTC-USDT")
        storage.build_booktick_from_db(db_path, datetime.now())

        metrics_repo = SQLiteMetricsRepository(db_path)
        with metrics_repo.connect() as conn:
            assert metrics_repo.table_exists(conn, "metrics")
            stored = metrics_repo.load_metrics_by_timerange(conn, 1000, 1000)
            assert len(stored) == 1
            metric = stored[0]
            assert abs(metric.obi - 0.5) < 0.001  # (15 - 5) / (15 + 5)
            assert abs(metric.cvd - 5.0) < 0.001  # 8.0 bought - 3.0 sold
            assert metric.best_bid == 50000.0
            assert metric.best_ask == 50001.0

        # The in-memory book is still populated for legacy consumers.
        assert len(storage.book.snapshots) == 1
    finally:
        db_path.unlink(missing_ok=True)

View File

@@ -0,0 +1,48 @@
import sys
from pathlib import Path
import pytest
# Ensure project root is on sys.path for direct module imports
sys.path.append(str(Path(__file__).resolve().parents[1]))
from storage import Storage, OrderbookLevel
def test_parse_orderbook_side_happy_path():
    """Well-formed level tuples are parsed into OrderbookLevel entries."""
    storage = Storage("BTC-USDT")
    text = str([(100.0, 1.5, 0, 2), (101.0, 2.25, 1, 3)])
    parsed = {}
    storage._parse_orderbook_side(text, parsed)

    assert 100.0 in parsed and 101.0 in parsed
    first = parsed[100.0]
    assert isinstance(first, OrderbookLevel)
    # Every tuple field maps onto the matching level attribute.
    assert (first.price, first.size, first.liquidation_count, first.order_count) == (100.0, 1.5, 0, 2)
    second = parsed[101.0]
    assert (second.size, second.liquidation_count, second.order_count) == (2.25, 1, 3)
def test_parse_orderbook_side_ignores_zero_size():
    """Levels with zero size are filtered out during parsing."""
    storage = Storage("BTC-USDT")
    text = str([(100.0, 0.0, 0, 0), (101.0, 1.0, 0, 1)])
    parsed = {}
    storage._parse_orderbook_side(text, parsed)
    # The zero-size level is dropped; the non-zero one is kept.
    assert 100.0 not in parsed
    assert 101.0 in parsed

View File

@@ -0,0 +1,112 @@
"""Tests for DefaultStrategy metrics integration."""
import sys
import sqlite3
import tempfile
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parents[1]))
from strategies import DefaultStrategy
from models import Book, BookSnapshot, OrderbookLevel, Metric
from repositories.sqlite_metrics_repository import SQLiteMetricsRepository
def test_strategy_uses_metric_calculator():
    """compute_OBI must apply the standard imbalance formula per snapshot."""
    strategy = DefaultStrategy("BTC-USDT")
    book = Book()
    book.add_snapshot(
        BookSnapshot(
            id=1,
            timestamp=1000,
            bids={50000.0: OrderbookLevel(price=50000.0, size=10.0, liquidation_count=0, order_count=1)},
            asks={50001.0: OrderbookLevel(price=50001.0, size=5.0, liquidation_count=0, order_count=1)},
        )
    )
    values = strategy.compute_OBI(book)
    assert len(values) == 1
    # OBI = (10 - 5) / (10 + 5) = 1/3.
    assert abs(values[0] - 0.333333) < 0.001
def test_strategy_loads_stored_metrics():
    """Strategy reads back persisted metrics through its configured db path."""
    with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as handle:
        db_path = Path(handle.name)
    try:
        # Persist three metrics directly through the repository.
        repo = SQLiteMetricsRepository(db_path)
        with repo.connect() as conn:
            repo.create_metrics_table(conn)
            repo.insert_metrics_batch(conn, [
                Metric(snapshot_id=1, timestamp=1000, obi=0.1, cvd=10.0, best_bid=50000.0, best_ask=50001.0),
                Metric(snapshot_id=2, timestamp=1001, obi=0.2, cvd=15.0, best_bid=50002.0, best_ask=50003.0),
                Metric(snapshot_id=3, timestamp=1002, obi=0.3, cvd=20.0, best_bid=50004.0, best_ask=50005.0),
            ])
            conn.commit()

        strategy = DefaultStrategy("BTC-USDT")
        strategy.set_db_path(db_path)
        loaded = strategy.load_stored_metrics(1000, 1002)

        assert len(loaded) == 3
        assert (loaded[0].obi, loaded[0].cvd) == (0.1, 10.0)
        assert (loaded[-1].obi, loaded[-1].cvd) == (0.3, 20.0)
    finally:
        db_path.unlink(missing_ok=True)
def test_strategy_metrics_summary():
    """Summary aggregates OBI extremes/average and CVD start/end/change."""
    strategy = DefaultStrategy("BTC-USDT")
    series = [
        Metric(snapshot_id=1, timestamp=1000, obi=0.1, cvd=10.0),
        Metric(snapshot_id=2, timestamp=1001, obi=-0.2, cvd=5.0),
        Metric(snapshot_id=3, timestamp=1002, obi=0.3, cvd=15.0),
    ]
    summary = strategy.get_metrics_summary(series)

    assert summary["obi_min"] == -0.2
    assert summary["obi_max"] == 0.3
    # Mean of (0.1, -0.2, 0.3).
    assert abs(summary["obi_avg"] - 0.0667) < 0.001
    assert summary["cvd_start"] == 10.0
    assert summary["cvd_end"] == 15.0
    assert summary["cvd_change"] == 5.0  # end minus start
    assert summary["total_snapshots"] == 3
def test_strategy_empty_metrics():
    """Empty inputs and a missing database must all degrade to empty results."""
    strategy = DefaultStrategy("BTC-USDT")

    # Empty book -> empty OBI series.
    assert strategy.compute_OBI(Book()) == []

    # Empty metric list -> empty summary.
    assert strategy.get_metrics_summary([]) == {}

    # Missing database file -> no metrics, no exception.
    strategy.set_db_path(Path("nonexistent.db"))
    assert strategy.load_stored_metrics(1000, 2000) == []

View File

@@ -0,0 +1,112 @@
"""Tests for Visualizer metrics integration."""
import sys
import sqlite3
import tempfile
from pathlib import Path
from unittest.mock import patch
sys.path.append(str(Path(__file__).resolve().parents[1]))
from visualizer import Visualizer
from models import Book, BookSnapshot, OrderbookLevel, Metric
from repositories.sqlite_metrics_repository import SQLiteMetricsRepository
def test_visualizer_loads_metrics():
    """Visualizer reads persisted metrics through its configured db path."""
    with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as handle:
        db_path = Path(handle.name)
    try:
        # Persist three metrics one window-width (60s) apart.
        repo = SQLiteMetricsRepository(db_path)
        with repo.connect() as conn:
            repo.create_metrics_table(conn)
            repo.insert_metrics_batch(conn, [
                Metric(snapshot_id=1, timestamp=1000, obi=0.1, cvd=10.0, best_bid=50000.0, best_ask=50001.0),
                Metric(snapshot_id=2, timestamp=1060, obi=0.2, cvd=15.0, best_bid=50002.0, best_ask=50003.0),
                Metric(snapshot_id=3, timestamp=1120, obi=-0.1, cvd=12.0, best_bid=50004.0, best_ask=50005.0),
            ])
            conn.commit()

        visualizer = Visualizer(window_seconds=60, max_bars=200)
        visualizer.set_db_path(db_path)

        loaded = visualizer._load_stored_metrics(1000, 1120)
        assert len(loaded) == 3
        assert (loaded[0].obi, loaded[0].cvd) == (0.1, 10.0)
        assert loaded[1].obi == 0.2
        assert loaded[2].obi == -0.1
    finally:
        db_path.unlink(missing_ok=True)
def test_visualizer_handles_no_database():
    """Without a configured db path the metrics loader returns an empty list."""
    visualizer = Visualizer(window_seconds=60, max_bars=200)
    assert visualizer._load_stored_metrics(1000, 2000) == []
def test_visualizer_handles_invalid_database():
    """A path to a missing database file is handled gracefully, not raised."""
    visualizer = Visualizer(window_seconds=60, max_bars=200)
    visualizer.set_db_path(Path("nonexistent.db"))
    assert visualizer._load_stored_metrics(1000, 2000) == []
@patch('matplotlib.pyplot.subplots')
def test_visualizer_creates_four_subplots(mock_subplots):
    """Constructor must request four stacked, x-sharing axes and keep each one."""
    fake_fig = type('MockFig', (), {})()
    fake_axes = tuple(type('MockAx', (), {})() for _ in range(4))
    mock_subplots.return_value = (fake_fig, fake_axes)

    visualizer = Visualizer(window_seconds=60, max_bars=200)

    mock_subplots.assert_called_once_with(4, 1, figsize=(12, 10), sharex=True)
    # Axes are assigned in OHLC, volume, OBI, CVD order.
    assert visualizer.ax_ohlc is fake_axes[0]
    assert visualizer.ax_volume is fake_axes[1]
    assert visualizer.ax_obi is fake_axes[2]
    assert visualizer.ax_cvd is fake_axes[3]
def test_visualizer_update_from_book_with_empty_book():
    """Visualizer must handle an empty book gracefully (warn, don't draw).

    Fix: the fakes previously used zero-argument ``lambda: None`` stored in a
    class dict; attribute access binds them as methods, so a real call to
    ``fig.canvas.draw_idle()`` or ``ax.clear()`` would raise TypeError. Using
    ``lambda *args: None`` tolerates the implicit ``self``.
    """
    with patch('matplotlib.pyplot.subplots') as mock_subplots:
        fake_canvas = type('MockCanvas', (), {'draw_idle': lambda *args: None})()
        mock_fig = type('MockFig', (), {'canvas': fake_canvas})()
        mock_axes = tuple(type('MockAx', (), {'clear': lambda *args: None})() for _ in range(4))
        mock_subplots.return_value = (mock_fig, mock_axes)
        visualizer = Visualizer(window_seconds=60, max_bars=200)

        # An empty book should produce exactly one warning and no crash.
        book = Book()
        with patch('logging.warning') as mock_warning:
            visualizer.update_from_book(book)
            mock_warning.assert_called_once_with("Book has no snapshots to visualize")