Add clean monitoring and production data collection scripts
- Introduced `monitor_clean.py` for monitoring database status with detailed logging and status updates.
- Added `production_clean.py` for running OKX data collection with minimal console output and comprehensive file logging.
- Implemented command-line argument parsing for both scripts to customize monitoring intervals and collection durations.
- Enhanced logging to give clear insight into the data collection and monitoring processes.
- Updated documentation with usage examples and descriptions for the new scripts.
scripts/monitor_clean.py (new file, 226 lines)
@@ -0,0 +1,226 @@
#!/usr/bin/env python3
"""
Clean Database Monitor

Provides clean status updates for production data collection
with detailed logging to files.

Usage:
    python scripts/monitor_clean.py [--interval seconds]

Examples:
    # Check status once
    python scripts/monitor_clean.py

    # Monitor every 60 seconds
    python scripts/monitor_clean.py --interval 60
"""

import asyncio
import argparse
import sys
from datetime import datetime
from pathlib import Path

# Add project root to path
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

# Set environment for clean output
import os
os.environ['DEBUG'] = 'false'

from database.connection import DatabaseManager
from database.models import MarketData, RawTrade
from sqlalchemy import func, desc
from utils.logger import get_logger

class CleanMonitor:
    """Clean database monitor for production use."""

    def __init__(self):
        self.logger = get_logger("clean_monitor", verbose=False)
        self.db_manager = None

    def connect(self) -> bool:
        """Connect to database quietly."""
        try:
            self.db_manager = DatabaseManager()
            self.db_manager.initialize()
            return True
        except Exception as e:
            print(f"❌ Database connection failed: {e}")
            return False

    def get_summary_stats(self) -> dict:
        """Get essential statistics for console display."""
        try:
            with self.db_manager.get_session() as session:
                # Raw data count
                raw_count = session.query(func.count(RawTrade.id)).scalar()

                # Candle count
                candle_count = session.query(func.count(MarketData.id)).scalar()

                # Time range for raw data
                raw_oldest = session.query(func.min(RawTrade.timestamp)).scalar()
                raw_newest = session.query(func.max(RawTrade.timestamp)).scalar()

                # Recent activity (last 5 minutes)
                from datetime import timedelta, timezone
                cutoff = datetime.now(timezone.utc) - timedelta(minutes=5)
                recent_raw = session.query(func.count(RawTrade.id)).filter(
                    RawTrade.created_at >= cutoff
                ).scalar()
                recent_candles = session.query(func.count(MarketData.id)).filter(
                    MarketData.created_at >= cutoff
                ).scalar()

                # Timeframe breakdown
                timeframes = session.query(
                    MarketData.timeframe,
                    func.count(MarketData.id)
                ).group_by(MarketData.timeframe).all()

                # Latest prices
                latest_prices = {}
                for symbol in ['BTC-USDT', 'ETH-USDT']:
                    latest = session.query(MarketData).filter(
                        MarketData.symbol == symbol,
                        MarketData.timeframe == '1m'
                    ).order_by(desc(MarketData.created_at)).first()

                    if latest:
                        latest_prices[symbol] = {
                            'price': float(latest.close),
                            'time': latest.timestamp
                        }

                return {
                    'raw_count': raw_count,
                    'candle_count': candle_count,
                    'raw_timespan': (raw_newest - raw_oldest).total_seconds() / 3600 if raw_oldest and raw_newest else 0,
                    'recent_raw': recent_raw,
                    'recent_candles': recent_candles,
                    'timeframes': dict(timeframes),
                    'latest_prices': latest_prices
                }

        except Exception as e:
            self.logger.error(f"Error getting stats: {e}")
            return {}

    def print_status(self):
        """Print clean status summary."""
        stats = self.get_summary_stats()
        if not stats:
            print("❌ Unable to get database statistics")
            return

        print("\n" + "="*50)
        print(f"📊 DATA COLLECTION STATUS - {datetime.now().strftime('%H:%M:%S')}")
        print("="*50)

        # Main metrics
        raw_count = stats.get('raw_count', 0)
        candle_count = stats.get('candle_count', 0)
        timespan = stats.get('raw_timespan', 0)

        print(f"📈 Raw Data: {raw_count:,} entries ({timespan:.1f} hours)")

        # Candle breakdown
        timeframes = stats.get('timeframes', {})
        if timeframes:
            tf_summary = ", ".join([f"{tf}:{count}" for tf, count in timeframes.items()])
            print(f"📊 Candles: {candle_count:,} total ({tf_summary})")
        else:
            print(f"📊 Candles: {candle_count:,} total")

        # Recent activity
        recent_raw = stats.get('recent_raw', 0)
        recent_candles = stats.get('recent_candles', 0)
        print(f"🕐 Recent (5m): {recent_raw:,} raw, {recent_candles} candles")

        # Latest prices
        latest_prices = stats.get('latest_prices', {})
        if latest_prices:
            print("💰 Latest Prices:")
            for symbol, data in latest_prices.items():
                price = data['price']
                time_str = data['time'].strftime('%H:%M:%S')
                print(f" {symbol}: ${price:,.2f} at {time_str}")

        print("="*50)

    def disconnect(self):
        """Disconnect from database."""
        if self.db_manager:
            self.db_manager.close()

async def monitor_clean(interval: int = 0):
    """Run clean monitoring."""

    monitor = CleanMonitor()

    try:
        if not monitor.connect():
            return False

        if interval <= 0:
            # Single check
            monitor.print_status()
            return True

        # Continuous monitoring
        print(f"📊 Monitoring every {interval} seconds (Ctrl+C to stop)")

        while True:
            monitor.print_status()
            print(f"\n⏰ Next update in {interval} seconds...\n")
            await asyncio.sleep(interval)

    except KeyboardInterrupt:
        print("\n👋 Monitoring stopped")
        return True
    except Exception as e:
        print(f"❌ Monitor error: {e}")
        return False
    finally:
        monitor.disconnect()

def main():
    """Main entry point."""
    parser = argparse.ArgumentParser(
        description="Clean Database Monitor",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Single status check
  python scripts/monitor_clean.py

  # Monitor every minute
  python scripts/monitor_clean.py --interval 60
        """
    )

    parser.add_argument(
        '--interval',
        type=int,
        default=0,
        help='Monitor interval in seconds (0 = single check, default: 0)'
    )

    args = parser.parse_args()

    try:
        success = asyncio.run(monitor_clean(args.interval))
        sys.exit(0 if success else 1)
    except KeyboardInterrupt:
        print("\n👋 Exiting...")
        sys.exit(0)
    except Exception as e:
        print(f"❌ Fatal error: {e}")
        sys.exit(1)

if __name__ == "__main__":
    main()
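Beyond the CLI shown in its docstring, the `monitor_clean` coroutine can also be driven directly from other tooling. A minimal sketch, assuming the repository root is the working directory and `scripts/` is importable as a package (it is not guaranteed to be one in this commit):

import asyncio
from scripts.monitor_clean import monitor_clean

# interval=0 performs a single status check and exits, mirroring
# `python scripts/monitor_clean.py`; a positive interval loops until Ctrl+C.
ok = asyncio.run(monitor_clean(interval=0))
raise SystemExit(0 if ok else 1)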
scripts/production_clean.py (new file, 328 lines)
@@ -0,0 +1,328 @@
#!/usr/bin/env python3
"""
Clean Production OKX Data Collector

This script runs OKX data collection with minimal console output
and comprehensive file logging for production use.

Usage:
    python scripts/production_clean.py [--hours duration]

Examples:
    # Run for 8 hours
    python scripts/production_clean.py --hours 8

    # Run overnight (12 hours)
    python scripts/production_clean.py --hours 12
"""

import asyncio
import argparse
import signal
import sys
import time
import json
from datetime import datetime
from pathlib import Path
from typing import List, Optional

# Add project root to path
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

# Set environment variable to disable SQLAlchemy echo for clean production
import os
os.environ['DEBUG'] = 'false'

# Suppress SQLAlchemy verbose logging globally for production
import logging
logging.getLogger('sqlalchemy').setLevel(logging.CRITICAL)
logging.getLogger('sqlalchemy.engine').setLevel(logging.CRITICAL)
logging.getLogger('sqlalchemy.pool').setLevel(logging.CRITICAL)
logging.getLogger('sqlalchemy.dialects').setLevel(logging.CRITICAL)
logging.getLogger('sqlalchemy.orm').setLevel(logging.CRITICAL)

from data.exchanges.okx import OKXCollector
from data.exchanges.okx.data_processor import OKXDataProcessor
from data.collector_manager import CollectorManager
from data.base_collector import DataType
from data.common import CandleProcessingConfig
from database.connection import init_database
from utils.logger import get_logger


class ProductionManager:
    """Production manager for OKX data collection."""

    def __init__(self, config_path: str = "config/okx_config.json"):
        self.config_path = config_path
        self.config = self._load_config()

        # Configure clean logging - minimal console output, detailed file logs
        self.logger = get_logger("production_manager", verbose=False)

        # Core components
        self.collector_manager = CollectorManager()
        self.collectors: List[OKXCollector] = []

        # Runtime state
        self.running = False
        self.start_time = None
        self.statistics = {
            'collectors_created': 0,
            'uptime_seconds': 0
        }

        self.logger.info("🚀 Production Manager initialized")
        self.logger.info(f"📁 Config: {config_path}")

    def _load_config(self) -> dict:
        """Load configuration from JSON file."""
        try:
            with open(self.config_path, 'r') as f:
                config = json.load(f)
            return config
        except Exception as e:
            print(f"❌ Failed to load config from {self.config_path}: {e}")
            sys.exit(1)

    async def create_collectors(self) -> bool:
        """Create collectors for all enabled trading pairs."""
        try:
            enabled_pairs = [
                pair for pair in self.config['trading_pairs']
                if pair.get('enabled', True)
            ]

            self.logger.info(f"🎯 Creating collectors for {len(enabled_pairs)} trading pairs...")

            for pair_config in enabled_pairs:
                symbol = pair_config['symbol']
                data_types = [DataType(dt) for dt in pair_config.get('data_types', ['trade'])]

                self.logger.info(f"📈 Creating collector for {symbol} with data types: {[dt.value for dt in data_types]}")

                # Create custom candle processing config for 1m and 5m timeframes
                # Note: 1s timeframes are not supported by the aggregation framework
                candle_config = CandleProcessingConfig(
                    timeframes=['1m', '5m'],
                    emit_incomplete_candles=False,  # Only complete candles
                    auto_save_candles=True
                )

                # Create custom data processor with 1m/5m timeframes
                data_processor = OKXDataProcessor(
                    symbol=symbol,
                    config=candle_config,
                    component_name=f"okx_processor_{symbol.replace('-', '_').lower()}"
                )

                # Create OKX collector with custom processor
                collector = OKXCollector(
                    symbol=symbol,
                    data_types=data_types,
                    component_name=f"okx_collector_{symbol.replace('-', '_').lower()}",
                    auto_restart=self.config.get('data_collection', {}).get('auto_restart', True),
                    health_check_interval=self.config.get('data_collection', {}).get('health_check_interval', 30.0),
                    store_raw_data=self.config.get('data_collection', {}).get('store_raw_data', True)
                )

                # Replace the default data processor with our custom one
                collector._data_processor = data_processor

                # Add callbacks for processed data
                data_processor.add_trade_callback(collector._on_trade_processed)
                data_processor.add_candle_callback(collector._on_candle_processed)

                # Add to manager
                self.collector_manager.add_collector(collector)
                self.collectors.append(collector)
                self.statistics['collectors_created'] += 1

                self.logger.info(f"✅ Collector created for {symbol} with 1m/5m timeframes")

            self.logger.info(f"🎉 All {len(self.collectors)} collectors created successfully")
            self.logger.info("📊 Collectors configured with 1m and 5m aggregation timeframes")
            return True

        except Exception as e:
            self.logger.error(f"❌ Failed to create collectors: {e}")
            return False

    async def start(self) -> bool:
        """Start all collectors and begin data collection."""
        try:
            self.start_time = time.time()
            self.running = True

            self.logger.info("🚀 Starting production data collection...")

            # Initialize global database managers
            self.logger.info("📊 Initializing database...")
            init_database()
            self.logger.info("✅ Database initialized successfully")

            # Start collector manager
            success = await self.collector_manager.start()
            if not success:
                self.logger.error("❌ Failed to start collector manager")
                return False

            self.logger.info("✅ All collectors started successfully")
            self.logger.info("📊 Data collection is now active with built-in processing")
            return True

        except Exception as e:
            self.logger.error(f"❌ Failed to start collectors: {e}")
            return False

    async def stop(self) -> None:
        """Stop all collectors gracefully."""
        try:
            self.logger.info("🛑 Stopping production data collection...")
            self.running = False

            # Stop collector manager
            await self.collector_manager.stop()

            self.logger.info("✅ All collectors stopped gracefully")

        except Exception as e:
            self.logger.error(f"❌ Error during shutdown: {e}")


async def run_clean_production(duration_hours: float = 8.0):
    """Run production collector with clean output."""

    duration_seconds = int(duration_hours * 3600)

    # Global state for signal handling
    shutdown_event = asyncio.Event()
    manager = None

    def signal_handler(signum, frame):
        print("\n📡 Shutdown signal received, stopping gracefully...")
        shutdown_event.set()

    # Set up signal handlers
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    try:
        # Header
        print("🚀 OKX PRODUCTION DATA COLLECTOR")
        print("="*50)
        print(f"⏱️ Duration: {duration_hours} hours")
        print("📊 Timeframes: 1m and 5m candles")
        print("💾 Database: Raw trades + aggregated candles")
        print("📝 Logs: logs/ directory")
        print("="*50)

        # Create manager
        print("🎯 Initializing collector...")
        manager = ProductionManager("config/okx_config.json")

        # Create collectors
        if not await manager.create_collectors():
            print("❌ Failed to create collectors")
            return False

        # Start data collection
        print("🚀 Starting data collection...")
        if not await manager.start():
            print("❌ Failed to start data collection")
            return False

        # Running status
        start_time = time.time()
        print("✅ Data collection active!")
        print(f"📈 Collecting: {len(manager.collectors)} trading pairs")
        print("📊 Monitor: python scripts/monitor_clean.py")
        print("-" * 50)

        # Main monitoring loop
        last_update = time.time()
        update_interval = 600  # Update every 10 minutes

        while not shutdown_event.is_set():
            # Wait for shutdown or timeout
            try:
                await asyncio.wait_for(shutdown_event.wait(), timeout=1.0)
                break
            except asyncio.TimeoutError:
                pass

            # Check duration
            current_time = time.time()
            if current_time - start_time >= duration_seconds:
                print(f"⏰ Completed {duration_hours} hour run")
                break

            # Periodic status update
            if current_time - last_update >= update_interval:
                elapsed_hours = (current_time - start_time) / 3600
                remaining_hours = duration_hours - elapsed_hours
                print(f"⏱️ Runtime: {elapsed_hours:.1f}h | Remaining: {remaining_hours:.1f}h")
                last_update = current_time

        # Final summary
        total_runtime = (time.time() - start_time) / 3600
        print("\n📊 COLLECTION COMPLETE")
        print(f"⏱️ Total runtime: {total_runtime:.2f} hours")
        print(f"📈 Collectors: {len(manager.collectors)} active")
        print("📋 View results: python scripts/monitor_clean.py")

        return True

    except Exception as e:
        print(f"❌ Error: {e}")
        return False

    finally:
        if manager:
            print("🛑 Stopping collectors...")
            await manager.stop()
            print("✅ Shutdown complete")


def main():
    """Main entry point."""
    parser = argparse.ArgumentParser(
        description="Clean Production OKX Data Collector",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Run for 8 hours
  python scripts/production_clean.py --hours 8

  # Run overnight (12 hours)
  python scripts/production_clean.py --hours 12
        """
    )

    parser.add_argument(
        '--hours',
        type=float,
        default=8.0,
        help='Collection duration in hours (default: 8.0)'
    )

    args = parser.parse_args()

    if args.hours <= 0:
        print("❌ Duration must be positive")
        sys.exit(1)

    try:
        success = asyncio.run(run_clean_production(args.hours))
        sys.exit(0 if success else 1)
    except KeyboardInterrupt:
        print("\n👋 Interrupted by user")
        sys.exit(0)
    except Exception as e:
        print(f"❌ Fatal error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
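For a quick end-to-end smoke test before committing to a multi-hour run, `run_clean_production` accepts fractional hours. A minimal sketch, again assuming the repository root is the working directory, `scripts/` is importable, and `config/okx_config.json` exists as referenced above:

import asyncio
from scripts.production_clean import run_clean_production

# 0.1 hours (six minutes) is enough to confirm that collectors start,
# trades reach the database, and shutdown completes cleanly.
success = asyncio.run(run_clean_production(duration_hours=0.1))
raise SystemExit(0 if success else 1)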