Enhance logging capabilities across data collection components

- Added an optional `logger` parameter to several classes, including `BaseDataCollector`, `CollectorManager`, `RealTimeCandleProcessor`, and `BatchCandleProcessor`, to support conditional logging.
- Implemented an error-only logging mode that, when enabled, restricts a component to emitting only error- and critical-level messages.
- Updated logging call sites to use the new helper methods, improving readability and maintainability.
- Enhanced documentation to include details on the new logging system and its usage across components.
- Ensured that child components inherit the logger from their parent components for consistent logging behavior.
This commit is contained in:
Vasily.onl
2025-06-01 14:42:29 +08:00
parent 0697be75da
commit bc13cfcbe0
11 changed files with 1179 additions and 350 deletions

View File

@@ -58,11 +58,11 @@ class ProductionManager:
self.config_path = config_path
self.config = self._load_config()
# Configure clean logging - minimal console output, detailed file logs
# Configure clean logging - minimal console output, error-only file logs
self.logger = get_logger("production_manager", verbose=False)
# Core components
self.collector_manager = CollectorManager()
# Core components with error-only logging
self.collector_manager = CollectorManager(logger=self.logger, log_errors_only=True)
self.collectors: List[OKXCollector] = []
# Runtime state
@@ -73,7 +73,7 @@ class ProductionManager:
'uptime_seconds': 0
}
self.logger.info(f"🚀 Production Manager initialized")
self.logger.info(f"🚀 Production Manager initialized with error-only logging")
self.logger.info(f"📁 Config: {config_path}")
def _load_config(self) -> dict:
@@ -110,21 +110,24 @@ class ProductionManager:
auto_save_candles=True
)
# Create custom data processor with 1m/5m timeframes
# Create custom data processor with error-only logging
data_processor = OKXDataProcessor(
symbol=symbol,
config=candle_config,
component_name=f"okx_processor_{symbol.replace('-', '_').lower()}"
component_name=f"okx_processor_{symbol.replace('-', '_').lower()}",
logger=self.logger
)
# Create OKX collector with custom processor
# Create OKX collector with error-only logging
collector = OKXCollector(
symbol=symbol,
data_types=data_types,
component_name=f"okx_collector_{symbol.replace('-', '_').lower()}",
auto_restart=self.config.get('data_collection', {}).get('auto_restart', True),
health_check_interval=self.config.get('data_collection', {}).get('health_check_interval', 30.0),
store_raw_data=self.config.get('data_collection', {}).get('store_raw_data', True)
store_raw_data=self.config.get('data_collection', {}).get('store_raw_data', True),
logger=self.logger,
log_errors_only=True
)
# Replace the default data processor with our custom one
@@ -139,9 +142,9 @@ class ProductionManager:
self.collectors.append(collector)
self.statistics['collectors_created'] += 1
self.logger.info(f"✅ Collector created for {symbol} with 1m/5m timeframes")
self.logger.info(f"✅ Collector created for {symbol} with 1m/5m timeframes and error-only logging")
self.logger.info(f"🎉 All {len(self.collectors)} collectors created successfully")
self.logger.info(f"🎉 All {len(self.collectors)} collectors created successfully with error-only logging")
self.logger.info(f"📊 Collectors configured with 1m and 5m aggregation timeframes")
return True
@@ -191,11 +194,9 @@ class ProductionManager:
self.logger.error(f"❌ Error during shutdown: {e}")
async def run_clean_production(duration_hours: float = 8.0):
async def run_clean_production(duration_hours: Optional[float] = None):
"""Run production collector with clean output."""
duration_seconds = int(duration_hours * 3600)
# Global state for signal handling
shutdown_event = asyncio.Event()
manager = None
@@ -212,7 +213,10 @@ async def run_clean_production(duration_hours: float = 8.0):
# Header
print("🚀 OKX PRODUCTION DATA COLLECTOR")
print("="*50)
print(f"⏱️ Duration: {duration_hours} hours")
if duration_hours:
print(f"⏱️ Duration: {duration_hours} hours")
else:
print(f"⏱️ Duration: Indefinite (until stopped)")
print(f"📊 Timeframes: 1m and 5m candles")
print(f"💾 Database: Raw trades + aggregated candles")
print(f"📝 Logs: logs/ directory")
@@ -238,6 +242,8 @@ async def run_clean_production(duration_hours: float = 8.0):
print("✅ Data collection active!")
print(f"📈 Collecting: {len(manager.collectors)} trading pairs")
print(f"📊 Monitor: python scripts/monitor_clean.py")
if not duration_hours:
print("⏹️ Stop: Ctrl+C")
print("-" * 50)
# Main monitoring loop
@@ -252,17 +258,22 @@ async def run_clean_production(duration_hours: float = 8.0):
except asyncio.TimeoutError:
pass
# Check duration
# Check duration if specified
current_time = time.time()
if current_time - start_time >= duration_seconds:
print(f"⏰ Completed {duration_hours} hour run")
break
if duration_hours:
duration_seconds = int(duration_hours * 3600)
if current_time - start_time >= duration_seconds:
print(f"⏰ Completed {duration_hours} hour run")
break
# Periodic status update
if current_time - last_update >= update_interval:
elapsed_hours = (current_time - start_time) / 3600
remaining_hours = duration_hours - elapsed_hours
print(f"⏱️ Runtime: {elapsed_hours:.1f}h | Remaining: {remaining_hours:.1f}h")
if duration_hours:
remaining_hours = duration_hours - elapsed_hours
print(f"⏱️ Runtime: {elapsed_hours:.1f}h | Remaining: {remaining_hours:.1f}h")
else:
print(f"⏱️ Runtime: {elapsed_hours:.1f}h | Mode: Continuous")
last_update = current_time
# Final summary
@@ -292,6 +303,9 @@ def main():
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Examples:
# Run indefinitely (until stopped with Ctrl+C)
python scripts/production_clean.py
# Run for 8 hours
python scripts/production_clean.py --hours 8
@@ -303,13 +317,13 @@ Examples:
parser.add_argument(
'--hours',
type=float,
default=8.0,
help='Collection duration in hours (default: 8.0)'
default=None,
help='Collection duration in hours (default: indefinite until stopped manually)'
)
args = parser.parse_args()
if args.hours <= 0:
if args.hours is not None and args.hours <= 0:
print("❌ Duration must be positive")
sys.exit(1)