Refactor
parent 1cdfe3973a
commit 837c505828
0  cycles/__init__.py  Normal file
0  cycles/utils/__init__.py  Normal file
128  cycles/utils/gsheets.py  Normal file
@@ -0,0 +1,128 @@
import threading
import time
import queue
from google.oauth2.service_account import Credentials
import gspread
import math
import numpy as np
from collections import defaultdict


class GSheetBatchPusher(threading.Thread):

    def __init__(self, queue, timestamp, spreadsheet_name, interval=60, logging=None):
        super().__init__(daemon=True)
        self.queue = queue
        self.timestamp = timestamp
        self.spreadsheet_name = spreadsheet_name
        self.interval = interval
        self._stop_event = threading.Event()
        self.logging = logging

    def run(self):
        while not self._stop_event.is_set():
            self.push_all()
            time.sleep(self.interval)
        # Final push on stop
        self.push_all()

    def stop(self):
        self._stop_event.set()

    def push_all(self):
        batch_results = []
        batch_trades = []
        while True:
            try:
                results, trades = self.queue.get_nowait()
                batch_results.extend(results)
                batch_trades.extend(trades)
            except queue.Empty:
                break

        if batch_results or batch_trades:
            self.write_results_per_combination_gsheet(batch_results, batch_trades, self.timestamp, self.spreadsheet_name)

    def write_results_per_combination_gsheet(self, results_rows, trade_rows, timestamp, spreadsheet_name="GlimBit Backtest Results"):
        scopes = [
            "https://www.googleapis.com/auth/spreadsheets",
            "https://www.googleapis.com/auth/drive"
        ]
        creds = Credentials.from_service_account_file('credentials/service_account.json', scopes=scopes)
        gc = gspread.authorize(creds)
        sh = gc.open(spreadsheet_name)

        try:
            worksheet = sh.worksheet("Results")
        except gspread.exceptions.WorksheetNotFound:
            worksheet = sh.add_worksheet(title="Results", rows="1000", cols="20")

        # Clear the worksheet before writing new results
        worksheet.clear()

        # Fieldnames matching the result rows
        fieldnames = [
            "timeframe", "stop_loss_pct", "n_trades", "n_stop_loss", "win_rate",
            "max_drawdown", "avg_trade", "profit_ratio", "initial_usd", "final_usd"
        ]

        def to_native(val):
            if isinstance(val, (np.generic, np.ndarray)):
                val = val.item()
            if hasattr(val, 'isoformat'):
                return val.isoformat()
            # Handle inf, -inf, nan
            if isinstance(val, float):
                if math.isinf(val):
                    return "∞" if val > 0 else "-∞"
                if math.isnan(val):
                    return ""
            return val

        # Write header if sheet is empty
        if len(worksheet.get_all_values()) == 0:
            worksheet.append_row(fieldnames)

        for row in results_rows:
            values = [to_native(row.get(field, "")) for field in fieldnames]
            worksheet.append_row(values)

        trades_fieldnames = [
            "entry_time", "exit_time", "entry_price", "exit_price", "profit_pct", "type"
        ]
        trades_by_combo = defaultdict(list)

        for trade in trade_rows:
            tf = trade.get("timeframe")
            sl = trade.get("stop_loss_pct")
            trades_by_combo[(tf, sl)].append(trade)

        for (tf, sl), trades in trades_by_combo.items():
            sl_percent = int(round(sl * 100))
            sheet_name = f"Trades_{tf}_ST{sl_percent}%"

            try:
                trades_ws = sh.worksheet(sheet_name)
            except gspread.exceptions.WorksheetNotFound:
                trades_ws = sh.add_worksheet(title=sheet_name, rows="1000", cols="20")

            # Clear the trades worksheet before writing new trades
            trades_ws.clear()

            if len(trades_ws.get_all_values()) == 0:
                trades_ws.append_row(trades_fieldnames)

            for trade in trades:
                trade_row = [to_native(trade.get(field, "")) for field in trades_fieldnames]
                try:
                    trades_ws.append_row(trade_row)
                except gspread.exceptions.APIError as e:
                    if '429' in str(e):
                        if self.logging is not None:
                            self.logging.warning(f"Google Sheets API quota exceeded (429). Please wait one minute. Will retry on next batch push. Sheet: {sheet_name}")
                        # Re-queue the failed batch for retry
                        self.queue.put((results_rows, trade_rows))
                        return  # Stop pushing for this batch, will retry next interval
                    else:
                        raise
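Usage note (editor's sketch, not part of the diff): roughly how the pusher is meant to be wired up, mirroring the commented-out wiring in main.py further down. The placeholder row values and the shutdown order are assumptions; the class, the (results_rows, trade_rows) queue payload shape, the spreadsheet name, and the credentials path are taken from this commit.

import datetime
import queue

from cycles.utils.gsheets import GSheetBatchPusher

results_queue = queue.Queue()
timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M")

# Needs credentials/service_account.json and a spreadsheet named
# "GlimBit Backtest Results" shared with the service account.
pusher = GSheetBatchPusher(results_queue, timestamp, "GlimBit Backtest Results", interval=60)
pusher.start()

# Workers enqueue (results_rows, trade_rows) tuples; the values here are placeholders.
results_queue.put(([{"timeframe": "1D", "stop_loss_pct": 0.02, "n_trades": 1, "final_usd": 10100.0}], []))

pusher.stop()   # the run() loop exits after the current sleep and does one final push_all()
pusher.join()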
155  cycles/utils/storage.py  Normal file
@@ -0,0 +1,155 @@
import os
import json
import pandas as pd
import csv
from collections import defaultdict

RESULTS_DIR = "results"
DATA_DIR = "data"

class Storage:
    """Storage class for storing and loading results and data"""

    def __init__(self, logging=None, results_dir=RESULTS_DIR, data_dir=DATA_DIR):
        self.results_dir = results_dir
        self.data_dir = data_dir
        self.logging = logging

        # Create directories if they don't exist
        os.makedirs(self.results_dir, exist_ok=True)
        os.makedirs(self.data_dir, exist_ok=True)

    def load_data(self, file_path, start_date, stop_date):
        """Load data with optimized dtypes and filtering, supporting CSV and JSON input

        Args:
            file_path: path to the data file (relative to the data directory)
            start_date: start date
            stop_date: stop date
        Returns:
            pandas DataFrame indexed by timestamp, or None on error
        """
        # Determine file type
        _, ext = os.path.splitext(file_path)
        ext = ext.lower()
        try:
            if ext == ".json":
                with open(os.path.join(self.data_dir, file_path), 'r') as f:
                    raw = json.load(f)
                data = pd.DataFrame(raw["Data"])
                # Convert columns to lowercase
                data.columns = data.columns.str.lower()
                # Convert timestamp to datetime
                data["timestamp"] = pd.to_datetime(data["timestamp"], unit="s")
                # Filter by date range
                data = data[(data["timestamp"] >= start_date) & (data["timestamp"] <= stop_date)]
                if self.logging is not None:
                    self.logging.info(f"Data loaded from {file_path} for date range {start_date} to {stop_date}")
                return data.set_index("timestamp")
            else:
                # Define optimized dtypes
                dtypes = {
                    'Open': 'float32',
                    'High': 'float32',
                    'Low': 'float32',
                    'Close': 'float32',
                    'Volume': 'float32'
                }
                # Read data with original capitalized column names
                data = pd.read_csv(os.path.join(self.data_dir, file_path), dtype=dtypes)
                # Convert timestamp to datetime
                data['Timestamp'] = pd.to_datetime(data['Timestamp'], unit='s')
                # Filter by date range
                data = data[(data['Timestamp'] >= start_date) & (data['Timestamp'] <= stop_date)]
                # Now convert column names to lowercase
                data.columns = data.columns.str.lower()
                if self.logging is not None:
                    self.logging.info(f"Data loaded from {file_path} for date range {start_date} to {stop_date}")
                return data.set_index('timestamp')
        except Exception as e:
            if self.logging is not None:
                self.logging.error(f"Error loading data from {file_path}: {e}")
            return None

    def format_row(self, row):
        """Format a row for the combined results CSV file

        Args:
            row: row to format
        Returns:
            formatted row
        """
        return {
            "timeframe": row["timeframe"],
            "stop_loss_pct": f"{row['stop_loss_pct']*100:.2f}%",
            "n_trades": row["n_trades"],
            "n_stop_loss": row["n_stop_loss"],
            "win_rate": f"{row['win_rate']*100:.2f}%",
            "max_drawdown": f"{row['max_drawdown']*100:.2f}%",
            "avg_trade": f"{row['avg_trade']*100:.2f}%",
            "profit_ratio": f"{row['profit_ratio']*100:.2f}%",
            "final_usd": f"{row['final_usd']:.2f}",
        }

    def write_results_chunk(self, filename, fieldnames, rows, write_header=False, initial_usd=None):
        """Write a chunk of results to a CSV file

        Args:
            filename: filename to write to
            fieldnames: list of fieldnames
            rows: list of rows
            write_header: whether to write the header
            initial_usd: initial USD
        """
        mode = 'w' if write_header else 'a'

        with open(filename, mode, newline="") as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            if write_header:
                csvfile.write(f"# initial_usd: {initial_usd}\n")
                writer.writeheader()

            for row in rows:
                # Only keep keys that are in fieldnames
                filtered_row = {k: v for k, v in row.items() if k in fieldnames}
                writer.writerow(filtered_row)

    def write_results_combined(self, filename, fieldnames, rows):
        """Write combined results to a CSV file

        Args:
            filename: filename to write to
            fieldnames: list of fieldnames
            rows: list of rows
        """
        fname = os.path.join(self.results_dir, filename)
        with open(fname, "w", newline="") as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames, delimiter='\t')
            writer.writeheader()
            for row in rows:
                writer.writerow(self.format_row(row))
        if self.logging is not None:
            self.logging.info(f"Combined results written to {fname}")

    def write_trades(self, all_trade_rows, trades_fieldnames):
        """Write trades to one CSV file per (timeframe, stop loss) combination

        Args:
            all_trade_rows: list of trade rows
            trades_fieldnames: list of trade fieldnames
        """
        trades_by_combo = defaultdict(list)
        for trade in all_trade_rows:
            tf = trade.get("timeframe")
            sl = trade.get("stop_loss_pct")
            trades_by_combo[(tf, sl)].append(trade)

        for (tf, sl), trades in trades_by_combo.items():
            sl_percent = int(round(sl * 100))
            trades_filename = os.path.join(self.results_dir, f"trades_{tf}_ST{sl_percent}pct.csv")
            with open(trades_filename, "w", newline="") as csvfile:
                writer = csv.DictWriter(csvfile, fieldnames=trades_fieldnames)
                writer.writeheader()
                for trade in trades:
                    writer.writerow({k: trade.get(k, "") for k in trades_fieldnames})
            if self.logging is not None:
                self.logging.info(f"Trades written to {trades_filename}")
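Usage note (editor's sketch, not part of the diff): the Storage API as the refactored main.py consumes it. The date range and the example row values are illustrative; the data/ and results/ locations and the column handling are as implemented above.

import logging

from cycles.utils.storage import Storage

logging.basicConfig(level=logging.INFO)
storage = Storage(logging=logging)

# Loads data/btcusd_1-min_data.csv, parses 'Timestamp' as UNIX seconds, filters to
# the range, and returns a DataFrame indexed by 'timestamp' (None on failure).
data_1min = storage.load_data("btcusd_1-min_data.csv", "2024-01-01", "2024-03-31")

combined_fieldnames = [
    "timeframe", "stop_loss_pct", "n_trades", "n_stop_loss", "win_rate",
    "max_drawdown", "avg_trade", "profit_ratio", "final_usd"
]
example_rows = [{
    "timeframe": "1D", "stop_loss_pct": 0.02, "n_trades": 5, "n_stop_loss": 1,
    "win_rate": 0.6, "max_drawdown": 0.08, "avg_trade": 0.012,
    "profit_ratio": 0.5, "final_usd": 10500.0,
}]

# Written under results/ as a tab-separated file with percentage formatting.
storage.write_results_combined("202401010000_backtest_combined.csv", combined_fieldnames, example_rows)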
19  cycles/utils/system.py  Normal file
@@ -0,0 +1,19 @@
import os
import psutil

class SystemUtils:

    def __init__(self, logging=None):
        self.logging = logging

    def get_optimal_workers(self):
        """Determine optimal number of worker processes based on system resources"""
        cpu_count = os.cpu_count() or 4
        memory_gb = psutil.virtual_memory().total / (1024**3)
        # Heuristic: Use 75% of cores, but cap based on available memory
        # Assume each worker needs ~2GB for large datasets
        workers_by_memory = max(1, int(memory_gb / 2))
        workers_by_cpu = max(1, int(cpu_count * 0.75))
        if self.logging is not None:
            self.logging.info(f"Using {min(workers_by_cpu, workers_by_memory)} workers for processing")
        return min(workers_by_cpu, workers_by_memory)
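Usage note (editor's sketch, not part of the diff): the heuristic caps CPU-based parallelism by available memory. For example, 16 cores with 64 GB of RAM gives min(int(16 * 0.75), int(64 / 2)) = min(12, 32) = 12 workers, while 16 cores with 8 GB is memory-bound at min(12, 4) = 4. A minimal call:

from cycles.utils.system import SystemUtils

workers = SystemUtils().get_optimal_workers()   # logs the choice only if a logger was passed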
264  main.py
@@ -1,21 +1,16 @@
import pandas as pd
import numpy as np
from trend_detector_simple import TrendDetectorSimple
import csv
import logging
import concurrent.futures
import os
import psutil
import datetime
import gspread
from google.oauth2.service_account import Credentials
from collections import defaultdict
import threading
import queue
import time
import math
import json
from taxes import Taxes

from cycles.trend_detector_simple import TrendDetectorSimple
from cycles.taxes import Taxes
from cycles.utils.storage import Storage
from cycles.utils.gsheets import GSheetBatchPusher
from cycles.utils.system import SystemUtils

# Set up logging
logging.basicConfig(
@@ -30,85 +25,6 @@ logging.basicConfig(
# Global queue for batching Google Sheets updates
results_queue = queue.Queue()

# Background thread function to push updates every minute
class GSheetBatchPusher(threading.Thread):
    def __init__(self, queue, timestamp, spreadsheet_name, interval=60):
        super().__init__(daemon=True)
        self.queue = queue
        self.timestamp = timestamp
        self.spreadsheet_name = spreadsheet_name
        self.interval = interval
        self._stop_event = threading.Event()

    def run(self):
        while not self._stop_event.is_set():
            self.push_all()
            time.sleep(self.interval)
        # Final push on stop
        self.push_all()

    def stop(self):
        self._stop_event.set()

    def push_all(self):
        batch_results = []
        batch_trades = []
        while True:
            try:
                results, trades = self.queue.get_nowait()
                batch_results.extend(results)
                batch_trades.extend(trades)
            except queue.Empty:
                break

        if batch_results or batch_trades:
            write_results_per_combination_gsheet(batch_results, batch_trades, self.timestamp, self.spreadsheet_name)

def get_optimal_workers():
    """Determine optimal number of worker processes based on system resources"""
    cpu_count = os.cpu_count() or 4
    memory_gb = psutil.virtual_memory().total / (1024**3)
    # Heuristic: Use 75% of cores, but cap based on available memory
    # Assume each worker needs ~2GB for large datasets
    workers_by_memory = max(1, int(memory_gb / 2))
    workers_by_cpu = max(1, int(cpu_count * 0.75))
    return min(workers_by_cpu, workers_by_memory)

def load_data(file_path, start_date, stop_date):
    """Load data with optimized dtypes and filtering, supporting CSV and JSON input"""
    # Determine file type
    _, ext = os.path.splitext(file_path)
    ext = ext.lower()
    if ext == ".json":
        with open(file_path, 'r') as f:
            raw = json.load(f)
        data = pd.DataFrame(raw["Data"])
        # Convert columns to lowercase
        data.columns = data.columns.str.lower()
        # Convert timestamp to datetime
        data["timestamp"] = pd.to_datetime(data["timestamp"], unit="s")
        # Filter by date range
        data = data[(data["timestamp"] >= start_date) & (data["timestamp"] <= stop_date)]
        return data.set_index("timestamp")
    else:
        # Define optimized dtypes
        dtypes = {
            'Open': 'float32',
            'High': 'float32',
            'Low': 'float32',
            'Close': 'float32',
            'Volume': 'float32'
        }
        # Read data with original capitalized column names
        data = pd.read_csv(file_path, dtype=dtypes)
        # Convert timestamp to datetime
        data['Timestamp'] = pd.to_datetime(data['Timestamp'], unit='s')
        # Filter by date range
        data = data[(data['Timestamp'] >= start_date) & (data['Timestamp'] <= stop_date)]
        # Now convert column names to lowercase
        data.columns = data.columns.str.lower()
        return data.set_index('timestamp')

def process_timeframe_data(min1_df, df, stop_loss_pcts, rule_name, initial_usd, debug=False):
    """Process the entire timeframe with all stop loss values (no monthly split)"""
    df = df.copy().reset_index(drop=True)
@@ -199,21 +115,6 @@ def process_timeframe(timeframe_info, debug=False):
    results_rows, all_trade_rows = process_timeframe_data(data_1min, df, [stop_loss_pct], rule, initial_usd, debug=debug)
    return results_rows, all_trade_rows

def write_results_chunk(filename, fieldnames, rows, write_header=False):
    """Write a chunk of results to a CSV file"""
    mode = 'w' if write_header else 'a'

    with open(filename, mode, newline="") as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        if write_header:
            csvfile.write(f"# initial_usd: {initial_usd}\n")
            writer.writeheader()

        for row in rows:
            # Only keep keys that are in fieldnames
            filtered_row = {k: v for k, v in row.items() if k in fieldnames}
            writer.writerow(filtered_row)

def aggregate_results(all_rows):
    """Aggregate results per stop_loss_pct and per rule (timeframe)"""
    from collections import defaultdict
@@ -250,87 +151,14 @@
        })
    return summary_rows

def write_results_per_combination_gsheet(results_rows, trade_rows, timestamp, spreadsheet_name="GlimBit Backtest Results"):
    scopes = [
        "https://www.googleapis.com/auth/spreadsheets",
        "https://www.googleapis.com/auth/drive"
    ]
    creds = Credentials.from_service_account_file('credentials/service_account.json', scopes=scopes)
    gc = gspread.authorize(creds)
    sh = gc.open(spreadsheet_name)

    try:
        worksheet = sh.worksheet("Results")
    except gspread.exceptions.WorksheetNotFound:
        worksheet = sh.add_worksheet(title="Results", rows="1000", cols="20")

    # Clear the worksheet before writing new results
    worksheet.clear()

    # Updated fieldnames to match your data rows
    fieldnames = [
        "timeframe", "stop_loss_pct", "n_trades", "n_stop_loss", "win_rate",
        "max_drawdown", "avg_trade", "profit_ratio", "initial_usd", "final_usd"
    ]

    def to_native(val):
        if isinstance(val, (np.generic, np.ndarray)):
            val = val.item()
        if hasattr(val, 'isoformat'):
            return val.isoformat()
        # Handle inf, -inf, nan
        if isinstance(val, float):
            if math.isinf(val):
                return "∞" if val > 0 else "-∞"
            if math.isnan(val):
                return ""
        return val

    # Write header if sheet is empty
    if len(worksheet.get_all_values()) == 0:
        worksheet.append_row(fieldnames)

    for row in results_rows:
        values = [to_native(row.get(field, "")) for field in fieldnames]
        worksheet.append_row(values)

    trades_fieldnames = [
        "entry_time", "exit_time", "entry_price", "exit_price", "profit_pct", "type"
    ]
    trades_by_combo = defaultdict(list)

    for trade in trade_rows:
        tf = trade.get("timeframe")
        sl = trade.get("stop_loss_pct")
        trades_by_combo[(tf, sl)].append(trade)

    for (tf, sl), trades in trades_by_combo.items():
        sl_percent = int(round(sl * 100))
        sheet_name = f"Trades_{tf}_ST{sl_percent}%"

        try:
            trades_ws = sh.worksheet(sheet_name)
        except gspread.exceptions.WorksheetNotFound:
            trades_ws = sh.add_worksheet(title=sheet_name, rows="1000", cols="20")

        # Clear the trades worksheet before writing new trades
        trades_ws.clear()

        if len(trades_ws.get_all_values()) == 0:
            trades_ws.append_row(trades_fieldnames)

        for trade in trades:
            trade_row = [to_native(trade.get(field, "")) for field in trades_fieldnames]
            try:
                trades_ws.append_row(trade_row)
            except gspread.exceptions.APIError as e:
                if '429' in str(e):
                    logging.warning(f"Google Sheets API quota exceeded (429). Please wait one minute. Will retry on next batch push. Sheet: {sheet_name}")
                    # Re-queue the failed batch for retry
                    results_queue.put((results_rows, trade_rows))
                    return  # Stop pushing for this batch, will retry next interval
                else:
                    raise
def get_nearest_price(df, target_date):
    if len(df) == 0:
        return None, None
    target_ts = pd.to_datetime(target_date)
    nearest_idx = df.index.get_indexer([target_ts], method='nearest')[0]
    nearest_time = df.index[nearest_idx]
    price = df.iloc[nearest_idx]['close']
    return nearest_time, price

if __name__ == "__main__":
    # Configuration
@@ -341,24 +169,16 @@ if __name__ == "__main__":
    initial_usd = 10000
    debug = False

    results_dir = "results"
    os.makedirs(results_dir, exist_ok=True)
    timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M")

    timeframes = ["1min", "5min"]
    storage = Storage(logging=logging)
    system_utils = SystemUtils(logging=logging)

    timeframes = ["1D"]
    stop_loss_pcts = [0.01, 0.02, 0.03]

    # Load data once
    data_1min = load_data('./data/btcusd_1-min_data.csv', start_date, stop_date)

    def get_nearest_price(df, target_date):
        if len(df) == 0:
            return None, None
        target_ts = pd.to_datetime(target_date)
        nearest_idx = df.index.get_indexer([target_ts], method='nearest')[0]
        nearest_time = df.index[nearest_idx]
        price = df.iloc[nearest_idx]['close']
        return nearest_time, price
    data_1min = storage.load_data('btcusd_1-min_data.csv', start_date, stop_date)

    nearest_start_time, start_price = get_nearest_price(data_1min, start_date)
    nearest_stop_time, stop_price = get_nearest_price(data_1min, stop_date)
@@ -372,8 +192,7 @@ if __name__ == "__main__":
        for stop_loss_pct in stop_loss_pcts
    ]

    workers = get_optimal_workers()
    logging.info(f"Using {workers} workers for processing")
    workers = system_utils.get_optimal_workers()

    # Start the background batch pusher
    # spreadsheet_name = "GlimBit Backtest Results"
@@ -403,33 +222,12 @@ if __name__ == "__main__":
    # batch_pusher.push_all()

    # Write all results to a single CSV file
    combined_filename = os.path.join(results_dir, f"{timestamp}_backtest_combined.csv")
    combined_filename = os.path.join(f"{timestamp}_backtest_combined.csv")
    combined_fieldnames = [
        "timeframe", "stop_loss_pct", "n_trades", "n_stop_loss", "win_rate",
        "max_drawdown", "avg_trade", "profit_ratio", "final_usd"
    ]

    def format_row(row):
        # Format percentages and floats as in your example
        return {
            "timeframe": row["timeframe"],
            "stop_loss_pct": f"{row['stop_loss_pct']*100:.2f}%",
            "n_trades": row["n_trades"],
            "n_stop_loss": row["n_stop_loss"],
            "win_rate": f"{row['win_rate']*100:.2f}%",
            "max_drawdown": f"{row['max_drawdown']*100:.2f}%",
            "avg_trade": f"{row['avg_trade']*100:.2f}%",
            "profit_ratio": f"{row['profit_ratio']*100:.2f}%",
            "final_usd": f"{row['final_usd']:.2f}",
        }

    with open(combined_filename, "w", newline="") as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=combined_fieldnames, delimiter='\t')
        writer.writeheader()
        for row in all_results_rows:
            writer.writerow(format_row(row))

    logging.info(f"Combined results written to {combined_filename}")
    storage.write_results_combined(combined_filename, combined_fieldnames, all_results_rows)

    # --- Add taxes to combined results CSV ---
    # taxes = Taxes() # Default 20% tax rate
@@ -457,25 +255,11 @@ if __name__ == "__main__":
    # --- END: Collect all trades from each task ---

    # Now, group all_trade_rows by (timeframe, stop_loss_pct)
    from collections import defaultdict
    trades_by_combo = defaultdict(list)
    for trade in all_trade_rows:
        tf = trade.get("timeframe")
        sl = trade.get("stop_loss_pct")
        trades_by_combo[(tf, sl)].append(trade)


    trades_fieldnames = [
        "entry_time", "exit_time", "entry_price", "exit_price", "profit_pct", "type"
    ]

    for (tf, sl), trades in trades_by_combo.items():
        sl_percent = int(round(sl * 100))
        trades_filename = os.path.join(results_dir, f"trades_{tf}_ST{sl_percent}pct.csv")
        with open(trades_filename, "w", newline="") as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=trades_fieldnames)
            writer.writeheader()
            for trade in trades:
                writer.writerow({k: trade.get(k, "") for k in trades_fieldnames})
        logging.info(f"Trades written to {trades_filename}")
    storage.write_trades(all_trade_rows, trades_fieldnames)
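Overall flow (editor's sketch, not part of the diff): after this refactor the entry point delegates to the new modules roughly as follows. The dates and the empty result lists are placeholders; everything else uses names visible in the diff.

import datetime
import logging

from cycles.utils.storage import Storage
from cycles.utils.system import SystemUtils

logging.basicConfig(level=logging.INFO)
storage = Storage(logging=logging)
system_utils = SystemUtils(logging=logging)
timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M")

start_date, stop_date = "2024-01-01", "2024-03-31"      # placeholders
data_1min = storage.load_data("btcusd_1-min_data.csv", start_date, stop_date)
workers = system_utils.get_optimal_workers()

all_results_rows, all_trade_rows = [], []               # filled by the backtest workers
# ... run process_timeframe across `workers` processes and extend the two lists ...

combined_fieldnames = [
    "timeframe", "stop_loss_pct", "n_trades", "n_stop_loss", "win_rate",
    "max_drawdown", "avg_trade", "profit_ratio", "final_usd"
]
trades_fieldnames = ["entry_time", "exit_time", "entry_price", "exit_price", "profit_pct", "type"]

storage.write_results_combined(f"{timestamp}_backtest_combined.csv", combined_fieldnames, all_results_rows)
storage.write_trades(all_trade_rows, trades_fieldnames)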