Dobromir Popov
2025-09-08 13:41:22 +03:00
parent c9fba56622
commit 56e857435c
5 changed files with 27 additions and 295 deletions

View File

@@ -1,98 +0,0 @@
#!/usr/bin/env python3
"""
Immediate Model Cleanup Script

This script will clean up all existing model files and prepare the system
for fresh training with the new model management system.
"""
import logging
import sys

from model_manager import ModelManager


def main():
    """Run the model cleanup"""
    # Configure logging for better output
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )

    print("=" * 60)
    print("GOGO2 MODEL CLEANUP SYSTEM")
    print("=" * 60)
    print()
    print("This script will:")
    print("1. Delete ALL existing model files (.pt, .pth)")
    print("2. Remove ALL checkpoint directories")
    print("3. Clear model backup directories")
    print("4. Reset the model registry")
    print("5. Create clean directory structure")
    print()
    print("WARNING: This action cannot be undone!")
    print()

    # Calculate current space usage first
    try:
        manager = ModelManager()
        storage_stats = manager.get_storage_stats()
        print(f"Current storage usage:")
        print(f"- Models: {storage_stats['total_models']}")
        print(f"- Size: {storage_stats['actual_size_mb']:.1f}MB")
        print()
    except Exception as e:
        print(f"Error checking current storage: {e}")
        print()

    # Ask for confirmation
    print("Type 'CLEANUP' to proceed with the cleanup:")
    user_input = input("> ").strip()
    if user_input != "CLEANUP":
        print("Cleanup cancelled. No changes made.")
        return

    print()
    print("Starting cleanup...")
    print("-" * 40)

    try:
        # Create manager and run cleanup
        manager = ModelManager()
        cleanup_result = manager.cleanup_all_existing_models(confirm=True)

        print()
        print("=" * 60)
        print("CLEANUP COMPLETE")
        print("=" * 60)
        print(f"Files deleted: {cleanup_result['deleted_files']}")
        print(f"Space freed: {cleanup_result['freed_space_mb']:.1f} MB")
        print(f"Directories cleaned: {len(cleanup_result['deleted_directories'])}")

        if cleanup_result['errors']:
            print(f"Errors encountered: {len(cleanup_result['errors'])}")
            print("Errors:")
            for error in cleanup_result['errors'][:5]:  # Show first 5 errors
                print(f" - {error}")
            if len(cleanup_result['errors']) > 5:
                print(f" ... and {len(cleanup_result['errors']) - 5} more")

        print()
        print("System is now ready for fresh model training!")
        print("The following directories have been created:")
        print("- models/best_models/")
        print("- models/cnn/")
        print("- models/rl/")
        print("- models/checkpoints/")
        print("- NN/models/saved/")
        print()
        print("New models will be automatically managed by the ModelManager.")

    except Exception as e:
        print(f"Error during cleanup: {e}")
        logging.exception("Cleanup failed")
        sys.exit(1)


if __name__ == "__main__":
    main()
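
The script drives two ModelManager methods, get_storage_stats() and cleanup_all_existing_models(confirm=...), and relies only on the dictionary keys shown above. Below is a minimal sketch of that interface, assuming the directory list the script itself prints; the class name and internals are illustrative guesses, not the repository's actual model_manager.py.

# Sketch only: interface inferred from the keys the cleanup script reads.
# The real ModelManager in model_manager.py may differ.
from pathlib import Path
from typing import Any, Dict, Iterator

class ModelManagerSketch:
    # Directories taken from the script's own output above
    MODEL_DIRS = ["models/best_models", "models/cnn", "models/rl",
                  "models/checkpoints", "NN/models/saved"]

    def _model_files(self) -> Iterator[Path]:
        for d in map(Path, self.MODEL_DIRS):
            if d.is_dir():
                yield from (p for p in d.rglob("*")
                            if p.is_file() and p.suffix in (".pt", ".pth"))

    def get_storage_stats(self) -> Dict[str, Any]:
        files = list(self._model_files())
        return {"total_models": len(files),
                "actual_size_mb": sum(p.stat().st_size for p in files) / 1e6}

    def cleanup_all_existing_models(self, confirm: bool = False) -> Dict[str, Any]:
        result = {"deleted_files": 0, "freed_space_mb": 0.0,
                  "deleted_directories": [], "errors": []}
        if not confirm:
            return result
        for p in self._model_files():
            try:
                result["freed_space_mb"] += p.stat().st_size / 1e6
                p.unlink()
                result["deleted_files"] += 1
            except OSError as e:
                result["errors"].append(f"{p}: {e}")
        return result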

View File

@@ -1,71 +0,0 @@
#!/usr/bin/env python3
"""
Data Stream Status Checker

This script provides better information about the data stream status
when the dashboard is running.
"""
import requests
import json
import time
from datetime import datetime


def check_dashboard_status():
    """Check if dashboard is running and get basic status"""
    try:
        response = requests.get('http://127.0.0.1:8050', timeout=3)
        if response.status_code == 200:
            return True, "Dashboard is running"
        else:
            return False, f"Dashboard responded with status {response.status_code}"
    except requests.exceptions.ConnectionError:
        return False, "Dashboard not running (connection refused)"
    except Exception as e:
        return False, f"Error checking dashboard: {e}"


def main():
    print("🔍 Data Stream Status Check")
    print("=" * 50)

    # Check if dashboard is running
    dashboard_running, dashboard_msg = check_dashboard_status()

    if dashboard_running:
        print("✅ Dashboard Status: RUNNING")
        print(f" URL: http://127.0.0.1:8050")
        print(f" Message: {dashboard_msg}")
        print()
        print("📊 Data Stream Information:")
        print(" The data stream monitor is running inside the dashboard process.")
        print(" You should see data stream output in the dashboard console.")
        print()
        print("🔧 How to Access Data Stream:")
        print(" 1. Check the dashboard console output for data stream samples")
        print(" 2. The dashboard automatically starts data streaming")
        print(" 3. Data is being collected and displayed in real-time")
        print()
        print("📝 Expected Console Output (in dashboard terminal):")
        print(" =================================================")
        print(" DATA STREAM SAMPLE - 16:10:30")
        print(" =================================================")
        print(" OHLCV (1m): ETH/USDT | O:4335.67 H:4338.92 L:4334.21 C:4336.67 V:125.8")
        print(" TICK: ETH/USDT | Price:4336.67 Vol:0.0456 Side:buy")
        print(" MODEL: DQN | Conf:0.78 Pred:BUY Loss:0.0234")
        print(" =================================================")
        print()
        print("💡 Note: The data_stream_control.py script cannot access the")
        print(" dashboard's data stream due to process isolation.")
        print(" The data stream is active and working within the dashboard.")
    else:
        print("❌ Dashboard Status: NOT RUNNING")
        print(f" Error: {dashboard_msg}")
        print()
        print("🔧 To start the dashboard:")
        print(" python run_clean_dashboard.py")
        print()
        print(" Then check this status again.")


if __name__ == "__main__":
    main()
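
Because the checker exits after a single probe, a dashboard that is still starting up will report NOT RUNNING. A small self-contained polling variant is sketched below; the function name, retry count, and delay are arbitrary choices, not part of the deleted script.

# Illustrative helper: poll the dashboard a few times before giving up.
# Retry count, delay, and function name are arbitrary.
import time
import requests

def wait_for_dashboard(url: str = "http://127.0.0.1:8050",
                       attempts: int = 10, delay_seconds: float = 3.0) -> bool:
    for attempt in range(1, attempts + 1):
        try:
            if requests.get(url, timeout=3).status_code == 200:
                print(f"Dashboard reachable after {attempt} attempt(s)")
                return True
        except requests.exceptions.ConnectionError:
            pass
        print(f"Attempt {attempt}/{attempts}: dashboard not reachable yet")
        time.sleep(delay_seconds)
    return False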

View File

@@ -70,70 +70,11 @@ def test_trading_statistics():
logger.info(f" Avg losing trade: ${daily_stats.get('avg_losing_trade', 0.0):.2f}")
logger.info(f" Total P&L: ${daily_stats.get('total_pnl', 0.0):.2f}")
# Simulate some trades if we don't have any
# If no trades, we can't test calculations
if daily_stats.get('total_trades', 0) == 0:
logger.info("3. No trades found - simulating some test trades...")
# Add some mock trades to the trade history
from core.trading_executor import TradeRecord
from datetime import datetime
# Add a winning trade
winning_trade = TradeRecord(
symbol='ETH/USDT',
side='LONG',
quantity=0.01,
entry_price=2500.0,
exit_price=2550.0,
entry_time=datetime.now(),
exit_time=datetime.now(),
pnl=0.50, # $0.50 profit
fees=0.01,
confidence=0.8
)
trading_executor.trade_history.append(winning_trade)
# Add a losing trade
losing_trade = TradeRecord(
symbol='ETH/USDT',
side='LONG',
quantity=0.01,
entry_price=2500.0,
exit_price=2480.0,
entry_time=datetime.now(),
exit_time=datetime.now(),
pnl=-0.20, # $0.20 loss
fees=0.01,
confidence=0.7
)
trading_executor.trade_history.append(losing_trade)
# Get updated stats
daily_stats = trading_executor.get_daily_stats()
logger.info(" Updated statistics after adding test trades:")
logger.info(f" Total trades: {daily_stats.get('total_trades', 0)}")
logger.info(f" Winning trades: {daily_stats.get('winning_trades', 0)}")
logger.info(f" Losing trades: {daily_stats.get('losing_trades', 0)}")
logger.info(f" Win rate: {daily_stats.get('win_rate', 0.0) * 100:.1f}%")
logger.info(f" Avg winning trade: ${daily_stats.get('avg_winning_trade', 0.0):.2f}")
logger.info(f" Avg losing trade: ${daily_stats.get('avg_losing_trade', 0.0):.2f}")
logger.info(f" Total P&L: ${daily_stats.get('total_pnl', 0.0):.2f}")
# Verify calculations
expected_win_rate = 1/2 # 1 win out of 2 trades = 50%
expected_avg_win = 0.50
expected_avg_loss = -0.20
actual_win_rate = daily_stats.get('win_rate', 0.0)
actual_avg_win = daily_stats.get('avg_winning_trade', 0.0)
actual_avg_loss = daily_stats.get('avg_losing_trade', 0.0)
logger.info("4. Verifying calculations:")
logger.info(f" Win rate: Expected {expected_win_rate*100:.1f}%, Got {actual_win_rate*100:.1f}% ✅" if abs(actual_win_rate - expected_win_rate) < 0.01 else f" Win rate: Expected {expected_win_rate*100:.1f}%, Got {actual_win_rate*100:.1f}% ❌")
logger.info(f" Avg win: Expected ${expected_avg_win:.2f}, Got ${actual_avg_win:.2f}" if abs(actual_avg_win - expected_avg_win) < 0.01 else f" Avg win: Expected ${expected_avg_win:.2f}, Got ${actual_avg_win:.2f}")
logger.info(f" Avg loss: Expected ${expected_avg_loss:.2f}, Got ${actual_avg_loss:.2f}" if abs(actual_avg_loss - expected_avg_loss) < 0.01 else f" Avg loss: Expected ${expected_avg_loss:.2f}, Got ${actual_avg_loss:.2f}")
return True
logger.info("3. No trades found - cannot test calculations without real trading data")
logger.info(" Run the system and execute some real trades to test statistics")
return False
return True
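
For reference, the statistics this test reads follow directly from their definitions: win rate is winning trades over total trades, and the averages are taken over winning and losing P&L separately. The short sketch below shows that arithmetic; the real get_daily_stats() in TradingExecutor may also account for fees or filter by day.

# Reference arithmetic for the reported statistics (sketch only).
def summarize_pnls(pnls):
    wins = [p for p in pnls if p > 0]
    losses = [p for p in pnls if p < 0]
    total = len(pnls)
    return {
        "total_trades": total,
        "winning_trades": len(wins),
        "losing_trades": len(losses),
        "win_rate": len(wins) / total if total else 0.0,
        "avg_winning_trade": sum(wins) / len(wins) if wins else 0.0,
        "avg_losing_trade": sum(losses) / len(losses) if losses else 0.0,
        "total_pnl": sum(pnls),
    }

# The mock trades removed above (+$0.50 and -$0.20) would give a 50% win
# rate, a $0.50 average win, and a -$0.20 average loss.
assert summarize_pnls([0.50, -0.20])["win_rate"] == 0.5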

View File

@@ -84,52 +84,10 @@ def test_win_rate_calculation():
     trading_executor = TradingExecutor()
-    # Clear existing trades
-    trading_executor.trade_history = []
-    # Add test trades with meaningful P&L
-    logger.info("1. Adding test trades with meaningful P&L:")
-    # Add 3 winning trades
-    for i in range(3):
-        winning_trade = TradeRecord(
-            symbol='ETH/USDT',
-            side='LONG',
-            quantity=1.0,
-            entry_price=2500.0,
-            exit_price=2550.0,
-            entry_time=datetime.now(),
-            exit_time=datetime.now(),
-            pnl=50.0, # $50 profit with leverage
-            fees=1.0,
-            confidence=0.8,
-            hold_time_seconds=30.0 # 30 second hold
-        )
-        trading_executor.trade_history.append(winning_trade)
-        logger.info(f" Added winning trade #{i+1}: +$50.00 (30s hold)")
-    # Add 2 losing trades
-    for i in range(2):
-        losing_trade = TradeRecord(
-            symbol='ETH/USDT',
-            side='LONG',
-            quantity=1.0,
-            entry_price=2500.0,
-            exit_price=2475.0,
-            entry_time=datetime.now(),
-            exit_time=datetime.now(),
-            pnl=-25.0, # $25 loss with leverage
-            fees=1.0,
-            confidence=0.7,
-            hold_time_seconds=15.0 # 15 second hold
-        )
-        trading_executor.trade_history.append(losing_trade)
-        logger.info(f" Added losing trade #{i+1}: -$25.00 (15s hold)")
-    # Get statistics
+    # Get statistics from existing trades
     stats = trading_executor.get_daily_stats()
-    logger.info("2. Calculated statistics:")
+    logger.info("1. Current trading statistics:")
     logger.info(f" Total trades: {stats['total_trades']}")
     logger.info(f" Winning trades: {stats['winning_trades']}")
     logger.info(f" Losing trades: {stats['losing_trades']}")
@@ -137,21 +95,23 @@ def test_win_rate_calculation():
     logger.info(f" Avg winning trade: ${stats['avg_winning_trade']:.2f}")
     logger.info(f" Avg losing trade: ${stats['avg_losing_trade']:.2f}")
     logger.info(f" Total P&L: ${stats['total_pnl']:.2f}")
-    # Verify calculations
-    expected_win_rate = 3/5 # 3 wins out of 5 trades = 60%
-    expected_avg_win = 50.0
-    expected_avg_loss = -25.0
-    logger.info("3. Verification:")
-    win_rate_ok = abs(stats['win_rate'] - expected_win_rate) < 0.01
-    avg_win_ok = abs(stats['avg_winning_trade'] - expected_avg_win) < 0.01
-    avg_loss_ok = abs(stats['avg_losing_trade'] - expected_avg_loss) < 0.01
-    logger.info(f" Win rate: Expected {expected_win_rate*100:.1f}%, Got {stats['win_rate']*100:.1f}% {'✅' if win_rate_ok else '❌'}")
-    logger.info(f" Avg win: Expected ${expected_avg_win:.2f}, Got ${stats['avg_winning_trade']:.2f} {'✅' if avg_win_ok else '❌'}")
-    logger.info(f" Avg loss: Expected ${expected_avg_loss:.2f}, Got ${stats['avg_losing_trade']:.2f} {'✅' if avg_loss_ok else '❌'}")
+    # If no trades, we can't verify calculations
+    if stats['total_trades'] == 0:
+        logger.info("2. No trades found - cannot verify calculations")
+        logger.info(" Run the system and execute real trades to test statistics")
+        return False
+    # Basic sanity checks on existing data
+    logger.info("2. Basic validation:")
+    win_rate_ok = 0.0 <= stats['win_rate'] <= 1.0
+    avg_win_ok = stats['avg_winning_trade'] >= 0 if stats['winning_trades'] > 0 else True
+    avg_loss_ok = stats['avg_losing_trade'] <= 0 if stats['losing_trades'] > 0 else True
+    logger.info(f" Win rate in valid range [0,1]: {'✅' if win_rate_ok else '❌'}")
+    logger.info(f" Avg win is positive when winning trades exist: {'✅' if avg_win_ok else '❌'}")
+    logger.info(f" Avg loss is negative when losing trades exist: {'✅' if avg_loss_ok else '❌'}")
     return win_rate_ok and avg_win_ok and avg_loss_ok
def test_new_features():
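
As a quick illustration of what the new sanity checks accept and reject, the same three conditions are applied below to hand-made stats dictionaries; the numbers echo the expected figures from the removed mock trades and are for illustration only.

# Illustration only: the three sanity checks applied to hand-made stats.
def passes_basic_validation(stats):
    win_rate_ok = 0.0 <= stats['win_rate'] <= 1.0
    avg_win_ok = stats['avg_winning_trade'] >= 0 if stats['winning_trades'] > 0 else True
    avg_loss_ok = stats['avg_losing_trade'] <= 0 if stats['losing_trades'] > 0 else True
    return win_rate_ok and avg_win_ok and avg_loss_ok

good = {'win_rate': 0.6, 'winning_trades': 3, 'losing_trades': 2,
        'avg_winning_trade': 50.0, 'avg_losing_trade': -25.0}
bad = {'win_rate': 1.4, 'winning_trades': 3, 'losing_trades': 2,
       'avg_winning_trade': 50.0, 'avg_losing_trade': 25.0}
assert passes_basic_validation(good)
assert not passes_basic_validation(bad)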

View File

@@ -9,6 +9,6 @@ Start-Process powershell -ArgumentList "-Command python run_tensorboard.py" -Win
Write-Host "Starting TensorBoard... Please wait" -ForegroundColor Yellow
Start-Sleep -Seconds 5
# Start the live trading demo in the current window
Write-Host "Starting Live Trading Demo with mock data..." -ForegroundColor Green
python run_live_demo.py --symbol ETH/USDT --timeframe 1m --model models/trading_agent_best_pnl.pt --mock
# Start the live trading system in the current window
Write-Host "Starting Live Trading System..." -ForegroundColor Green
python main_clean.py --port 8051