RL module possibly working

Dobromir Popov
2025-05-28 23:42:06 +03:00
parent de01d3665c
commit 6b7d7aec81
16 changed files with 5118 additions and 580 deletions

View File

@@ -60,6 +60,10 @@ except ImportError:
'models': {}
}
def get_models_by_type(self, model_type: str):
"""Get models by type - fallback implementation returns empty dict"""
return {}
def register_model(self, model, weight=1.0):
return True
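The hunk above widens the ImportError fallback so dashboard code can call the registry API even when the real model registry fails to import. A minimal sketch of the pattern (the import path and class name are assumptions, not from this diff):

    try:
        from core.model_registry import ModelRegistry  # assumed real import
    except ImportError:
        class ModelRegistry:  # fallback stub mirroring the hunk above
            def get_models_by_type(self, model_type: str):
                """Fallback implementation returns empty dict"""
                return {}
            def register_model(self, model, weight=1.0):
                return True  # accept registrations as no-ops

    registry = ModelRegistry()
    for name, model in registry.get_models_by_type("cnn").items():
        ...  # iterates over nothing when the fallback is active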
@@ -305,7 +309,8 @@ class TradingDashboard:
], className="row g-2 mb-3"),
# Bottom row - Session performance and system status
html.Div([
html.Div([
# Session performance - 1/3 width
html.Div([
html.Div([
@@ -313,10 +318,16 @@ class TradingDashboard:
html.I(className="fas fa-chart-pie me-2"),
"Session Performance"
], className="card-title mb-2"),
html.Button(
"Clear Session",
id="clear-history-btn",
className="btn btn-sm btn-outline-danger mb-2",
n_clicks=0
),
html.Div(id="session-performance")
], className="card-body p-2")
], className="card", style={"width": "32%"}),
# Closed Trades History - 1/3 width
html.Div([
html.Div([
@@ -325,12 +336,6 @@
"Closed Trades History"
], className="card-title mb-2"),
html.Div([
html.Button(
"Clear History",
id="clear-history-btn",
className="btn btn-sm btn-outline-danger mb-2",
n_clicks=0
),
html.Div(
id="closed-trades-table",
style={"height": "300px", "overflowY": "auto"}
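These two hunks move the "Clear Session" button out of the Closed Trades card and into the Session Performance card, so the id clear-history-btn appears exactly once in the layout; Dash rejects layouts where two components share an id. A minimal callback sketch wiring that button (handler body and the session-reset helper are assumptions):

    from dash.dependencies import Input, Output

    @app.callback(
        Output("session-performance", "children"),
        Input("clear-history-btn", "n_clicks"),
        prevent_initial_call=True,
    )
    def clear_session(n_clicks):
        trading_session.clear()  # assumed session-reset helper
        return "Session cleared"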

View File

@@ -34,6 +34,7 @@ from core.config import get_config
from core.data_provider import DataProvider, MarketTick
from core.enhanced_orchestrator import EnhancedTradingOrchestrator, TradingAction
from core.trading_executor import TradingExecutor, Position, TradeRecord
from core.unified_data_stream import UnifiedDataStream, TrainingDataPacket, UIDataPacket
logger = logging.getLogger(__name__)
@@ -242,339 +243,220 @@ class RealTimeScalpingDashboard:
"""Real-time scalping dashboard with WebSocket streaming and ultra-low latency"""
def __init__(self, data_provider: DataProvider = None, orchestrator: EnhancedTradingOrchestrator = None, trading_executor: TradingExecutor = None):
"""Initialize the real-time dashboard with WebSocket streaming and MEXC integration"""
"""Initialize the real-time scalping dashboard with unified data stream"""
self.config = get_config()
self.data_provider = data_provider or DataProvider()
self.orchestrator = orchestrator or EnhancedTradingOrchestrator(self.data_provider)
self.trading_executor = trading_executor or TradingExecutor()
self.orchestrator = orchestrator
self.trading_executor = trading_executor
# Verify universal data format compliance
logger.info("UNIVERSAL DATA FORMAT VERIFICATION:")
logger.info("Required 5 timeseries streams:")
logger.info(" 1. ETH/USDT ticks (1s)")
logger.info(" 2. ETH/USDT 1m")
logger.info(" 3. ETH/USDT 1h")
logger.info(" 4. ETH/USDT 1d")
logger.info(" 5. BTC/USDT ticks (reference)")
# Preload 300s of data for better initial performance
logger.info("PRELOADING 300s OF DATA FOR INITIAL PERFORMANCE:")
preload_results = self.data_provider.preload_all_symbols_data(['1s', '1m', '5m', '15m', '1h', '1d'])
# Log preload results
for symbol, timeframe_results in preload_results.items():
for timeframe, success in timeframe_results.items():
status = "OK" if success else "FAIL"
logger.info(f" {status} {symbol} {timeframe}")
# Test universal data adapter
try:
universal_stream = self.orchestrator.universal_adapter.get_universal_data_stream()
if universal_stream:
is_valid, issues = self.orchestrator.universal_adapter.validate_universal_format(universal_stream)
if is_valid:
logger.info("Universal data format validation PASSED")
logger.info(f" ETH ticks: {len(universal_stream.eth_ticks)} samples")
logger.info(f" ETH 1m: {len(universal_stream.eth_1m)} candles")
logger.info(f" ETH 1h: {len(universal_stream.eth_1h)} candles")
logger.info(f" ETH 1d: {len(universal_stream.eth_1d)} candles")
logger.info(f" BTC reference: {len(universal_stream.btc_ticks)} samples")
logger.info(f" Data quality: {universal_stream.metadata['data_quality']['overall_score']:.2f}")
else:
logger.warning(f"FAIL: Universal data format validation FAILED: {issues}")
else:
logger.warning("FAIL: Failed to get universal data stream")
except Exception as e:
logger.error(f"FAIL: Universal data format test failed: {e}")
# Initialize new trading session with MEXC integration
self.trading_session = TradingSession(trading_executor=self.trading_executor)
# Timezone setup
# Initialize timezone (Sofia timezone)
import pytz
self.timezone = pytz.timezone('Europe/Sofia')
# Dashboard state - now using session-based metrics
self.recent_decisions = []
# Initialize unified data stream for centralized data distribution
self.unified_stream = UnifiedDataStream(self.data_provider, self.orchestrator)
# Real-time price streaming data
self.live_prices = {
'ETH/USDT': 0.0,
'BTC/USDT': 0.0
}
# Register dashboard as data consumer
self.stream_consumer_id = self.unified_stream.register_consumer(
consumer_name="ScalpingDashboard",
callback=self._handle_unified_stream_data,
data_types=['ui_data', 'training_data', 'ticks', 'ohlcv']
)
# Real-time tick buffer for main chart (WebSocket direct feed)
self.live_tick_buffer = {
'ETH/USDT': [],
'BTC/USDT': []
}
self.max_tick_buffer_size = 200 # Keep last 200 ticks for main chart
# Real-time chart data (no caching - always fresh)
# This matches our universal format: ETH (1s, 1m, 1h, 1d) + BTC (1s)
self.chart_data = {
'ETH/USDT': {
'1s': pd.DataFrame(), # ETH ticks/1s data
'1m': pd.DataFrame(), # ETH 1m data
'1h': pd.DataFrame(), # ETH 1h data
'1d': pd.DataFrame() # ETH 1d data
},
'BTC/USDT': {
'1s': pd.DataFrame() # BTC reference ticks
}
}
# Training data structures (like the old dashboard)
self.tick_cache = deque(maxlen=900) # 15 minutes of ticks at 1 tick/second
self.one_second_bars = deque(maxlen=800) # 800 seconds of 1s bars
# Dashboard data storage (updated from unified stream)
self.tick_cache = deque(maxlen=2500) # ~40 minutes of ticks at ~1 tick/second
self.one_second_bars = deque(maxlen=900) # 15 minutes of 1s bars
self.current_prices = {}
self.is_streaming = False
self.training_data_available = False
# WebSocket streaming control - now using DataProvider centralized distribution
# Enhanced training integration
self.latest_training_data: Optional[TrainingDataPacket] = None
self.latest_ui_data: Optional[UIDataPacket] = None
# Trading session with MEXC integration
self.trading_session = TradingSession(trading_executor=trading_executor)
# Dashboard state
self.streaming = False
self.data_provider_subscriber_id = None
self.data_lock = Lock()
self.app = dash.Dash(__name__, external_stylesheets=[dbc.themes.CYBORG])
# Dynamic throttling control - more aggressive optimization
self.update_frequency = 5000 # Start with 5 seconds (5000ms) - more conservative
self.min_frequency = 500 # Fastest update interval (500ms) when load is light
self.max_frequency = 10000 # Slowest update interval (10s) when heavily throttled
self.last_callback_time = 0
self.callback_duration_history = []
self.throttle_level = 0 # 0 = no throttle, 1-3 = increasing throttle levels (reduced from 5)
# Initialize missing attributes for callback functionality
self.data_lock = Lock()
self.live_prices = {'ETH/USDT': 0.0, 'BTC/USDT': 0.0}
self.chart_data = {
'ETH/USDT': {'1s': pd.DataFrame(), '1m': pd.DataFrame(), '1h': pd.DataFrame(), '1d': pd.DataFrame()},
'BTC/USDT': {'1s': pd.DataFrame()}
}
self.recent_decisions = deque(maxlen=50)
self.live_tick_buffer = {
'ETH/USDT': deque(maxlen=1000),
'BTC/USDT': deque(maxlen=1000)
}
self.max_tick_buffer_size = 1000
# Performance tracking
self.callback_performance = {
'total_calls': 0,
'successful_calls': 0,
'avg_duration': 0.0,
'last_update': datetime.now(),
'throttle_active': False,
'throttle_count': 0
}
# Throttling configuration
self.throttle_threshold = 50 # Max callbacks per minute
self.throttle_window = 60 # 1 minute window
self.callback_times = deque(maxlen=self.throttle_threshold)
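A sketch of how callback_times could enforce that 50-per-minute budget with a sliding window (the enforcement site is not part of this diff, so the method name is an assumption):

    def _within_rate_limit(self) -> bool:
        now = time.time()
        # Evict timestamps that fell out of the 60s window
        while self.callback_times and now - self.callback_times[0] > self.throttle_window:
            self.callback_times.popleft()
        if len(self.callback_times) >= self.throttle_threshold:
            return False  # over budget: skip this callback
        self.callback_times.append(now)
        return True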
# Initialize throttling attributes
self.throttle_level = 0
self.update_frequency = 2000 # Start with 2 seconds
self.max_frequency = 1000 # Fastest update (1 second)
self.min_frequency = 10000 # Slowest update (10 seconds)
self.consecutive_fast_updates = 0
self.consecutive_slow_updates = 0
self.callback_duration_history = []
self.last_callback_time = time.time()
self.last_known_state = None
# Create Dash app with real-time updates
self.app = dash.Dash(__name__,
external_stylesheets=['https://stackpath.bootstrapcdn.com/bootstrap/4.5.2/css/bootstrap.min.css'])
# WebSocket threads tracking
self.websocket_threads = []
# Inject JavaScript for debugging client-side data loading
self.app.index_string = '''
<!DOCTYPE html>
<html>
<head>
{%metas%}
<title>{%title%}</title>
{%favicon%}
{%css%}
<script>
// Debug logging for Dash callbacks
window.dashDebug = {
callbackCount: 0,
lastUpdate: null,
errors: [],
log: function(message, data) {
const timestamp = new Date().toISOString();
console.log(`[DASH DEBUG ${timestamp}] ${message}`, data || '');
// Store in window for inspection
if (!window.dashLogs) window.dashLogs = [];
window.dashLogs.push({timestamp, message, data});
// Keep only last 100 logs
if (window.dashLogs.length > 100) {
window.dashLogs = window.dashLogs.slice(-100);
}
},
logCallback: function(callbackId, inputs, outputs) {
this.callbackCount++;
this.lastUpdate = new Date();
this.log(`Callback #${this.callbackCount} - ID: ${callbackId}`, {
inputs: inputs,
outputs: outputs,
timestamp: this.lastUpdate
});
},
logError: function(error) {
this.errors.push({
timestamp: new Date(),
error: error.toString(),
stack: error.stack
});
this.log('ERROR', error);
}
};
// Override fetch to monitor _dash-update-component requests
const originalFetch = window.fetch;
window.fetch = function(...args) {
const url = args[0];
const options = args[1] || {};
if (typeof url === 'string' && url.includes('_dash-update-component')) {
window.dashDebug.log('FETCH REQUEST to _dash-update-component', {
url: url,
method: options.method || 'GET',
body: options.body ? JSON.parse(options.body) : null
});
return originalFetch.apply(this, args)
.then(response => {
window.dashDebug.log('FETCH RESPONSE from _dash-update-component', {
status: response.status,
statusText: response.statusText,
ok: response.ok
});
// Clone response to read body without consuming it
const clonedResponse = response.clone();
clonedResponse.json().then(data => {
window.dashDebug.log('RESPONSE DATA from _dash-update-component', data);
}).catch(err => {
window.dashDebug.log('ERROR parsing response JSON', err);
});
return response;
})
.catch(error => {
window.dashDebug.logError(error);
throw error;
});
}
return originalFetch.apply(this, args);
};
// Monitor DOM changes for component updates
document.addEventListener('DOMContentLoaded', function() {
window.dashDebug.log('DOM LOADED - Starting dashboard monitoring');
// Monitor specific elements for changes
const elementsToWatch = [
'current-balance',
'session-duration',
'eth-price',
'main-eth-1s-chart',
'actions-log'
];
elementsToWatch.forEach(elementId => {
const element = document.getElementById(elementId);
if (element) {
const observer = new MutationObserver(function(mutations) {
mutations.forEach(function(mutation) {
if (mutation.type === 'childList' || mutation.type === 'attributes') {
window.dashDebug.log(`ELEMENT UPDATED: ${elementId}`, {
type: mutation.type,
target: mutation.target.tagName,
newValue: element.textContent || element.innerHTML.substring(0, 100)
});
}
});
});
observer.observe(element, {
childList: true,
subtree: true,
attributes: true,
attributeOldValue: true
});
window.dashDebug.log(`WATCHING ELEMENT: ${elementId}`);
} else {
window.dashDebug.log(`ELEMENT NOT FOUND: ${elementId}`);
}
});
// Check for Dash app initialization
const checkDashApp = setInterval(() => {
if (window.dash_clientside) {
window.dashDebug.log('DASH CLIENTSIDE AVAILABLE');
clearInterval(checkDashApp);
}
if (window._dash_renderer) {
window.dashDebug.log('DASH RENDERER AVAILABLE');
clearInterval(checkDashApp);
}
}, 100);
// Log interval component status
setInterval(() => {
const intervalElement = document.querySelector('[data-dash-is-loading="true"]');
if (intervalElement) {
window.dashDebug.log('DASH COMPONENT LOADING', intervalElement.id);
}
// Log current callback status
window.dashDebug.log('STATUS CHECK', {
callbackCount: window.dashDebug.callbackCount,
lastUpdate: window.dashDebug.lastUpdate,
errorCount: window.dashDebug.errors.length,
dashRenderer: !!window._dash_renderer,
dashClientside: !!window.dash_clientside
});
}, 5000); // Every 5 seconds
});
// Helper function to get debug info
window.getDashDebugInfo = function() {
return {
callbackCount: window.dashDebug.callbackCount,
lastUpdate: window.dashDebug.lastUpdate,
errors: window.dashDebug.errors,
logs: window.dashLogs || [],
dashRenderer: !!window._dash_renderer,
dashClientside: !!window.dash_clientside
};
};
// Helper function to clear logs
window.clearDashLogs = function() {
window.dashLogs = [];
window.dashDebug.errors = [];
window.dashDebug.callbackCount = 0;
console.log('Dash debug logs cleared');
};
</script>
</head>
<body>
{%app_entry%}
<footer>
{%config%}
{%scripts%}
{%renderer%}
</footer>
<script>
// Additional debugging after Dash loads
document.addEventListener('DOMContentLoaded', function() {
setTimeout(() => {
window.dashDebug.log('DASH APP FULLY LOADED');
// Try to access Dash internals
if (window._dash_renderer && window._dash_renderer._store) {
window.dashDebug.log('DASH STORE AVAILABLE', Object.keys(window._dash_renderer._store.getState()));
}
}, 2000);
});
</script>
</body>
</html>
'''
# Setup layout and callbacks
# Setup dashboard
self._setup_layout()
self._setup_callbacks()
self._start_real_time_streaming()
# Initial data fetch to populate charts immediately
logger.info("Fetching initial data for all charts...")
self._refresh_live_data()
# Start streaming automatically
self._initialize_streaming()
# Start orchestrator trading thread
logger.info("Starting AI orchestrator trading thread...")
self._start_orchestrator_trading()
logger.info("Real-Time Scalping Dashboard initialized with unified data stream")
logger.info(f"Stream consumer ID: {self.stream_consumer_id}")
logger.info(f"Enhanced RL training integration: {'ENABLED' if orchestrator else 'DISABLED'}")
logger.info(f"MEXC trading: {'ENABLED' if trading_executor and trading_executor.trading_enabled else 'DISABLED'}")
def _initialize_streaming(self):
"""Initialize streaming and populate initial data"""
try:
logger.info("Initializing dashboard streaming and data...")
# Start unified data streaming
self._start_real_time_streaming()
# Initialize chart data with some basic data
self._initialize_chart_data()
# Start background data refresh
self._start_background_data_refresh()
logger.info("Dashboard streaming initialized successfully")
except Exception as e:
logger.error(f"Error initializing streaming: {e}")
def _initialize_chart_data(self):
"""Initialize chart data with basic data to prevent empty charts"""
try:
logger.info("Initializing chart data...")
# Get initial data for charts
for symbol in ['ETH/USDT', 'BTC/USDT']:
try:
# Get current price
current_price = self.data_provider.get_current_price(symbol)
if current_price and current_price > 0:
self.live_prices[symbol] = current_price
logger.info(f"Initial price for {symbol}: ${current_price:.2f}")
# Create initial tick data
initial_tick = {
'timestamp': datetime.now(),
'price': current_price,
'volume': 0.0,
'quantity': 0.0,
'side': 'buy',
'open': current_price,
'high': current_price,
'low': current_price,
'close': current_price
}
self.live_tick_buffer[symbol].append(initial_tick)
except Exception as e:
logger.warning(f"Error getting initial price for {symbol}: {e}")
# Set default price
default_price = 3500.0 if 'ETH' in symbol else 70000.0
self.live_prices[symbol] = default_price
# Get initial historical data for charts
for symbol in ['ETH/USDT', 'BTC/USDT']:
timeframes = ['1s', '1m', '1h', '1d'] if symbol == 'ETH/USDT' else ['1s']
for timeframe in timeframes:
try:
# Get historical data
data = self.data_provider.get_historical_data(symbol, timeframe, limit=100)
if data is not None and not data.empty:
self.chart_data[symbol][timeframe] = data
logger.info(f"Loaded {len(data)} candles for {symbol} {timeframe}")
else:
# Create empty DataFrame with proper structure
self.chart_data[symbol][timeframe] = pd.DataFrame(columns=['timestamp', 'open', 'high', 'low', 'close', 'volume'])
logger.warning(f"No data available for {symbol} {timeframe}")
except Exception as e:
logger.warning(f"Error loading data for {symbol} {timeframe}: {e}")
self.chart_data[symbol][timeframe] = pd.DataFrame(columns=['timestamp', 'open', 'high', 'low', 'close', 'volume'])
logger.info("Chart data initialization completed")
except Exception as e:
logger.error(f"Error initializing chart data: {e}")
def _start_background_data_refresh(self):
"""Start background data refresh thread"""
def background_refresh():
logger.info("Background data refresh thread started")
while True:
try:
# Refresh live prices
for symbol in ['ETH/USDT', 'BTC/USDT']:
try:
current_price = self.data_provider.get_current_price(symbol)
if current_price and current_price > 0:
with self.data_lock:
self.live_prices[symbol] = current_price
# Add to tick buffer
tick_data = {
'timestamp': datetime.now(),
'price': current_price,
'volume': 0.0,
'quantity': 0.0,
'side': 'buy',
'open': current_price,
'high': current_price,
'low': current_price,
'close': current_price
}
self.live_tick_buffer[symbol].append(tick_data)
except Exception as e:
logger.warning(f"Error refreshing price for {symbol}: {e}")
# Sleep for 5 seconds
time.sleep(5)
except Exception as e:
logger.error(f"Error in background refresh: {e}")
time.sleep(10)
# Start training data collection and model training
logger.info("Starting model training and data collection...")
self._start_training_data_collection()
logger.info("Real-Time Scalping Dashboard initialized with LIVE STREAMING")
logger.info("WebSocket price streaming enabled")
logger.info(f"Timezone: {self.timezone}")
logger.info(f"Session Balance: ${self.trading_session.starting_balance:.2f}")
logger.info("300s data preloading completed for faster initial performance")
# Start background thread
refresh_thread = Thread(target=background_refresh, daemon=True)
refresh_thread.start()
logger.info("Background data refresh thread started")
def _setup_layout(self):
"""Setup the ultra-fast real-time dashboard layout"""
@@ -845,6 +727,7 @@ class RealTimeScalpingDashboard:
open_positions = html.P("No open positions", className="text-muted")
pnl = f"${dashboard_instance.trading_session.total_pnl:+.2f}"
total_fees = f"${dashboard_instance.trading_session.total_fees:.2f}"
win_rate = f"{dashboard_instance.trading_session.get_win_rate()*100:.1f}%"
total_trades = str(dashboard_instance.trading_session.total_trades)
last_action = dashboard_instance.trading_session.last_action or "WAITING"
@@ -926,7 +809,7 @@ class RealTimeScalpingDashboard:
# Store last known state for throttling
result = (
current_balance, account_details, duration_str, open_positions, pnl, win_rate, total_trades, last_action, eth_price, btc_price, mexc_status,
current_balance, account_details, duration_str, open_positions, pnl, total_fees, win_rate, total_trades, last_action, eth_price, btc_price, mexc_status,
main_eth_chart, eth_1m_chart, eth_1h_chart, eth_1d_chart, btc_1s_chart,
model_training_status, orchestrator_status, training_events_log, actions_log, debug_status
)
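Note that Dash matches the returned tuple to the callback's declared Outputs positionally, so inserting total_fees between pnl and win_rate only works together with a fees Output added at the same position in the decorator (that hunk is not shown here).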
@@ -962,64 +845,13 @@ class RealTimeScalpingDashboard:
])
error_result = (
"$100.00", "Change: $0.00 (0.0%)", "00:00:00", "0", "$0.00", "0%", "0", "ERROR", "Loading...", "Loading...", "OFFLINE",
"$100.00", "Change: $0.00 (0.0%)", "00:00:00", "0", "$0.00", "$0.00", "0%", "0", "INIT", "Loading...", "Loading...", "OFFLINE",
empty_fig, empty_fig, empty_fig, empty_fig, empty_fig,
"Loading model status...", "Loading orchestrator status...", "Loading training events...",
"Loading real-time data...", error_debug
"Initializing models...", "Starting orchestrator...", "Loading events...",
"Waiting for data...", error_debug
)
# Store error state as last known state
dashboard_instance.last_known_state = error_result
return error_result
def _should_update_now(self, n_intervals):
"""Determine if we should update based on dynamic throttling"""
current_time = time.time()
# Always update the first few times
if n_intervals <= 3:
return True, "Initial updates"
# Check minimum time between updates
time_since_last = (current_time - self.last_callback_time) * 1000 # Convert to ms
expected_interval = self.update_frequency
# If we're being called too frequently, throttle
if time_since_last < expected_interval * 0.8: # 80% of expected interval
return False, f"Too frequent (last: {time_since_last:.0f}ms, expected: {expected_interval}ms)"
# If system is under load (based on throttle level), skip some updates
if self.throttle_level > 3: # Only start skipping at level 4+ (more lenient)
# Skip every 2nd, 3rd update etc. based on throttle level
skip_factor = min(self.throttle_level - 2, 2) # Max skip factor of 2
if n_intervals % skip_factor != 0:
return False, f"Throttled (level {self.throttle_level}, skip factor {skip_factor})"
return True, "Normal update"
def _get_last_known_state(self):
"""Return last known state for throttled updates"""
if self.last_known_state is not None:
return self.last_known_state
# Return minimal safe state if no previous state
empty_fig = {
'data': [],
'layout': {
'template': 'plotly_dark',
'title': 'Initializing...',
'paper_bgcolor': '#1e1e1e',
'plot_bgcolor': '#1e1e1e'
}
}
return (
"$100.00", "Change: $0.00 (0.0%)", "00:00:00", "0", "$0.00", "0%", "0", "INIT", "Loading...", "Loading...", "OFFLINE",
empty_fig, empty_fig, empty_fig, empty_fig, empty_fig,
"Initializing models...", "Starting orchestrator...", "Loading events...",
"Waiting for data...", html.P("Initializing dashboard...", className="text-info")
)
def _track_callback_performance(self, duration, success=True):
"""Track callback performance and adjust throttling dynamically"""
self.last_callback_time = time.time()
@@ -1077,6 +909,51 @@ class RealTimeScalpingDashboard:
if len(self.callback_duration_history) % 10 == 0:
logger.info(f"PERFORMANCE SUMMARY: Avg: {avg_duration:.2f}s, Throttle: {self.throttle_level}, Frequency: {self.update_frequency}ms")
def _should_update_now(self, n_intervals):
"""Check if dashboard should update now based on throttling"""
current_time = time.time()
# Always allow first few updates
if n_intervals <= 3:
return True, "Initial updates"
# Check if enough time has passed based on update frequency
time_since_last = (current_time - self.last_callback_time) * 1000 # Convert to ms
if time_since_last < self.update_frequency:
return False, f"Throttled: {time_since_last:.0f}ms < {self.update_frequency}ms"
# Check throttle level
if self.throttle_level > 0:
# Skip some updates based on throttle level
if n_intervals % (self.throttle_level + 1) != 0:
return False, f"Throttle level {self.throttle_level}: skipping interval {n_intervals}"
return True, "Update allowed"
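Worked example of the skip rule above: at throttle_level 2 the modulus is throttle_level + 1 = 3, so only every third interval passes (n_intervals % 3 == 0) and two of every three scheduled updates are dropped, on top of the update_frequency time gate.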
def _get_last_known_state(self):
"""Get last known state for throttled updates"""
if self.last_known_state:
return self.last_known_state
# Return safe default state
empty_fig = {
'data': [],
'layout': {
'template': 'plotly_dark',
'title': 'Loading...',
'paper_bgcolor': '#1e1e1e',
'plot_bgcolor': '#1e1e1e'
}
}
return (
"$100.00", "Change: $0.00 (0.0%)", "00:00:00", "No positions", "$0.00", "$0.00", "0.0%", "0", "WAITING",
"Loading...", "Loading...", "OFFLINE",
empty_fig, empty_fig, empty_fig, empty_fig, empty_fig,
"Initializing...", "Starting...", "Loading...", "Waiting...",
html.P("Initializing dashboard...", className="text-info")
)
def _reset_throttling(self):
"""Reset throttling to optimal settings"""
self.throttle_level = 0
@@ -1087,43 +964,33 @@
logger.info(f"THROTTLING RESET: Level=0, Frequency={self.update_frequency}ms")
def _start_real_time_streaming(self):
"""Start real-time data streaming using DataProvider centralized distribution"""
logger.info("Starting real-time data streaming via DataProvider...")
"""Start real-time streaming using unified data stream"""
def start_streaming():
try:
logger.info("Starting unified data stream for dashboard")
# Start unified data streaming
asyncio.run(self.unified_stream.start_streaming())
# Start orchestrator trading if available
if self.orchestrator:
self._start_orchestrator_trading()
# Start enhanced training data collection
self._start_training_data_collection()
logger.info("Unified data streaming started successfully")
except Exception as e:
logger.error(f"Error starting unified data streaming: {e}")
# Start streaming in background thread
streaming_thread = Thread(target=start_streaming, daemon=True)
streaming_thread.start()
# Set streaming flag
self.streaming = True
# Start DataProvider real-time streaming
try:
# Start the DataProvider's WebSocket streaming
import asyncio
def start_streaming():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self.data_provider.start_real_time_streaming())
streaming_thread = Thread(target=start_streaming, daemon=True)
streaming_thread.start()
# Subscribe to tick data from DataProvider
self.data_provider_subscriber_id = self.data_provider.subscribe_to_ticks(
callback=self._handle_data_provider_tick,
symbols=['ETH/USDT', 'BTC/USDT'],
subscriber_name="ScalpingDashboard"
)
logger.info(f"Subscribed to DataProvider tick stream: {self.data_provider_subscriber_id}")
except Exception as e:
logger.error(f"Failed to start DataProvider streaming: {e}")
# Fallback to HTTP polling only
logger.info("Falling back to HTTP polling only")
# Always start HTTP polling as backup
logger.info("Starting HTTP price polling as backup data source")
http_thread = Thread(target=self._http_price_polling, daemon=True)
http_thread.start()
# Start background data refresh thread
data_refresh_thread = Thread(target=self._background_data_updater, daemon=True)
data_refresh_thread.start()
logger.info("Real-time streaming initiated with unified data stream")
def _handle_data_provider_tick(self, tick: MarketTick):
"""Handle tick data from DataProvider"""
@@ -2283,15 +2150,26 @@ class RealTimeScalpingDashboard:
logger.info(f"FIRE: {sofia_time} | Session trading decision: {decision.action} {decision.symbol} @ ${decision.price:.2f}")
def stop_streaming(self):
"""Stop all WebSocket streams"""
logger.info("STOP: Stopping real-time WebSocket streams...")
"""Stop streaming and cleanup"""
logger.info("Stopping dashboard streaming...")
self.streaming = False
for thread in self.websocket_threads:
if thread.is_alive():
thread.join(timeout=2)
# Stop unified data stream
if hasattr(self, 'unified_stream'):
asyncio.run(self.unified_stream.stop_streaming())
# Unregister as consumer
if hasattr(self, 'stream_consumer_id'):
self.unified_stream.unregister_consumer(self.stream_consumer_id)
logger.info("STREAM: WebSocket streams stopped")
# Stop any remaining WebSocket threads
if hasattr(self, 'websocket_threads'):
for thread in self.websocket_threads:
if thread.is_alive():
thread.join(timeout=2)
logger.info("Dashboard streaming stopped")
def run(self, host: str = '127.0.0.1', port: int = 8051, debug: bool = False):
"""Run the real-time dashboard"""
@@ -2486,51 +2364,103 @@ class RealTimeScalpingDashboard:
logger.info("ORCHESTRATOR: Enhanced trading loop started with retrospective learning")
def _start_training_data_collection(self):
"""Start training data collection and model training"""
"""Start enhanced training data collection using unified stream"""
def training_loop():
try:
logger.info("Training data collection and model training started")
logger.info("Enhanced training data collection started with unified stream")
while True:
try:
# Collect tick data for training
self._collect_training_ticks()
# Get latest training data from unified stream
training_data = self.unified_stream.get_latest_training_data()
# Update context data in orchestrator
if hasattr(self.orchestrator, 'update_context_data'):
self.orchestrator.update_context_data()
# Initialize extrema trainer if not done
if hasattr(self.orchestrator, 'extrema_trainer'):
if not hasattr(self.orchestrator.extrema_trainer, '_initialized'):
self.orchestrator.extrema_trainer.initialize_context_data()
self.orchestrator.extrema_trainer._initialized = True
logger.info("Extrema trainer context data initialized")
# Run extrema detection
if hasattr(self.orchestrator, 'extrema_trainer'):
for symbol in self.orchestrator.symbols:
detected = self.orchestrator.extrema_trainer.detect_local_extrema(symbol)
if detected:
logger.info(f"Detected {len(detected)} extrema for {symbol}")
# Send training data to models periodically
if len(self.tick_cache) > 100: # Only when we have enough data
self._send_training_data_to_models()
if training_data:
# Send training data to enhanced RL pipeline
self._send_training_data_to_enhanced_rl(training_data)
# Update context data in orchestrator
if hasattr(self.orchestrator, 'update_context_data'):
self.orchestrator.update_context_data()
# Initialize extrema trainer if not done
if hasattr(self.orchestrator, 'extrema_trainer'):
if not hasattr(self.orchestrator.extrema_trainer, '_initialized'):
self.orchestrator.extrema_trainer.initialize_context_data()
self.orchestrator.extrema_trainer._initialized = True
logger.info("Extrema trainer context data initialized")
# Run extrema detection with real data
if hasattr(self.orchestrator, 'extrema_trainer'):
for symbol in self.orchestrator.symbols:
detected = self.orchestrator.extrema_trainer.detect_local_extrema(symbol)
if detected:
logger.info(f"Detected {len(detected)} extrema for {symbol}")
time.sleep(30) # Update every 30 seconds
except Exception as e:
logger.error(f"Error in training loop: {e}")
logger.error(f"Error in enhanced training loop: {e}")
time.sleep(10) # Wait before retrying
except Exception as e:
logger.error(f"Training loop failed: {e}")
logger.error(f"Enhanced training loop failed: {e}")
# Start training thread
# Start enhanced training thread
training_thread = Thread(target=training_loop, daemon=True)
training_thread.start()
logger.info("Training data collection thread started")
logger.info("Enhanced training data collection thread started")
def _send_training_data_to_enhanced_rl(self, training_data: TrainingDataPacket):
"""Send training data to enhanced RL training pipeline"""
try:
if not self.orchestrator:
return
# Extract comprehensive training data
market_state = training_data.market_state
universal_stream = training_data.universal_stream
if market_state and universal_stream:
# Send to enhanced RL trainer if available
if hasattr(self.orchestrator, 'enhanced_rl_trainer'):
# Create RL training step with comprehensive data
asyncio.run(self.orchestrator.enhanced_rl_trainer.training_step(universal_stream))
logger.debug("Sent comprehensive data to enhanced RL trainer")
# Send to extrema trainer for CNN training
if hasattr(self.orchestrator, 'extrema_trainer'):
extrema_data = self.orchestrator.extrema_trainer.get_extrema_training_data(count=50)
perfect_moves = self.orchestrator.extrema_trainer.get_perfect_moves_for_cnn(count=100)
if extrema_data:
logger.info(f"Enhanced RL: {len(extrema_data)} extrema training samples available")
if perfect_moves:
logger.info(f"Enhanced RL: {len(perfect_moves)} perfect moves for CNN training")
# Send to sensitivity learning DQN
if hasattr(self.orchestrator, 'sensitivity_learning_queue') and len(self.orchestrator.sensitivity_learning_queue) > 0:
logger.info("Enhanced RL: Sensitivity learning data available for DQN training")
# Get context features for models with real data
if hasattr(self.orchestrator, 'extrema_trainer'):
for symbol in self.orchestrator.symbols:
context_features = self.orchestrator.extrema_trainer.get_context_features_for_model(symbol)
if context_features is not None:
logger.debug(f"Enhanced RL: Context features available for {symbol}: {context_features.shape}")
# Log training data statistics
logger.info(f"Enhanced RL Training Data:")
logger.info(f" Tick cache: {len(training_data.tick_cache)} ticks")
logger.info(f" 1s bars: {len(training_data.one_second_bars)} bars")
logger.info(f" Multi-timeframe data: {len(training_data.multi_timeframe_data)} symbols")
logger.info(f" CNN features: {'Available' if training_data.cnn_features else 'Not available'}")
logger.info(f" CNN predictions: {'Available' if training_data.cnn_predictions else 'Not available'}")
logger.info(f" Market state: {'Available' if training_data.market_state else 'Not available'}")
logger.info(f" Universal stream: {'Available' if training_data.universal_stream else 'Not available'}")
except Exception as e:
logger.error(f"Error sending training data to enhanced RL: {e}")
def _collect_training_ticks(self):
"""Collect real tick data for training cache from data provider"""
@@ -2607,6 +2537,35 @@ class RealTimeScalpingDashboard:
except Exception as e:
logger.error(f"Error sending training data to models: {e}")
def _handle_unified_stream_data(self, data_packet: Dict[str, Any]):
"""Handle data from unified stream"""
try:
# Extract UI data
if 'ui_data' in data_packet:
self.latest_ui_data = data_packet['ui_data']
self.current_prices = self.latest_ui_data.current_prices
self.is_streaming = self.latest_ui_data.streaming_status == 'LIVE'
self.training_data_available = self.latest_ui_data.training_data_available
# Extract training data
if 'training_data' in data_packet:
self.latest_training_data = data_packet['training_data']
# Extract tick data
if 'ticks' in data_packet:
ticks = data_packet['ticks']
for tick in ticks[-100:]: # Keep last 100 ticks
self.tick_cache.append(tick)
# Extract OHLCV data
if 'one_second_bars' in data_packet:
bars = data_packet['one_second_bars']
for bar in bars[-100:]: # Keep last 100 bars
self.one_second_bars.append(bar)
except Exception as e:
logger.error(f"Error handling unified stream data: {e}")
def create_scalping_dashboard(data_provider=None, orchestrator=None, trading_executor=None):
"""Create real-time dashboard instance with MEXC integration"""
return RealTimeScalpingDashboard(data_provider, orchestrator, trading_executor)