refactoring, predictions WIP
This commit is contained in:
267
web/cnn_dashboard.py
Normal file
267
web/cnn_dashboard.py
Normal file
@@ -0,0 +1,267 @@
|
||||
#!/usr/bin/env python3
"""
CNN Trading Dashboard - Web UI Layer

This is a lightweight Dash application that provides the web interface
for CNN pivot predictions. All business logic is handled by core modules.
"""

import logging
import sys
import os
from datetime import datetime

# Add core modules to path
# NOTE: must run before the `core.*` import below so the parent directory
# is resolvable when this file is executed directly as a script.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

import dash
from dash import dcc, html, Input, Output, callback
import dash_bootstrap_components as dbc

from core.chart_data_provider import ChartDataProvider

# Setup logging with ASCII-only output
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
|
||||
|
||||
class CNNTradingDashboard:
    """Lightweight Dash web interface for CNN trading predictions.

    All business logic (price simulation, prediction/pivot generation and
    chart construction) lives in ``ChartDataProvider``; this class only
    wires the provider's output into Dash components and callbacks.
    """

    def __init__(self):
        # Initialize Dash app
        self.app = dash.Dash(
            __name__,
            external_stylesheets=[dbc.themes.BOOTSTRAP],
            title="CNN Trading Dashboard"
        )

        # Initialize core data provider (single source of chart/status data)
        self.data_provider = ChartDataProvider()

        # Setup web interface
        self._setup_layout()
        self._setup_callbacks()

        logger.info("CNN Trading Dashboard web interface initialized")

    def _setup_layout(self):
        """Setup the web dashboard layout.

        Layout structure: header row, main chart row, three status panels
        (CNN / pivot / training), a legend alert, and a 5-second
        ``dcc.Interval`` that drives the single refresh callback.
        """
        self.app.layout = dbc.Container([
            # Header
            dbc.Row([
                dbc.Col([
                    html.H1("CNN Trading Dashboard",
                            className="text-center text-primary mb-2"),
                    html.P("Real-time CNN pivot predictions for ETH/USDT trading",
                           className="text-center text-muted mb-4")
                ])
            ]),

            # Main chart
            dbc.Row([
                dbc.Col([
                    dbc.Card([
                        dbc.CardHeader([
                            html.H4("Price Chart with CNN Predictions", className="mb-0")
                        ]),
                        dbc.CardBody([
                            dcc.Graph(
                                id='main-chart',
                                style={'height': '600px'},
                                config={'displayModeBar': True}
                            )
                        ])
                    ])
                ], width=12)
            ], className="mb-4"),

            # Status panels
            dbc.Row([
                # CNN Status
                dbc.Col([
                    dbc.Card([
                        dbc.CardHeader([
                            html.H5("CNN Prediction Status", className="mb-0")
                        ]),
                        dbc.CardBody([
                            html.Div(id='cnn-status')
                        ])
                    ])
                ], width=4),

                # Pivot Detection Status
                dbc.Col([
                    dbc.Card([
                        dbc.CardHeader([
                            html.H5("Pivot Detection Status", className="mb-0")
                        ]),
                        dbc.CardBody([
                            html.Div(id='pivot-status')
                        ])
                    ])
                ], width=4),

                # Training Data Status
                dbc.Col([
                    dbc.Card([
                        dbc.CardHeader([
                            html.H5("Training Data Capture", className="mb-0")
                        ]),
                        dbc.CardBody([
                            html.Div(id='training-status')
                        ])
                    ])
                ], width=4)
            ], className="mb-4"),

            # System info
            dbc.Row([
                dbc.Col([
                    dbc.Alert([
                        html.H6("Legend:", className="mb-2"),
                        html.Ul([
                            html.Li("Hollow Red Circles: CNN HIGH pivot predictions"),
                            html.Li("Hollow Green Circles: CNN LOW pivot predictions"),
                            html.Li("Red Triangles: Actual HIGH pivots detected"),
                            html.Li("Green Triangles: Actual LOW pivots detected"),
                            html.Li("Circle/Triangle size indicates confidence/strength")
                        ], className="mb-0")
                    ], color="info", className="mb-3")
                ])
            ]),

            # Auto-refresh interval
            dcc.Interval(
                id='refresh-interval',
                interval=5000,  # Update every 5 seconds
                n_intervals=0
            )

        ], fluid=True)

    def _setup_callbacks(self):
        """Setup Dash callbacks for web interface updates.

        A single callback fans out to all four outputs so the chart and the
        three status panels always refresh from the same provider snapshot.
        """

        @self.app.callback(
            [Output('main-chart', 'figure'),
             Output('cnn-status', 'children'),
             Output('pivot-status', 'children'),
             Output('training-status', 'children')],
            [Input('refresh-interval', 'n_intervals')]
        )
        def update_dashboard(n_intervals):
            """Main callback to update all dashboard components"""
            try:
                # Simulate price update
                self.data_provider.simulate_price_update()

                # Get updated predictions and pivots
                predictions, pivots = self.data_provider.update_predictions_and_pivots()

                # Create main chart
                fig = self.data_provider.create_price_chart()

                # Add predictions and pivots to chart
                fig = self.data_provider.add_cnn_predictions_to_chart(fig, predictions)
                fig = self.data_provider.add_actual_pivots_to_chart(fig, pivots)

                # Get status for info panels
                status = self.data_provider.get_current_status()

                # Create status displays
                cnn_status = self._create_cnn_status_display(status.get('predictions', {}))
                pivot_status = self._create_pivot_status_display(status.get('pivots', {}))
                training_status = self._create_training_status_display(status.get('training', {}))

                return fig, cnn_status, pivot_status, training_status

            except Exception as e:
                logger.error(f"Error updating dashboard: {e}")
                # Return empty/default values on error
                # (an empty dict clears the graph rather than keeping a stale figure)
                return {}, "Error loading CNN status", "Error loading pivot status", "Error loading training status"

    def _create_cnn_status_display(self, stats: dict) -> list:
        """Create CNN status display components.

        ``stats`` is the ``'predictions'`` sub-dict from
        ``ChartDataProvider.get_current_status()``; missing keys fall back
        to zero so a partial status never raises.
        """
        try:
            active_predictions = stats.get('active_predictions', 0)
            high_confidence = stats.get('high_confidence', 0)
            avg_confidence = stats.get('avg_confidence', 0)

            return [
                html.P(f"Active Predictions: {active_predictions}", className="mb-1"),
                html.P(f"High Confidence: {high_confidence}", className="mb-1"),
                html.P(f"Average Confidence: {avg_confidence:.1%}", className="mb-1"),
                dbc.Progress(
                    value=avg_confidence * 100,
                    # green above 70% confidence, yellow above 50%, red otherwise
                    color="success" if avg_confidence > 0.7 else "warning" if avg_confidence > 0.5 else "danger",
                    className="mb-2"
                ),
                html.Small(f"Last Update: {datetime.now().strftime('%H:%M:%S')}",
                           className="text-muted")
            ]
        except Exception as e:
            logger.error(f"Error creating CNN status display: {e}")
            return [html.P("Error loading CNN status")]

    def _create_pivot_status_display(self, stats: dict) -> list:
        """Create pivot detection status display components.

        ``stats`` is the ``'pivots'`` sub-dict of the provider status; the
        progress bar shows the confirmed/total ratio (guarded against
        division by zero when no pivots exist yet).
        """
        try:
            total_pivots = stats.get('total_pivots', 0)
            high_pivots = stats.get('high_pivots', 0)
            low_pivots = stats.get('low_pivots', 0)
            confirmed = stats.get('confirmed_pivots', 0)

            return [
                html.P(f"Total Pivots: {total_pivots}", className="mb-1"),
                html.P(f"HIGH Pivots: {high_pivots}", className="mb-1"),
                html.P(f"LOW Pivots: {low_pivots}", className="mb-1"),
                html.P(f"Confirmed: {confirmed}", className="mb-1"),
                dbc.Progress(
                    value=(confirmed / max(total_pivots, 1)) * 100,
                    color="success",
                    className="mb-2"
                ),
                html.Small("Williams Market Structure", className="text-muted")
            ]
        except Exception as e:
            logger.error(f"Error creating pivot status display: {e}")
            return [html.P("Error loading pivot status")]

    def _create_training_status_display(self, stats: dict) -> list:
        """Create training data status display components.

        ``stats`` is the ``'training'`` sub-dict of the provider status;
        accuracies are rendered as percentages and drive the progress bar.
        """
        try:
            captured_points = stats.get('captured_points', 0)
            price_accuracy = stats.get('avg_price_accuracy', 0)
            time_accuracy = stats.get('avg_time_accuracy', 0)

            return [
                html.P(f"Data Points: {captured_points}", className="mb-1"),
                html.P(f"Price Accuracy: {price_accuracy:.1%}", className="mb-1"),
                html.P(f"Time Accuracy: {time_accuracy:.1%}", className="mb-1"),
                dbc.Progress(
                    value=price_accuracy * 100,
                    # green above 80% accuracy, yellow above 60%, red otherwise
                    color="success" if price_accuracy > 0.8 else "warning" if price_accuracy > 0.6 else "danger",
                    className="mb-2"
                ),
                html.Small("Auto-saved every 5 points", className="text-muted")
            ]
        except Exception as e:
            logger.error(f"Error creating training status display: {e}")
            return [html.P("Error loading training status")]

    def run(self, host='127.0.0.1', port=8050, debug=False):
        """Run the dashboard web server.

        Blocks until the server stops; startup failures are logged and
        re-raised to the caller.
        NOTE(review): ``app.run_server`` is deprecated in newer Dash
        releases in favor of ``app.run`` — confirm the pinned Dash version
        before changing.
        """
        try:
            logger.info(f"Starting CNN Trading Dashboard at http://{host}:{port}")
            self.app.run_server(host=host, port=port, debug=debug)
        except Exception as e:
            logger.error(f"Error starting dashboard server: {e}")
            raise
|
||||
|
||||
def main():
    """Entry point: construct the dashboard and serve it in debug mode."""
    CNNTradingDashboard().run(debug=True)


if __name__ == "__main__":
    main()
|
10021
web/dashboard_backup.py
Normal file
10021
web/dashboard_backup.py
Normal file
File diff suppressed because it is too large
Load Diff
@@ -2568,6 +2568,241 @@
|
||||
# except Exception as e:
|
||||
# logger.error(f"Error handling unified stream data: {e}")
|
||||
|
||||
# def _get_cnn_pivot_predictions(self, symbol: str, df: pd.DataFrame) -> List[Dict[str, Any]]:
|
||||
# """Get CNN model predictions for next pivot points"""
|
||||
# try:
|
||||
# predictions = []
|
||||
|
||||
# if not hasattr(self, 'orchestrator') or not self.orchestrator:
|
||||
# return predictions
|
||||
#
|
||||
# # Check if orchestrator has CNN capabilities
|
||||
# if hasattr(self.orchestrator, 'pivot_rl_trainer') and self.orchestrator.pivot_rl_trainer:
|
||||
# if hasattr(self.orchestrator.pivot_rl_trainer, 'williams') and self.orchestrator.pivot_rl_trainer.williams:
|
||||
# williams = self.orchestrator.pivot_rl_trainer.williams
|
||||
#
|
||||
# if hasattr(williams, 'cnn_model') and williams.cnn_model:
|
||||
# # Get latest market data for CNN input
|
||||
# if not df.empty and len(df) >= 900: # CNN needs at least 900 timesteps
|
||||
# try:
|
||||
# # Prepare multi-timeframe input for CNN
|
||||
# current_time = datetime.now()
|
||||
#
|
||||
# # Create dummy pivot point for CNN input preparation
|
||||
# dummy_pivot = type('SwingPoint', (), {
|
||||
# 'timestamp': current_time,
|
||||
# 'price': df['close'].iloc[-1],
|
||||
# 'index': len(df) - 1,
|
||||
# 'swing_type': 'prediction_point',
|
||||
# 'strength': 1
|
||||
# })()
|
||||
#
|
||||
# # Prepare CNN input using Williams structure
|
||||
# cnn_input = williams._prepare_cnn_input(
|
||||
# dummy_pivot,
|
||||
# df.values, # OHLCV data context
|
||||
# None # No previous pivot details
|
||||
# )
|
||||
#
|
||||
# if cnn_input is not None and cnn_input.size > 0:
|
||||
# # Reshape for batch prediction
|
||||
# if len(cnn_input.shape) == 2:
|
||||
# cnn_input = np.expand_dims(cnn_input, axis=0)
|
||||
#
|
||||
# # Get CNN prediction
|
||||
# pred_output = williams.cnn_model.model.predict(cnn_input, verbose=0)
|
||||
#
|
||||
# if pred_output is not None and len(pred_output) > 0:
|
||||
# # Parse CNN output (10 outputs for 5 Williams levels)
|
||||
# # Each level has [type_probability, predicted_price]
|
||||
# current_price = df['close'].iloc[-1]
|
||||
#
|
||||
# for level_idx in range(min(5, len(pred_output[0]) // 2)):
|
||||
# type_prob = pred_output[0][level_idx * 2]
|
||||
# price_offset = pred_output[0][level_idx * 2 + 1]
|
||||
#
|
||||
# # Determine prediction type
|
||||
# is_high = type_prob > 0.5
|
||||
# confidence = abs(type_prob - 0.5) * 2 # Convert to 0-1 range
|
||||
#
|
||||
# # Calculate predicted price
|
||||
# predicted_price = current_price + (price_offset * current_price * 0.01) # Assume price_offset is percentage
|
||||
#
|
||||
# # Only include predictions with reasonable confidence
|
||||
# if confidence > 0.3:
|
||||
# prediction = {
|
||||
# 'level': level_idx + 1,
|
||||
# 'type': 'HIGH' if is_high else 'LOW',
|
||||
# 'predicted_price': predicted_price,
|
||||
# 'confidence': confidence,
|
||||
# 'timestamp': current_time,
|
||||
# 'current_price': current_price,
|
||||
# 'price_offset_pct': price_offset * 100,
|
||||
# 'model_output': {
|
||||
# 'type_prob': float(type_prob),
|
||||
# 'price_offset': float(price_offset)
|
||||
# }
|
||||
# }
|
||||
# predictions.append(prediction)
|
||||
#
|
||||
# logger.debug(f"[CNN] Generated {len(predictions)} pivot predictions for {symbol}")
|
||||
#
|
||||
# except Exception as e:
|
||||
# logger.warning(f"Error generating CNN predictions: {e}")
|
||||
#
|
||||
# return predictions
|
||||
#
|
||||
# except Exception as e:
|
||||
# logger.error(f"Error getting CNN pivot predictions: {e}")
|
||||
# return []
|
||||
|
||||
# def _add_cnn_predictions_to_chart(self, fig: go.Figure, predictions: List[Dict[str, Any]], row: int = 1):
|
||||
# """Add CNN predictions as hollow circles to the chart"""
|
||||
# try:
|
||||
# if not predictions:
|
||||
# return
|
||||
#
|
||||
# # Separate HIGH and LOW predictions
|
||||
# high_predictions = [p for p in predictions if p['type'] == 'HIGH']
|
||||
# low_predictions = [p for p in predictions if p['type'] == 'LOW']
|
||||
#
|
||||
# # Add HIGH prediction markers (hollow red circles)
|
||||
# if high_predictions:
|
||||
# # Create future timestamps for display (predictions are for future points)
|
||||
# base_time = high_predictions[0]['timestamp']
|
||||
#
|
||||
# fig.add_trace(
|
||||
# go.Scatter(
|
||||
# x=[base_time + timedelta(minutes=i*5) for i in range(len(high_predictions))],
|
||||
# y=[p['predicted_price'] for p in high_predictions],
|
||||
# mode='markers',
|
||||
# marker=dict(
|
||||
# color='rgba(255, 107, 107, 0)', # Transparent fill
|
||||
# size=[max(8, min(20, p['confidence'] * 20)) for p in high_predictions],
|
||||
# symbol='circle',
|
||||
# line=dict(
|
||||
# color='#ff6b6b', # Red border
|
||||
# width=2
|
||||
# )
|
||||
# ),
|
||||
# name='CNN HIGH Predictions',
|
||||
# showlegend=True,
|
||||
# hovertemplate='<b>CNN HIGH Prediction</b><br>' +
|
||||
# 'Price: $%{y:.2f}<br>' +
|
||||
# 'Confidence: %{customdata:.1%}<br>' +
|
||||
# 'Level: %{text}<extra></extra>',
|
||||
# customdata=[p['confidence'] for p in high_predictions],
|
||||
# text=[f"Level {p['level']}" for p in high_predictions]
|
||||
# ),
|
||||
# row=row, col=1
|
||||
# )
|
||||
#
|
||||
# # Add LOW prediction markers (hollow green circles)
|
||||
# if low_predictions:
|
||||
# base_time = low_predictions[0]['timestamp']
|
||||
#
|
||||
# fig.add_trace(
|
||||
# go.Scatter(
|
||||
# x=[base_time + timedelta(minutes=i*5) for i in range(len(low_predictions))],
|
||||
# y=[p['predicted_price'] for p in low_predictions],
|
||||
# mode='markers',
|
||||
# marker=dict(
|
||||
# color='rgba(0, 255, 136, 0)', # Transparent fill
|
||||
# size=[max(8, min(20, p['confidence'] * 20)) for p in low_predictions],
|
||||
# symbol='circle',
|
||||
# line=dict(
|
||||
# color='#00ff88', # Green border
|
||||
# width=2
|
||||
# )
|
||||
# ),
|
||||
# name='CNN LOW Predictions',
|
||||
# showlegend=True,
|
||||
# hovertemplate='<b>CNN LOW Prediction</b><br>' +
|
||||
# 'Price: $%{y:.2f}<br>' +
|
||||
# 'Confidence: %{customdata:.1%}<br>' +
|
||||
# 'Level: %{text}<extra></extra>',
|
||||
# customdata=[p['confidence'] for p in low_predictions],
|
||||
# text=[f"Level {p['level']}" for p in low_predictions]
|
||||
# ),
|
||||
# row=row, col=1
|
||||
# )
|
||||
#
|
||||
# logger.debug(f"[CHART] Added {len(high_predictions)} HIGH and {len(low_predictions)} LOW CNN predictions to chart")
|
||||
#
|
||||
# except Exception as e:
|
||||
# logger.error(f"Error adding CNN predictions to chart: {e}")
|
||||
|
||||
# def _capture_actual_pivot_data(self, actual_pivot: Dict[str, Any]) -> None:
|
||||
# """Capture actual pivot data when it occurs for training comparison"""
|
||||
# try:
|
||||
# if not hasattr(self, '_pivot_training_data'):
|
||||
# self._pivot_training_data = []
|
||||
#
|
||||
# # Store actual pivot with timestamp for later comparison with predictions
|
||||
# pivot_data = {
|
||||
# 'actual_pivot': actual_pivot,
|
||||
# 'timestamp': datetime.now(),
|
||||
# 'captured_at': datetime.now().isoformat()
|
||||
# }
|
||||
#
|
||||
# self._pivot_training_data.append(pivot_data)
|
||||
#
|
||||
# # Keep only last 1000 actual pivots
|
||||
# if len(self._pivot_training_data) > 1000:
|
||||
# self._pivot_training_data = self._pivot_training_data[-1000:]
|
||||
#
|
||||
# logger.info(f"[TRAINING] Captured actual pivot: {actual_pivot['type']} at ${actual_pivot['price']:.2f}")
|
||||
#
|
||||
# # Save to persistent storage periodically
|
||||
# if len(self._pivot_training_data) % 10 == 0:
|
||||
# self._save_pivot_training_data()
|
||||
#
|
||||
# except Exception as e:
|
||||
# logger.error(f"Error capturing actual pivot data: {e}")
|
||||
|
||||
# def _save_pivot_training_data(self) -> None:
|
||||
# """Save pivot training data to JSON file for model improvement"""
|
||||
# try:
|
||||
# if not hasattr(self, '_pivot_training_data') or not self._pivot_training_data:
|
||||
# return
|
||||
#
|
||||
# # Create data directory if it doesn't exist
|
||||
# import os
|
||||
# os.makedirs('data/cnn_training', exist_ok=True)
|
||||
#
|
||||
# # Save to timestamped file
|
||||
# timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
|
||||
# filename = f'data/cnn_training/pivot_predictions_vs_actual_{timestamp}.json'
|
||||
#
|
||||
# # Prepare data for JSON serialization
|
||||
# save_data = {
|
||||
# 'metadata': {
|
||||
# 'created_at': datetime.now().isoformat(),
|
||||
# 'total_samples': len(self._pivot_training_data),
|
||||
# 'description': 'CNN pivot predictions compared with actual market pivots'
|
||||
# },
|
||||
# 'training_samples': []
|
||||
# }
|
||||
#
|
||||
# for sample in self._pivot_training_data:
|
||||
# # Convert datetime objects to ISO strings for JSON
|
||||
# json_sample = {
|
||||
# 'actual_pivot': sample['actual_pivot'],
|
||||
# 'timestamp': sample['timestamp'].isoformat() if isinstance(sample['timestamp'], datetime) else sample['timestamp'],
|
||||
# 'captured_at': sample['captured_at']
|
||||
# }
|
||||
# save_data['training_samples'].append(json_sample)
|
||||
#
|
||||
# # Write to file
|
||||
# import json
|
||||
# with open(filename, 'w') as f:
|
||||
# json.dump(save_data, f, indent=2, default=str)
|
||||
#
|
||||
# logger.info(f"[TRAINING] Saved {len(self._pivot_training_data)} pivot training samples to {filename}")
|
||||
#
|
||||
# except Exception as e:
|
||||
# logger.error(f"Error saving pivot training data: {e}")
|
||||
|
||||
# def create_scalping_dashboard(data_provider=None, orchestrator=None, trading_executor=None):
|
||||
# """Create real-time dashboard instance with MEXC integration"""
|
||||
# return RealTimeScalpingDashboard(data_provider, orchestrator, trading_executor)
|
||||
|
2576
web/temp_dashboard.py
Normal file
2576
web/temp_dashboard.py
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user