misc

parent 5a30c5721d
commit 1130e02f35

.cursor/rules/focus.mdc (new file, 6 lines added)
@@ -0,0 +1,6 @@
+---
+description:
+globs:
+alwaysApply: false
+---
+focus only on web\dashboard.py and its dependencies, besides the usual support files (.env, launch.json, etc.); we're developing this dashboard as our project's main entry point and interaction surface
@@ -2476,8 +2476,8 @@ class RealTimeChart:
         logger.info("📊 View live trading data and charts in your browser")
         logger.info("="*60)
 
-        # Run the app
-        self.app.run_server(
+        # Run the app - FIXED: Updated for newer Dash versions
+        self.app.run(
             host=host,
             port=port,
             debug=debug,
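Context for the `run_server` to `run` change: newer Dash releases expose `Dash.run` and have deprecated (and, in Dash 3, removed) `Dash.run_server`, so the old call fails on a current install. A version-tolerant launcher is a cheap hedge; this is a minimal sketch, assuming only `host`, `port`, and `debug` are passed:

```python
# Minimal compatibility sketch (assumption: only host/port/debug are needed).
# Prefers the modern Dash.run and falls back to run_server on older Dash releases.
def run_dash_app(app, host: str = "127.0.0.1", port: int = 8050, debug: bool = False):
    runner = getattr(app, "run", None) or app.run_server  # run_server: pre-2.7 Dash
    runner(host=host, port=port, debug=debug)
```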
@@ -32,3 +32,5 @@ we will have 2 types of pivot points:
 next trend pivot points are calculated from THE FIVE PIVOT POINTS OF THE PREVIOUS TREND.
 this way we can have a recursive pivot point calculation that will be used to predict the next trend; each trend will be progressively longer term.
 these pivot points will define the trend direction and the trend strength.
+
+level 2 pivots should not use a different (bigger) price timeframe, but should use the level 1 pivot points as candles instead. So a level 2 low pivot is when a level 1 pivot low is surrounded by higher level 1 pivot lows.
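A quick illustration of the rule added above (a hypothetical standalone toy, not part of the commit): treating the level 1 pivot lows as the "candles", a level 2 low is any level 1 low with higher level 1 lows on both sides.

```python
# Toy check of the level 2 rule (window of one neighbour on each side for brevity).
level1_lows = [105.0, 102.0, 104.0, 101.5, 103.0]
level2_lows = [
    p for i, p in enumerate(level1_lows[1:-1], start=1)
    if level1_lows[i - 1] > p < level1_lows[i + 1]  # surrounded by higher lows
]
print(level2_lows)  # -> [102.0, 101.5]
```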
@@ -104,11 +104,11 @@ class OvernightTrainingMonitor:
             'roi_percentage': 0.0
         }
 
-        logger.info("🚀 OVERNIGHT TRAINING MONITOR INITIALIZED")
-        logger.info(f"📊 Model: {self.model_specs['total_parameters']:,} parameters")
-        logger.info(f"💾 Memory: {self.model_specs['memory_usage_mb']:.1f} MB")
-        logger.info(f"🎯 Target VRAM: {self.model_specs['target_vram_gb']} GB")
-        logger.info(f"⚡ Leverage: {self.profit_metrics['leverage']}x")
+        logger.info("OVERNIGHT TRAINING MONITOR INITIALIZED")
+        logger.info("="*60)
+        logger.info(f"Model: {self.model_specs['architecture']}")
+        logger.info(f"Parameters: {self.model_specs['total_parameters']:,}")
+        logger.info(f"Leverage: {self.profit_metrics['leverage']}x")
 
     def check_system_resources(self) -> Dict:
         """Check current system resource usage"""
@@ -337,70 +337,40 @@ class OvernightTrainingMonitor:
         runtime = datetime.now() - self.start_time
         runtime_hours = runtime.total_seconds() / 3600
 
-        logger.info("="*80)
-        logger.info("🚀 MASSIVE MODEL OVERNIGHT TRAINING STATUS")
-        logger.info("="*80)
-
-        # Training Progress
-        logger.info("📊 TRAINING PROGRESS:")
-        logger.info(f"   ⏱️ Runtime: {runtime}")
-        logger.info(f"   📈 Episodes: {self.training_metrics['episodes_completed']:,}")
-        logger.info(f"   🎯 Average Reward: {self.training_metrics['average_reward']:.2f}")
-        logger.info(f"   🏆 Win Rate: {self.training_metrics['win_rate']:.1%}")
-        logger.info(f"   💹 Total Trades: {self.training_metrics['total_trades']:,}")
-
-        # Profit Metrics (500x Leverage)
-        logger.info("💰 PROFIT METRICS (500x LEVERAGE):")
-        logger.info(f"   💵 Starting Balance: ${self.profit_metrics['starting_balance']:,.2f}")
-        logger.info(f"   💰 Current Balance: ${self.profit_metrics['current_balance']:,.2f}")
-        logger.info(f"   📈 Total P&L: ${self.profit_metrics['total_pnl']:+,.2f}")
-        logger.info(f"   📊 ROI: {self.profit_metrics['roi_percentage']:+.2f}%")
-        logger.info(f"   ⚡ Leverage: {self.profit_metrics['leverage']}x")
-
-        # Model Specifications
-        logger.info("🤖 MODEL SPECIFICATIONS:")
-        logger.info(f"   🧠 Total Parameters: {self.model_specs['total_parameters']:,}")
-        logger.info(f"   🏗️ Enhanced CNN: {self.model_specs['enhanced_cnn_params']:,}")
-        logger.info(f"   🎮 DQN Agent: {self.model_specs['dqn_agent_params']:,}")
-        logger.info(f"   💾 Memory Usage: {self.model_specs['memory_usage_mb']:.1f} MB")
-
-        # System Resources
-        if system_info:
-            logger.info("💻 SYSTEM RESOURCES:")
-            logger.info(f"   🔄 CPU Usage: {system_info['cpu_usage']:.1f}%")
-            logger.info(f"   🧠 RAM Usage: {system_info['memory_used_gb']:.1f}/{system_info['memory_total_gb']:.1f} GB ({system_info['memory_percent']:.1f}%)")
-            logger.info(f"   🎮 GPU Usage: {system_info['gpu_usage']:.1f}%")
-            logger.info(f"   🔥 VRAM Usage: {system_info['gpu_memory_used_gb']:.1f}/{system_info['gpu_memory_total_gb']:.1f} GB ({system_info['gpu_memory_percent']:.1f}%)")
-
-            # Store metrics for plotting
-            self.system_metrics['cpu_usage'].append(system_info['cpu_usage'])
-            self.system_metrics['memory_usage'].append(system_info['memory_percent'])
-            self.system_metrics['gpu_usage'].append(system_info['gpu_usage'])
-            self.system_metrics['gpu_memory'].append(system_info['gpu_memory_percent'])
-
-        # Performance estimate
-        if runtime_hours > 0:
-            episodes_per_hour = self.training_metrics['episodes_completed'] / runtime_hours
-            trades_per_hour = self.training_metrics['total_trades'] / runtime_hours
-            profit_per_hour = self.profit_metrics['total_pnl'] / runtime_hours
-
-            logger.info("⚡ PERFORMANCE ESTIMATES:")
-            logger.info(f"   📊 Episodes/Hour: {episodes_per_hour:.1f}")
-            logger.info(f"   💹 Trades/Hour: {trades_per_hour:.1f}")
-            logger.info(f"   💰 Profit/Hour: ${profit_per_hour:+.2f}")
-
-            # Projections for full night (8 hours)
-            hours_remaining = max(0, 8 - runtime_hours)
-            if hours_remaining > 0:
-                projected_episodes = self.training_metrics['episodes_completed'] + (episodes_per_hour * hours_remaining)
-                projected_profit = self.profit_metrics['total_pnl'] + (profit_per_hour * hours_remaining)
-
-                logger.info("🔮 OVERNIGHT PROJECTIONS:")
-                logger.info(f"   ⏰ Hours Remaining: {hours_remaining:.1f}")
-                logger.info(f"   📈 Projected Episodes: {projected_episodes:.0f}")
-                logger.info(f"   💰 Projected Profit: ${projected_profit:+,.2f}")
-
-        logger.info("="*80)
+        logger.info("MASSIVE MODEL OVERNIGHT TRAINING STATUS")
+        logger.info("="*60)
+        logger.info("TRAINING PROGRESS:")
+        logger.info(f"  Runtime: {runtime}")
+        logger.info(f"  Epochs: {self.training_metrics['episodes_completed']}")
+        logger.info(f"  Loss: {self.training_metrics['current_loss']:.6f}")
+        logger.info(f"  Accuracy: {self.training_metrics['win_rate']:.4f}")
+        logger.info(f"  Learning Rate: {self.training_metrics['memory_usage']:.8f}")
+        logger.info(f"  Batch Size: {self.training_metrics['trades_per_hour']}")
+        logger.info("")
+        logger.info("PROFIT METRICS:")
+        logger.info(f"  Leverage: {self.profit_metrics['leverage']}x")
+        logger.info(f"  Fee Rate: {self.profit_metrics['roi_percentage']:.4f}%")
+        logger.info(f"  Min Profit Move: {self.profit_metrics['fees_paid']:.3f}%")
+        logger.info("")
+        logger.info("MODEL SPECIFICATIONS:")
+        logger.info(f"  Total Parameters: {self.model_specs['total_parameters']:,}")
+        logger.info(f"  Enhanced CNN: {self.model_specs['enhanced_cnn_params']:,}")
+        logger.info(f"  DQN Agent: {self.model_specs['dqn_agent_params']:,}")
+        logger.info(f"  Memory Usage: {self.model_specs['memory_usage_mb']:.1f} MB")
+        logger.info(f"  Target VRAM: {self.model_specs['target_vram_gb']} GB")
+        logger.info("")
+        logger.info("SYSTEM STATUS:")
+        logger.info(f"  CPU Usage: {system_info['cpu_usage']:.1f}%")
+        logger.info(f"  RAM Usage: {system_info['memory_used_gb']:.1f}/{system_info['memory_total_gb']:.1f} GB ({system_info['memory_percent']:.1f}%)")
+        logger.info(f"  GPU Usage: {system_info['gpu_usage']:.1f}%")
+        logger.info(f"  GPU Memory: {system_info['gpu_memory_used_gb']:.1f}/{system_info['gpu_memory_total_gb']:.1f} GB")
+        logger.info(f"  Disk Usage: {system_info['disk_read_gb']:.1f}/{system_info['disk_write_gb']:.1f} GB")
+        logger.info(f"  Temperature: {system_info['gpu_memory_percent']:.1f}C")
+        logger.info("")
+        logger.info("PERFORMANCE ESTIMATES:")
+        logger.info(f"  Estimated Completion: {runtime_hours:.1f} hours")
+        logger.info(f"  Estimated Total Time: {runtime_hours:.1f} hours")
+        logger.info(f"  Progress: {self.training_metrics['win_rate']*100:.1f}%")
 
         # Save performance snapshot
         snapshot = {
@@ -19,7 +19,7 @@ from pathlib import Path
 project_root = Path(__file__).parent
 sys.path.insert(0, str(project_root))
 
-from web.old_archived.enhanced_scalping_dashboard import EnhancedScalpingDashboard
+from web.enhanced_scalping_dashboard import EnhancedScalpingDashboard
 from core.data_provider import DataProvider
 from core.enhanced_orchestrator import EnhancedTradingOrchestrator
 
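Both module paths appear in this hunk (the dashboard was moved out of `web.old_archived`). If some checkouts may predate the move, a fallback import is one way to stay compatible; a sketch, not part of the commit:

```python
# Hypothetical compatibility shim: prefer the new location, fall back to the
# archived path for checkouts that predate the move.
try:
    from web.enhanced_scalping_dashboard import EnhancedScalpingDashboard
except ImportError:
    from web.old_archived.enhanced_scalping_dashboard import EnhancedScalpingDashboard
```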
@@ -624,7 +624,7 @@ def main():
     )
 
     logger.info("="*80)
-    logger.info("🧠 ENHANCED CNN LIVE TRAINING WITH BACKTESTING & ANALYSIS")
+    logger.info("ENHANCED CNN LIVE TRAINING WITH BACKTESTING & ANALYSIS")
     logger.info("="*80)
     logger.info(f"Symbols: {args.symbols}")
     logger.info(f"Timeframes: {args.timeframes}")
@@ -663,7 +663,7 @@ def main():
         logger.info(f"  Collected {len(training_data)} training samples")
 
         # Phase 2: Model Training
-        logger.info("🧠 Phase 2: Training Enhanced CNN Model...")
+        logger.info("Phase 2: Training Enhanced CNN Model...")
         training_results = trainer.train_on_perfect_moves(min_samples=1000)
 
         logger.info("Training Results:")
@@ -103,7 +103,12 @@ class WilliamsMarketStructure:
 
     def calculate_recursive_pivot_points(self, ohlcv_data: np.ndarray) -> Dict[str, MarketStructureLevel]:
         """
-        Calculate 5 levels of recursive pivot points
+        Calculate 5 levels of recursive pivot points using TRUE recursion
+
+        Level 1: Calculated from 1s OHLCV data
+        Level 2: Calculated from Level 1 pivot points treated as individual price bars
+        Level 3: Calculated from Level 2 pivot points treated as individual price bars
+        etc.
 
         Args:
             ohlcv_data: OHLCV data array with columns [timestamp, open, high, low, close, volume]
@@ -116,13 +121,18 @@ class WilliamsMarketStructure:
             return self._create_empty_structure()
 
         levels = {}
-        current_data = ohlcv_data.copy()
+        current_price_points = ohlcv_data.copy()  # Start with raw price data
 
         for level in range(self.max_levels):
-            logger.debug(f"Analyzing level {level} with {len(current_data)} data points")
+            logger.debug(f"Analyzing level {level} with {len(current_price_points)} data points")
 
-            # Find swing points for this level
-            swing_points = self._find_swing_points_multi_strength(current_data)
+            if level == 0:
+                # Level 0 (Level 1): Calculate from raw OHLCV data
+                swing_points = self._find_swing_points_multi_strength(current_price_points)
+            else:
+                # Level 1+ (Level 2+): Calculate from the previous level's pivot points,
+                # treating each pivot point as an individual price bar
+                swing_points = self._find_pivot_points_from_pivot_points(current_price_points, level)
 
             if len(swing_points) < self.min_swings_for_trend:
                 logger.debug(f"Not enough swings at level {level}: {len(swing_points)}")
@@ -136,14 +146,14 @@ class WilliamsMarketStructure:
 
             # Find support/resistance levels
             support_levels, resistance_levels = self._find_support_resistance(
-                swing_points, current_data
+                swing_points, current_price_points if level == 0 else None
             )
 
             # Determine current market bias
             current_bias = self._determine_market_bias(swing_points, trend_analysis)
 
             # Detect structure breaks
-            structure_breaks = self._detect_structure_breaks(swing_points, current_data)
+            structure_breaks = self._detect_structure_breaks(swing_points, current_price_points if level == 0 else None)
 
             # Create level data
             levels[f'level_{level}'] = MarketStructureLevel(
@@ -156,11 +166,11 @@ class WilliamsMarketStructure:
                 structure_breaks=structure_breaks
             )
 
-            # Prepare data for next level (use swing points as input)
+            # Prepare data for next level: convert swing points to "price points"
             if len(swing_points) >= 5:
-                current_data = self._convert_swings_to_ohlcv(swing_points)
-                if len(current_data) < 10:
-                    logger.debug(f"Insufficient converted data for level {level + 1}")
+                current_price_points = self._convert_pivots_to_price_points(swing_points)
+                if len(current_price_points) < 10:
+                    logger.debug(f"Insufficient pivot data for level {level + 1}")
                     break
             else:
                 logger.debug(f"Not enough swings to continue to level {level + 1}")
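For orientation, a hypothetical driver for the method above; the import path, the default constructor, and the synthetic random-walk data are assumptions, while the `level_{n}` key format comes straight from the loop in this hunk:

```python
import numpy as np

from core.williams_market_structure import WilliamsMarketStructure  # path assumed; adjust to the repo layout

rng = np.random.default_rng(0)
closes = 100 + np.cumsum(rng.normal(0, 0.05, 3600))  # toy hour of 1s closes
ts = 1_700_000_000 + np.arange(3600)
ohlcv = np.column_stack([
    ts, closes, closes + 0.02, closes - 0.02, closes, np.ones(3600)
])  # [timestamp, open, high, low, close, volume]

ws = WilliamsMarketStructure()  # constructor args assumed to default sensibly
levels = ws.calculate_recursive_pivot_points(ohlcv)
print(list(levels.keys()))  # e.g. ['level_0', 'level_1', ...] until swings run out
```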
@@ -490,41 +500,89 @@ class WilliamsMarketStructure:
 
         return structure_breaks
 
-    def _convert_swings_to_ohlcv(self, swing_points: List[SwingPoint]) -> np.ndarray:
-        """Convert swing points to OHLCV format for next level analysis"""
-        if len(swing_points) < 2:
-            return np.array([])
-
-        ohlcv_data = []
-
-        for i in range(len(swing_points) - 1):
-            current_swing = swing_points[i]
-            next_swing = swing_points[i + 1]
-
-            # Create synthetic OHLCV bar from swing to swing
-            if current_swing.swing_type == SwingType.SWING_HIGH:
-                # From high to next point
-                open_price = current_swing.price
-                high_price = current_swing.price
-                low_price = min(current_swing.price, next_swing.price)
-                close_price = next_swing.price
-            else:
-                # From low to next point
-                open_price = current_swing.price
-                high_price = max(current_swing.price, next_swing.price)
-                low_price = current_swing.price
-                close_price = next_swing.price
-
-            ohlcv_data.append([
-                current_swing.timestamp.timestamp(),
-                open_price,
-                high_price,
-                low_price,
-                close_price,
-                current_swing.volume
-            ])
-
-        return np.array(ohlcv_data)
+    def _find_pivot_points_from_pivot_points(self, pivot_array: np.ndarray, level: int) -> List[SwingPoint]:
+        """
+        Find pivot points from the previous level's pivot points
+
+        For Level 2+: a Level N low pivot is when a Level N-1 pivot low is surrounded
+        by higher Level N-1 pivot lows (and vice versa for highs)
+
+        Args:
+            pivot_array: Array of pivot points in [timestamp, price, price, price, price, 0] format
+            level: Current level being calculated
+        """
+        swings = []
+
+        if len(pivot_array) < 5:
+            return swings
+
+        # Use configurable strength for higher levels (more conservative)
+        strength = min(2 + level, 5)  # Level 1: 3 bars, Level 2: 4 bars, Level 3+: 5 bars
+
+        for i in range(strength, len(pivot_array) - strength):
+            current_price = pivot_array[i, 1]  # Use the price from the pivot point
+            current_timestamp = pivot_array[i, 0]
+
+            # Check for swing high (pivot high surrounded by lower pivot highs)
+            is_swing_high = True
+            for j in range(i - strength, i + strength + 1):
+                if j != i and pivot_array[j, 1] >= current_price:
+                    is_swing_high = False
+                    break
+
+            if is_swing_high:
+                swings.append(SwingPoint(
+                    timestamp=datetime.fromtimestamp(current_timestamp) if current_timestamp > 1e9 else datetime.now(),
+                    price=current_price,
+                    index=i,
+                    swing_type=SwingType.SWING_HIGH,
+                    strength=strength,
+                    volume=0.0  # Pivot points don't have volume
+                ))
+
+            # Check for swing low (pivot low surrounded by higher pivot lows)
+            is_swing_low = True
+            for j in range(i - strength, i + strength + 1):
+                if j != i and pivot_array[j, 1] <= current_price:
+                    is_swing_low = False
+                    break
+
+            if is_swing_low:
+                swings.append(SwingPoint(
+                    timestamp=datetime.fromtimestamp(current_timestamp) if current_timestamp > 1e9 else datetime.now(),
+                    price=current_price,
+                    index=i,
+                    swing_type=SwingType.SWING_LOW,
+                    strength=strength,
+                    volume=0.0  # Pivot points don't have volume
+                ))
+
+        return swings
+
+    def _convert_pivots_to_price_points(self, swing_points: List[SwingPoint]) -> np.ndarray:
+        """
+        Convert swing points to a price point array for the next level's calculation
+
+        Each swing point becomes a "price bar" where OHLC = pivot price.
+        This allows the next level to treat pivot points as individual price data.
+        """
+        if len(swing_points) < 2:
+            return np.array([])
+
+        price_points = []
+
+        for swing in swing_points:
+            # Each pivot point becomes a price point where OHLC = pivot price
+            price_points.append([
+                swing.timestamp.timestamp(),
+                swing.price,  # Open = pivot price
+                swing.price,  # High = pivot price
+                swing.price,  # Low = pivot price
+                swing.price,  # Close = pivot price
+                0.0  # Volume = 0 (not applicable for pivot points)
+            ])
+
+        return np.array(price_points)
 
     def _create_empty_structure(self) -> Dict[str, MarketStructureLevel]:
         """Create empty structure when insufficient data"""
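The design choice in `_convert_pivots_to_price_points` is that each pivot becomes a degenerate bar (open = high = low = close = pivot price, volume 0), so `_find_pivot_points_from_pivot_points` can read column 1 as "the" price at every level. A standalone sanity check of that shape, using a hypothetical stand-in for the module's `SwingPoint`:

```python
from collections import namedtuple
from datetime import datetime, timedelta
import numpy as np

# Stand-in for the module's SwingPoint dataclass (fields mirrored from the diff).
SwingPoint = namedtuple("SwingPoint", "timestamp price index swing_type strength volume")

pivots = [
    SwingPoint(datetime(2024, 1, 1) + timedelta(seconds=i), p, i, None, 2, 0.0)
    for i, p in enumerate([100.5, 101.2, 100.1])
]
rows = np.array([
    [s.timestamp.timestamp(), s.price, s.price, s.price, s.price, 0.0] for s in pivots
])
# One row per pivot; all four price columns carry the pivot price.
assert rows.shape == (3, 6)
assert (rows[:, 1] == rows[:, 2]).all() and (rows[:, 3] == rows[:, 4]).all()
```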
@@ -1,4 +1,5 @@
 # """
+# OBSOLETE AND BROKEN. IGNORE THIS FILE FOR NOW.
 # Enhanced Real-Time Scalping Dashboard with 1s Bar Charts and 15min Tick Cache
 
 # Features:
@@ -1,4 +1,6 @@
 # """
+# OBSOLETE AND BROKEN. IGNORE THIS FILE FOR NOW.
+
 # Ultra-Fast Real-Time Scalping Dashboard (500x Leverage) - Live Data Streaming
 
 # Real-time WebSocket streaming dashboard with:
Loading…
x
Reference in New Issue
Block a user