training, local log
@@ -785,42 +785,94 @@ class RealTrainingAdapter:
                 logger.warning("No price data in any timeframe")
                 return None
 
-            # Concatenate all timeframes along sequence dimension
-            # This gives the model multi-timeframe context
-            price_data = np.concatenate(all_price_data, axis=0)
+            # Use only the primary timeframe (1m) for transformer training
+            # The transformer expects a fixed sequence length of 150
+            primary_tf = '1m' if '1m' in timeframes else timeframe_order[0]
 
-            # Add batch dimension [1, total_seq_len, 5]
+            if primary_tf not in timeframes:
+                logger.warning(f"Primary timeframe {primary_tf} not available")
+                return None
+
+            # Get primary timeframe data
+            primary_data = timeframes[primary_tf]
+            closes = np.array(primary_data.get('close', []), dtype=np.float32)
+
+            if len(closes) == 0:
+                logger.warning("No data in primary timeframe")
+                return None
+
+            # Use the last 150 candles (or pad/truncate to exactly 150)
+            target_seq_len = 150  # Transformer expects exactly 150 sequence length
+
+            if len(closes) >= target_seq_len:
+                # Take the last 150 candles
+                price_data = np.stack([
+                    np.array(primary_data.get('open', [])[-target_seq_len:], dtype=np.float32),
+                    np.array(primary_data.get('high', [])[-target_seq_len:], dtype=np.float32),
+                    np.array(primary_data.get('low', [])[-target_seq_len:], dtype=np.float32),
+                    np.array(primary_data.get('close', [])[-target_seq_len:], dtype=np.float32),
+                    np.array(primary_data.get('volume', [])[-target_seq_len:], dtype=np.float32)
+                ], axis=-1)
+            else:
+                # Pad with the last available candle
+                last_open = primary_data.get('open', [0])[-1] if primary_data.get('open') else 0
+                last_high = primary_data.get('high', [0])[-1] if primary_data.get('high') else 0
+                last_low = primary_data.get('low', [0])[-1] if primary_data.get('low') else 0
+                last_close = primary_data.get('close', [0])[-1] if primary_data.get('close') else 0
+                last_volume = primary_data.get('volume', [0])[-1] if primary_data.get('volume') else 0
+
+                # Pad arrays to target length
+                opens = np.array(primary_data.get('open', []), dtype=np.float32)
+                highs = np.array(primary_data.get('high', []), dtype=np.float32)
+                lows = np.array(primary_data.get('low', []), dtype=np.float32)
+                closes = np.array(primary_data.get('close', []), dtype=np.float32)
+                volumes = np.array(primary_data.get('volume', []), dtype=np.float32)
+
+                # Pad with last values
+                while len(opens) < target_seq_len:
+                    opens = np.append(opens, last_open)
+                    highs = np.append(highs, last_high)
+                    lows = np.append(lows, last_low)
+                    closes = np.append(closes, last_close)
+                    volumes = np.append(volumes, last_volume)
+
+                price_data = np.stack([opens, highs, lows, closes, volumes], axis=-1)
+
+            # Add batch dimension [1, 150, 5]
             price_data = torch.tensor(price_data, dtype=torch.float32).unsqueeze(0)
 
-            # Get primary timeframe for reference
-            primary_tf = '1m' if '1m' in timeframes else timeframe_order[0]
-            closes = np.array(timeframes[primary_tf].get('close', []), dtype=np.float32)
+            # Sequence length is now exactly 150
+            total_seq_len = 150
 
             # Create placeholder COB data (zeros if not available)
-            # COB data shape: [1, seq_len, cob_features]
+            # COB data shape: [1, 150, cob_features]
+            # MUST match the total sequence length from price_data (150)
             # Transformer expects 100 COB features (as defined in TransformerConfig)
-            cob_data = torch.zeros(1, len(closes), 100, dtype=torch.float32)  # 100 COB features
+            cob_data = torch.zeros(1, 150, 100, dtype=torch.float32)  # Match price seq_len (150)
 
             # Create technical indicators (simple ones for now)
             # tech_data shape: [1, features]
             tech_features = []
 
+            # Use the closes data from the price_data we just created
+            closes_for_tech = price_data[0, :, 3].numpy()  # Close prices from OHLCV data
+
             # Add simple technical indicators
-            if len(closes) >= 20:
-                sma_20 = np.mean(closes[-20:])
-                tech_features.append(closes[-1] / sma_20 - 1.0)  # Price vs SMA
+            if len(closes_for_tech) >= 20:
+                sma_20 = np.mean(closes_for_tech[-20:])
+                tech_features.append(closes_for_tech[-1] / sma_20 - 1.0)  # Price vs SMA
             else:
                 tech_features.append(0.0)
 
-            if len(closes) >= 2:
-                returns = (closes[-1] - closes[-2]) / closes[-2]
+            if len(closes_for_tech) >= 2:
+                returns = (closes_for_tech[-1] - closes_for_tech[-2]) / closes_for_tech[-2]
                 tech_features.append(returns)  # Recent return
             else:
                 tech_features.append(0.0)
 
             # Add volatility
-            if len(closes) >= 20:
-                volatility = np.std(closes[-20:]) / np.mean(closes[-20:])
+            if len(closes_for_tech) >= 20:
+                volatility = np.std(closes_for_tech[-20:]) / np.mean(closes_for_tech[-20:])
                 tech_features.append(volatility)
             else:
                 tech_features.append(0.0)
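The hunk above swaps multi-timeframe concatenation for a single primary timeframe, padded or truncated to the fixed sequence length of 150 that the transformer expects. A minimal sketch of that pad/truncate behavior in isolation, assuming the same dict-of-lists candle format (to_fixed_ohlcv is a hypothetical helper, not a function in this codebase):

import numpy as np

def to_fixed_ohlcv(candles: dict, target_seq_len: int = 150) -> np.ndarray:
    """Return a [target_seq_len, 5] OHLCV array, truncated or edge-padded."""
    cols = ['open', 'high', 'low', 'close', 'volume']
    arr = np.stack(
        [np.asarray(candles.get(c, []), dtype=np.float32) for c in cols],
        axis=-1,
    )  # [seq_len, 5]
    if len(arr) >= target_seq_len:
        return arr[-target_seq_len:]  # keep only the most recent candles
    # Repeat the last candle to fill the tail: same effect as the
    # while/np.append loop in the hunk, but vectorized
    pad = np.repeat(arr[-1:], target_seq_len - len(arr), axis=0)
    return np.concatenate([arr, pad], axis=0)

candles = {c: list(np.random.rand(40)) for c in ['open', 'high', 'low', 'close', 'volume']}
assert to_fixed_ohlcv(candles).shape == (150, 5)

Repeating the last candle with np.repeat produces the same result as the per-element np.append loop while avoiding its repeated reallocations.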
@@ -836,36 +888,36 @@ class RealTrainingAdapter:
             market_features = []
 
             # Add volume profile
-            primary_volumes = np.array(timeframes[primary_tf].get('volume', []), dtype=np.float32)
-            if len(primary_volumes) >= 20:
-                vol_ratio = primary_volumes[-1] / np.mean(primary_volumes[-20:])
+            volumes_for_tech = price_data[0, :, 4].numpy()  # Volume from OHLCV data
+            if len(volumes_for_tech) >= 20:
+                vol_ratio = volumes_for_tech[-1] / np.mean(volumes_for_tech[-20:])
                 market_features.append(vol_ratio)
             else:
                 market_features.append(1.0)
 
             # Add price range
-            primary_highs = np.array(timeframes[primary_tf].get('high', []), dtype=np.float32)
-            primary_lows = np.array(timeframes[primary_tf].get('low', []), dtype=np.float32)
-            if len(primary_highs) >= 20 and len(primary_lows) >= 20:
-                price_range = (np.max(primary_highs[-20:]) - np.min(primary_lows[-20:])) / closes[-1]
+            highs_for_tech = price_data[0, :, 1].numpy()  # High from OHLCV data
+            lows_for_tech = price_data[0, :, 2].numpy()  # Low from OHLCV data
+            if len(highs_for_tech) >= 20 and len(lows_for_tech) >= 20:
+                price_range = (np.max(highs_for_tech[-20:]) - np.min(lows_for_tech[-20:])) / closes_for_tech[-1]
                 market_features.append(price_range)
             else:
                 market_features.append(0.0)
 
             # Add pivot point features
             # Calculate simple pivot points from recent price action
-            if len(primary_highs) >= 5 and len(primary_lows) >= 5:
+            if len(highs_for_tech) >= 5 and len(lows_for_tech) >= 5:
                 # Pivot Point = (High + Low + Close) / 3
-                pivot = (primary_highs[-1] + primary_lows[-1] + closes[-1]) / 3.0
+                pivot = (highs_for_tech[-1] + lows_for_tech[-1] + closes_for_tech[-1]) / 3.0
 
                 # Support and Resistance levels
-                r1 = 2 * pivot - primary_lows[-1]  # Resistance 1
-                s1 = 2 * pivot - primary_highs[-1]  # Support 1
+                r1 = 2 * pivot - lows_for_tech[-1]  # Resistance 1
+                s1 = 2 * pivot - highs_for_tech[-1]  # Support 1
 
                 # Normalize relative to current price
-                pivot_distance = (closes[-1] - pivot) / closes[-1]
-                r1_distance = (closes[-1] - r1) / closes[-1]
-                s1_distance = (closes[-1] - s1) / closes[-1]
+                pivot_distance = (closes_for_tech[-1] - pivot) / closes_for_tech[-1]
+                r1_distance = (closes_for_tech[-1] - r1) / closes_for_tech[-1]
+                s1_distance = (closes_for_tech[-1] - s1) / closes_for_tech[-1]
 
                 market_features.extend([pivot_distance, r1_distance, s1_distance])
             else:
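The pivot features here use the classic floor-trader formulas stated in the comments. A worked check with made-up prices (not taken from the data above):

high, low, close = 3945.0, 3935.0, 3940.0
pivot = (high + low + close) / 3.0        # 3940.0
r1 = 2 * pivot - low                      # 3945.0 (Resistance 1)
s1 = 2 * pivot - high                     # 3935.0 (Support 1)
pivot_distance = (close - pivot) / close  # 0.0 (price exactly at the pivot)
r1_distance = (close - r1) / close        # ~ -0.00127 (negative: price below R1)
s1_distance = (close - s1) / close        # ~ +0.00127 (positive: price above S1)

So under this sign convention a negative distance means the current price sits below the level, which is the usual case for R1 and the opposite for S1.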
@@ -45,10 +45,33 @@
         "entry_state": {},
         "exit_state": {}
       }
+    },
+    {
+      "annotation_id": "91847a37-6315-4546-b5a0-573118311322",
+      "symbol": "ETH/USDT",
+      "timeframe": "1s",
+      "entry": {
+        "timestamp": "2025-10-25 13:08:04",
+        "price": 3940.24,
+        "index": 25
+      },
+      "exit": {
+        "timestamp": "2025-10-25 13:15:12",
+        "price": 3942.59,
+        "index": 57
+      },
+      "direction": "LONG",
+      "profit_loss_pct": 0.05964103709419639,
+      "notes": "",
+      "created_at": "2025-10-25T16:17:02.931920",
+      "market_context": {
+        "entry_state": {},
+        "exit_state": {}
+      }
     }
   ],
   "metadata": {
-    "total_annotations": 2,
-    "last_updated": "2025-10-24T23:35:14.216759"
+    "total_annotations": 3,
+    "last_updated": "2025-10-25T16:17:02.931920"
   }
 }
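The stored profit_loss_pct is consistent with the recorded prices, assuming the tool computes P&L for a LONG as (exit − entry) / entry × 100:

entry, exit_price = 3940.24, 3942.59
pnl_pct = (exit_price - entry) / entry * 100  # ~0.0596%
assert abs(pnl_pct - 0.05964103709419639) < 1e-9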
@@ -75,12 +75,23 @@ except ImportError:
     HistoricalDataLoader = data_module.HistoricalDataLoader
     TimeRangeManager = data_module.TimeRangeManager
 
-# Setup logging
+# Setup logging - configure before any logging occurs
+log_dir = Path(__file__).parent.parent / 'logs'
+log_dir.mkdir(exist_ok=True)
+log_file = log_dir / 'annotate_app.log'
+
+# Configure logging to both file and console
+# File mode 'w' truncates the file on each run
 logging.basicConfig(
     level=logging.INFO,
-    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+    handlers=[
+        logging.FileHandler(log_file, mode='w'),  # Truncate on each run
+        logging.StreamHandler(sys.stdout)  # Also print to console
+    ]
 )
 logger = logging.getLogger(__name__)
+logger.info(f"Logging to: {log_file}")
 
 class AnnotationDashboard:
     """Main annotation dashboard application"""
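One caveat on this change: logging.basicConfig() is a silent no-op when the root logger already has handlers, so if any module imported above this block configures logging first, the new file handler never gets attached. On Python 3.8+ the force=True argument removes pre-existing handlers before applying the config. A sketch of the same setup with that guard (log_file as defined in the hunk):

import logging
import sys

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(log_file, mode='w'),  # truncate on each run
        logging.StreamHandler(sys.stdout),
    ],
    force=True,  # Python 3.8+: drop handlers installed by earlier imports
)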
@@ -1261,6 +1272,11 @@ class AnnotationDashboard:
 
 def main():
     """Main entry point"""
+    logger.info("=" * 80)
+    logger.info("ANNOTATE Application Starting")
+    logger.info(f"Timestamp: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
+    logger.info("=" * 80)
+
     dashboard = AnnotationDashboard()
     dashboard.run(debug=True)
@@ -243,10 +243,11 @@ class MarketRegimeDetector(nn.Module):
 
         # Weighted combination based on regime probabilities
         regime_stack = torch.stack(regime_outputs, dim=0)  # (n_regimes, batch, seq_len, d_model)
-        regime_weights = regime_probs.unsqueeze(1).unsqueeze(3)  # (batch, 1, 1, n_regimes)
+        regime_weights = regime_probs.unsqueeze(0).unsqueeze(2).unsqueeze(3)  # (1, batch, 1, 1, n_regimes)
+        regime_weights = regime_weights.permute(4, 1, 2, 3, 0).squeeze(-1)  # (n_regimes, batch, 1, 1)
 
         # Weighted sum across regimes
-        adapted_output = torch.sum(regime_stack * regime_weights.transpose(0, 3), dim=0)
+        adapted_output = torch.sum(regime_stack * regime_weights, dim=0)
 
         return adapted_output, regime_probs
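The old code appears to have been a broadcasting bug: for regime_probs of shape (batch, n_regimes), unsqueeze(1).unsqueeze(3) yields (batch, 1, n_regimes, 1), not the commented (batch, 1, 1, n_regimes), so the transpose(0, 3) trick misaligned the batch and regime axes against the (n_regimes, batch, seq_len, d_model) stack. A standalone shape check of the fix, with toy dimensions:

import torch

n_regimes, batch, seq_len, d_model = 4, 2, 150, 256
regime_stack = torch.randn(n_regimes, batch, seq_len, d_model)
regime_probs = torch.softmax(torch.randn(batch, n_regimes), dim=-1)

w = regime_probs.unsqueeze(0).unsqueeze(2).unsqueeze(3)  # (1, batch, 1, 1, n_regimes)
w = w.permute(4, 1, 2, 3, 0).squeeze(-1)                 # (n_regimes, batch, 1, 1)
out = torch.sum(regime_stack * w, dim=0)                 # (batch, seq_len, d_model)
assert out.shape == (batch, seq_len, d_model)

# Equivalent and arguably simpler: transpose to (n_regimes, batch), then broadcast
w2 = regime_probs.transpose(0, 1)[:, :, None, None]
assert torch.allclose(out, torch.sum(regime_stack * w2, dim=0))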