unify model names

Unify model checkpoint names with the keys used in self.model_states: "dqn_agent" becomes "dqn", "enhanced_cnn" becomes "cnn", and "cob_rl_agent" becomes "cob_rl". Also move the numpy, pandas, and torch imports behind try/except guards (HAS_NUMPY / HAS_PANDAS / HAS_TORCH) and make COB RL initialization degrade gracefully when its dependencies are missing.
@@ -15,19 +15,41 @@ import asyncio
 import logging
 import time
 import threading
-import numpy as np
-import pandas as pd
 from datetime import datetime, timedelta
 from typing import Dict, List, Optional, Any, Tuple, Union
 from dataclasses import dataclass, field
 from collections import deque
 import json
+
+# Try to import optional dependencies
+try:
+    import numpy as np
+    HAS_NUMPY = True
+except ImportError:
+    np = None
+    HAS_NUMPY = False
+
+try:
+    import pandas as pd
+    HAS_PANDAS = True
+except ImportError:
+    pd = None
+    HAS_PANDAS = False
 
 import os
 import shutil
 
-import torch
-import torch.nn as nn
-import torch.optim as optim
+# Try to import PyTorch
+try:
+    import torch
+    import torch.nn as nn
+    import torch.optim as optim
+    HAS_TORCH = True
+except ImportError:
+    torch = None
+    nn = None
+    optim = None
+    HAS_TORCH = False
+
 from .config import get_config
 from .data_provider import DataProvider
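The hunk above replaces hard imports with guarded ones that set module-level HAS_NUMPY / HAS_PANDAS / HAS_TORCH flags instead of failing at import time. A minimal sketch of how downstream code is expected to branch on those flags; the helper names here are illustrative, not part of this commit:

# Illustrative sketch only: shows the intended use of the HAS_* flags
# defined by the guarded imports above.
def select_device() -> str:
    """Pick a torch device string, falling back to CPU when torch is missing."""
    if HAS_TORCH and torch.cuda.is_available():
        return 'cuda'
    return 'cpu'

def to_float_array(values):
    """Use numpy when available, otherwise degrade to a plain list of floats."""
    if HAS_NUMPY:
        return np.asarray(values, dtype=float)
    return [float(v) for v in values]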
@@ -227,7 +249,7 @@ class TradingOrchestrator:
             self.rl_agent.load_best_checkpoint()  # This loads the state into the model
             # Check if we have checkpoints available
             from NN.training.model_manager import load_best_checkpoint
-            result = load_best_checkpoint("dqn_agent")
+            result = load_best_checkpoint("dqn")
             if result:
                 file_path, metadata = result
                 self.model_states['dqn']['initial_loss'] = getattr(metadata, 'initial_loss', None)
@@ -267,7 +289,7 @@ class TradingOrchestrator:
             checkpoint_loaded = False
             try:
                 from NN.training.model_manager import load_best_checkpoint
-                result = load_best_checkpoint("enhanced_cnn")
+                result = load_best_checkpoint("cnn")
                 if result:
                     file_path, metadata = result
                     self.model_states['cnn']['initial_loss'] = 0.412
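The two hunks above rename the checkpoint identifiers passed to load_best_checkpoint so they match the self.model_states keys ('dqn', 'cnn'). A hedged sketch of a compatibility shim that would keep checkpoints saved under the legacy names loadable during the transition; this shim is hypothetical and not part of this commit:

# Hypothetical compatibility shim: resolves legacy checkpoint names to the
# unified short names this commit standardizes on.
MODEL_NAME_ALIASES = {
    "dqn_agent": "dqn",
    "enhanced_cnn": "cnn",
    "cob_rl_agent": "cob_rl",
}

def canonical_model_name(name: str) -> str:
    """Map a possibly-legacy model name to its unified form."""
    return MODEL_NAME_ALIASES.get(name, name)

# Usage: load_best_checkpoint(canonical_model_name("dqn_agent")) looks up "dqn".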
@@ -347,57 +369,96 @@ class TradingOrchestrator:
             self.extrema_trainer = None
 
         # Initialize COB RL Model - UNIFIED with ModelManager
+        cob_rl_available = False
         try:
             from NN.models.cob_rl_model import COBRLModelInterface
+            cob_rl_available = True
+        except ImportError as e:
+            logger.warning(f"COB RL dependencies not available: {e}")
+            cob_rl_available = False
 
-            # Initialize COB RL model using unified approach
-            self.cob_rl_agent = COBRLModelInterface(
-                model_checkpoint_dir="@checkpoints/cob_rl",
-                device='cuda' if torch.cuda.is_available() else 'cpu'
-            )
+        if cob_rl_available:
+            try:
+                # Initialize COB RL model using unified approach
+                self.cob_rl_agent = COBRLModelInterface(
+                    model_checkpoint_dir="@checkpoints/cob_rl",
+                    device='cuda' if (HAS_TORCH and torch.cuda.is_available()) else 'cpu'
+                )
+
+                # Add COB RL to model states tracking
+                self.model_states['cob_rl'] = {
+                    'initial_loss': None,
+                    'current_loss': None,
+                    'best_loss': None,
+                    'checkpoint_loaded': False
+                }
+
+                # Load best checkpoint using unified ModelManager
+                checkpoint_loaded = False
+                try:
+                    from NN.training.model_manager import load_best_checkpoint
+                    result = load_best_checkpoint("cob_rl")
+                    if result:
+                        file_path, metadata = result
+                        self.model_states['cob_rl']['initial_loss'] = getattr(metadata, 'loss', None)
+                        self.model_states['cob_rl']['current_loss'] = getattr(metadata, 'loss', None)
+                        self.model_states['cob_rl']['best_loss'] = getattr(metadata, 'loss', None)
+                        self.model_states['cob_rl']['checkpoint_loaded'] = True
+                        self.model_states['cob_rl']['checkpoint_filename'] = getattr(metadata, 'checkpoint_id', 'unknown')
+                        checkpoint_loaded = True
+                        loss_str = f"{getattr(metadata, 'loss', 'N/A'):.4f}" if getattr(metadata, 'loss', None) is not None else "N/A"
+                        logger.info(f"COB RL checkpoint loaded: {getattr(metadata, 'checkpoint_id', 'unknown')} (loss={loss_str})")
+                except Exception as e:
+                    logger.warning(f"Error loading COB RL checkpoint: {e}")
+
+                if not checkpoint_loaded:
+                    # New model - no synthetic data, start fresh
+                    self.model_states['cob_rl']['initial_loss'] = None
+                    self.model_states['cob_rl']['current_loss'] = None
+                    self.model_states['cob_rl']['best_loss'] = None
+                    self.model_states['cob_rl']['checkpoint_filename'] = 'none (fresh start)'
+                    logger.info("COB RL starting fresh - no checkpoint found")
+
+                logger.info("COB RL Agent initialized and integrated with unified ModelManager")
+
+            except Exception as e:
+                logger.error(f"Error initializing COB RL: {e}")
+                self.cob_rl_agent = None
+                cob_rl_available = False
+
+        if not cob_rl_available:
+            # COB RL not available due to missing dependencies
+            # Still try to load checkpoint metadata for display purposes
+            logger.info("COB RL dependencies missing - attempting checkpoint metadata load only")
 
             # Add COB RL to model states tracking
             self.model_states['cob_rl'] = {
                 'initial_loss': None,
                 'current_loss': None,
                 'best_loss': None,
-                'checkpoint_loaded': False
+                'checkpoint_loaded': False,
+                'checkpoint_filename': 'dependencies missing'
             }
 
-            # Load best checkpoint using unified ModelManager
             checkpoint_loaded = False
+            # Try to load checkpoint metadata even without the model
            try:
                 from NN.training.model_manager import load_best_checkpoint
-                result = load_best_checkpoint("cob_rl_agent")
+                result = load_best_checkpoint("cob_rl")
                 if result:
                     file_path, metadata = result
-                    self.model_states['cob_rl']['initial_loss'] = metadata.loss
-                    self.model_states['cob_rl']['current_loss'] = metadata.loss
-                    self.model_states['cob_rl']['best_loss'] = metadata.loss
                     self.model_states['cob_rl']['checkpoint_loaded'] = True
-                    self.model_states['cob_rl']['checkpoint_filename'] = metadata.checkpoint_id
-                    checkpoint_loaded = True
-                    loss_str = f"{metadata.loss:.4f}" if metadata.loss is not None else "N/A"
-                    logger.info(f"COB RL checkpoint loaded: {metadata.checkpoint_id} (loss={loss_str})")
+                    self.model_states['cob_rl']['checkpoint_filename'] = getattr(metadata, 'checkpoint_id', 'found')
+                    logger.info(f"COB RL checkpoint metadata loaded (model unavailable): {getattr(metadata, 'checkpoint_id', 'unknown')}")
+                else:
+                    logger.info("No COB RL checkpoint found")
             except Exception as e:
-                logger.warning(f"Error loading COB RL checkpoint: {e}")
+                logger.debug(f"Could not load COB RL checkpoint metadata: {e}")
 
             if not checkpoint_loaded:
                 # New model - no synthetic data, start fresh
                 self.model_states['cob_rl']['initial_loss'] = None
                 self.model_states['cob_rl']['current_loss'] = None
                 self.model_states['cob_rl']['best_loss'] = None
                 self.model_states['cob_rl']['checkpoint_filename'] = 'none (fresh start)'
                 logger.info("COB RL starting fresh - no checkpoint found")
 
-            logger.info("COB RL Agent initialized and integrated with unified ModelManager")
-            logger.info(" - Uses @checkpoints/ directory structure")
-            logger.info(" - Follows same load/save/checkpoint flow as other models")
-            logger.info(" - Integrated with enhanced real-time training system")
-
-        except ImportError as e:
-            logger.warning(f"COB RL Model not available: {e}")
-            self.cob_rl_agent = None
+        logger.info("COB RL initialization completed")
+        logger.info(" - Uses @checkpoints/ directory structure")
+        logger.info(" - Follows same load/save/checkpoint flow as other models")
+        logger.info(" - Gracefully handles missing dependencies")
 
         # Initialize TRANSFORMER Model
         try:
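The rewritten COB RL block above separates "can we import the model?" from "do we have a checkpoint?": when COBRLModelInterface cannot be imported, the orchestrator still records checkpoint metadata in model_states['cob_rl'] for display. A condensed sketch of that two-phase pattern; init_optional_model and its arguments are illustrative, not part of this commit, and it assumes (as the code above does) that load_best_checkpoint(name) returns an optional (file_path, metadata) tuple:

# Condensed, hypothetical illustration of the availability-then-metadata pattern.
from NN.training.model_manager import load_best_checkpoint

def init_optional_model(name, build_model):
    state = {'checkpoint_loaded': False, 'checkpoint_filename': 'none'}
    try:
        model = build_model()  # phase 1: constructing the model may raise ImportError
    except ImportError:
        model = None           # model unavailable; still attempt the metadata load
    try:
        result = load_best_checkpoint(name)  # phase 2: metadata works either way
        if result:
            _, metadata = result
            state['checkpoint_loaded'] = True
            state['checkpoint_filename'] = getattr(metadata, 'checkpoint_id', 'unknown')
    except Exception:
        pass  # a missing or unreadable checkpoint is not fatal
    return model, state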