def _auto_load_model(self, model_name: str):
    """
    Auto-load a model at startup in a background daemon thread.

    Initializes the TradingOrchestrator lazily on first use, loads the
    requested model, records it in ``self.loaded_models``, and logs the
    best-checkpoint metadata for visibility.

    ``self.models_loading`` is set True while the load is in progress and is
    guaranteed to be reset to False on every exit path (success, unknown
    model name, or error).

    Args:
        model_name: Name of model to load ('DQN', 'CNN', or 'Transformer').
    """
    # Dispatch table: model name -> (orchestrator loader method, attribute
    # holding the loaded model afterwards).
    # NOTE(review): attribute names mirror the orchestrator API as used by
    # the manual-load path — confirm against TradingOrchestrator.
    _LOADERS = {
        'Transformer': ('load_transformer_model', 'primary_transformer_trainer'),
        'CNN': ('load_cnn_model', 'cnn_model'),
        'DQN': ('load_dqn_model', 'dqn_agent'),
    }

    def load_in_background():
        try:
            logger.info(f"Starting auto-load for {model_name}...")

            # Initialize orchestrator if not already done
            if not self.orchestrator:
                logger.info("Initializing TradingOrchestrator...")
                self.orchestrator = TradingOrchestrator(
                    data_provider=self.data_provider,
                    config=self.config
                )
                self.training_adapter.orchestrator = self.orchestrator
                logger.info("TradingOrchestrator initialized")

            entry = _LOADERS.get(model_name)
            if entry is None:
                # finally-block below still clears models_loading
                logger.warning(f"Unknown model name: {model_name}")
                return

            loader_name, model_attr = entry
            logger.info(f"Loading {model_name} model...")
            getattr(self.orchestrator, loader_name)()
            self.loaded_models[model_name] = getattr(self.orchestrator, model_attr)
            logger.info(f"{model_name} model loaded successfully")

            # Get checkpoint info for display
            checkpoint_info = self._get_best_checkpoint_info(model_name)
            if checkpoint_info:
                logger.info(f"  Checkpoint: {checkpoint_info.get('filename', 'N/A')}")
                if checkpoint_info.get('accuracy'):
                    logger.info(f"  Accuracy: {checkpoint_info['accuracy']:.2%}")
                if checkpoint_info.get('loss'):
                    logger.info(f"  Loss: {checkpoint_info['loss']:.4f}")

            logger.info(f"{model_name} model ready for inference and training")

        except Exception:
            # logger.exception records the full traceback automatically —
            # replaces the manual traceback.format_exc() dance.
            logger.exception(f"Error auto-loading {model_name} model")
        finally:
            # BUGFIX: the original returned from the unknown-model branch
            # without resetting the flag, leaving models_loading stuck at
            # True forever. Clear it on every exit path.
            self.models_loading = False

    # Start loading in a daemon thread so app startup is not blocked
    self.models_loading = True
    thread = threading.Thread(target=load_in_background, daemon=True)
    thread.start()
---------------------------------------------------------------------------- +# Automatically load a model at startup for immediate use +# Options: Transformer, CNN, DQN, none +# Default: Transformer +AUTO_LOAD_MODEL=Transformer + +# ---------------------------------------------------------------------------- # Logging Configuration +# ---------------------------------------------------------------------------- # Comma-separated list of enabled logging channels # Available channels: core, trading, training, inference, pivots, data, websocket, api, webui, performance, debug # Leave empty to use defaults (pivots, websocket, api, webui, debug are disabled by default) diff --git a/docs/AUTO_LOAD_MODEL.md b/docs/AUTO_LOAD_MODEL.md new file mode 100644 index 0000000..c5c5f62 --- /dev/null +++ b/docs/AUTO_LOAD_MODEL.md @@ -0,0 +1,245 @@ +# Auto-Load Model Configuration + +The ANNOTATE application can automatically load a neural network model at startup, making it immediately available for inference and training without manual intervention. + +## Configuration + +### Environment Variable + +Set the `AUTO_LOAD_MODEL` environment variable to specify which model to load: + +```bash +# Windows PowerShell +$env:AUTO_LOAD_MODEL="Transformer" + +# Windows CMD +set AUTO_LOAD_MODEL=Transformer + +# Linux/Mac +export AUTO_LOAD_MODEL=Transformer +``` + +### Available Options + +| Value | Description | +|-------|-------------| +| `Transformer` | Load the Transformer model (default) | +| `CNN` | Load the CNN model | +| `DQN` | Load the DQN agent | +| `none` | Disable auto-loading (manual load required) | + +### Default Behavior + +If `AUTO_LOAD_MODEL` is not set, the application defaults to loading the **Transformer** model. + +## Usage Examples + +### Load Transformer (Default) + +```bash +# Explicitly set (same as default) +$env:AUTO_LOAD_MODEL="Transformer" +python ANNOTATE/web/app.py +``` + +**Output:** +``` +=== Logging Channel Status === + ... 
+=============================== + +Auto-loading model: Transformer +Starting auto-load for Transformer... +Initializing TradingOrchestrator... +TradingOrchestrator initialized +Loading Transformer model... +Transformer model loaded successfully + Checkpoint: transformer_best_epoch61_20251122.pt + Accuracy: 85.67% + Loss: 0.2345 +Transformer model ready for inference and training +``` + +### Load CNN Model + +```bash +$env:AUTO_LOAD_MODEL="CNN" +python ANNOTATE/web/app.py +``` + +### Load DQN Agent + +```bash +$env:AUTO_LOAD_MODEL="DQN" +python ANNOTATE/web/app.py +``` + +### Disable Auto-Loading + +```bash +$env:AUTO_LOAD_MODEL="none" +python ANNOTATE/web/app.py +``` + +**Output:** +``` +Auto-load disabled. Models available for lazy loading: DQN, CNN, Transformer +``` + +## Benefits + +### ✅ **Immediate Availability** +- Model is ready as soon as the app starts +- No need to click "Load Models" button +- Start inference/training immediately + +### ✅ **Faster Workflow** +- Skip manual model loading step +- Ideal for production/automated deployments +- Reduces startup clicks + +### ✅ **Background Loading** +- Models load in a background thread +- UI remains responsive during loading +- No blocking of other operations + +### ✅ **Checkpoint Auto-Discovery** +- Automatically loads the best checkpoint +- Shows checkpoint info in logs +- Displays accuracy and loss metrics + +## Technical Details + +### Loading Process + +1. **Startup Detection**: App checks `AUTO_LOAD_MODEL` environment variable +2. **Background Thread**: Model loading happens in a separate thread +3. **Orchestrator Init**: `TradingOrchestrator` is initialized if needed +4. **Model Loading**: Specific model is loaded with best checkpoint +5. **Ready State**: Model is added to `loaded_models` dict + +### Checkpoint Selection + +The auto-loader uses the same checkpoint selection logic as manual loading: + +1. **Database Query**: Checks `checkpoint_metadata` table for active checkpoint +2. 
**Filesystem Scan**: Falls back to scanning checkpoint directory +3. **Best Selection**: Chooses checkpoint with highest accuracy +4. **Metadata Display**: Shows checkpoint info in logs + +### Thread Safety + +- Loading happens in daemon thread +- `models_loading` flag prevents concurrent loads +- Orchestrator is thread-safe +- Training adapter is updated after load + +## Integration with Other Features + +### Real-Time Training + +Auto-loaded models are immediately available for: +- Per-candle training +- Pivot-based training +- Live inference + +### Checkpointing + +Auto-loaded models use the existing checkpoint system: +- Real-time checkpoints save during training +- Best checkpoints are kept +- Metrics are tracked in database + +### API Endpoints + +Once auto-loaded, the model appears in: +- `/api/available-models` - Shows as loaded +- `/api/realtime-inference/start` - Ready for inference +- `/api/training/start` - Ready for training + +## Configuration File + +Add to your `config/logging.env` or environment: + +```bash +# Model Configuration +AUTO_LOAD_MODEL=Transformer + +# Logging Configuration +LOG_CHANNELS=core,trading,training,inference,data,performance +``` + +## Troubleshooting + +### Model Fails to Load + +**Symptoms:** +``` +Error auto-loading Transformer model: ... +``` + +**Solutions:** +1. Check if checkpoint files exist in `models/checkpoints/` +2. Verify model architecture matches checkpoint +3. Check logs for specific error details +4. Try manual loading to see detailed error + +### Wrong Model Loaded + +**Symptoms:** +- Expected Transformer but CNN loaded +- Model name mismatch + +**Solutions:** +1. Check `AUTO_LOAD_MODEL` environment variable +2. Restart shell to clear old environment +3. 
Verify spelling (case-sensitive) + +### Slow Startup + +**Symptoms:** +- App takes long to start +- UI unresponsive initially + +**Solutions:** +- This is normal - model loading takes 5-30 seconds +- Loading happens in background, UI should still be responsive +- Check GPU availability (CUDA initialization can be slow) +- Consider disabling auto-load for development: `AUTO_LOAD_MODEL=none` + +## Best Practices + +### Development + +```bash +# Disable auto-load for faster iteration +$env:AUTO_LOAD_MODEL="none" +``` + +### Production + +```bash +# Auto-load Transformer for immediate availability +$env:AUTO_LOAD_MODEL="Transformer" +``` + +### Testing + +```bash +# Load specific model for testing +$env:AUTO_LOAD_MODEL="DQN" +``` + +### CI/CD + +```bash +# Disable in CI pipelines (no GPU) +export AUTO_LOAD_MODEL=none +``` + +## See Also + +- [Logging Configuration](LOGGING.md) - Configure logging channels +- [Checkpoint System](../ANNOTATE/core/real_training_adapter.py) - Checkpoint management +- [Model Training](../NN/models/) - Model architectures +