LLM proxy integration
@@ -31,6 +31,7 @@ import torch.optim as optim
# Text export integration
from .text_export_integration import TextExportManager
from .llm_proxy import LLMProxy, LLMConfig
import pandas as pd
from pathlib import Path

@@ -572,6 +573,7 @@ class TradingOrchestrator:
        self._initialize_transformer_model()  # Initialize transformer model
        self._initialize_enhanced_training_system()  # Initialize real-time training
        self._initialize_text_export_manager()  # Initialize text data export
        self._initialize_llm_proxy()  # Initialize LLM proxy for trading signals

    def _normalize_model_name(self, name: str) -> str:
        """Map various registry/UI names to canonical toggle keys."""

@@ -7040,7 +7042,7 @@ class TradingOrchestrator:
            'main_symbol': self.symbol,
            'ref1_symbol': self.ref_symbols[0] if self.ref_symbols else 'BTC/USDT',
            'ref2_symbol': 'SPX',  # Default to SPX for now
-           'export_dir': 'data/text_exports'
+           'export_dir': 'NN/training/samples/txt'
        }

        self.text_export_manager.export_config.update(export_config)

@@ -7053,6 +7055,35 @@ class TradingOrchestrator:
            logger.error(f"Error initializing text export manager: {e}")
            self.text_export_manager = None

    def _initialize_llm_proxy(self):
        """Initialize LLM proxy for trading signals"""
        try:
            # Get LLM configuration from config file or use defaults
            llm_config = self.config.get('llm_proxy', {})

            llm_proxy_config = LLMConfig(
                base_url=llm_config.get('base_url', 'http://localhost:1234'),
                model=llm_config.get('model', 'openai/gpt-oss-20b'),
                temperature=llm_config.get('temperature', 0.7),
                max_tokens=llm_config.get('max_tokens', -1),
                timeout=llm_config.get('timeout', 30),
                api_key=llm_config.get('api_key')
            )

            self.llm_proxy = LLMProxy(
                config=llm_proxy_config,
                data_dir='NN/training/samples/txt'
            )

            logger.info("LLM proxy initialized")
            logger.info(f" - Model: {llm_proxy_config.model}")
            logger.info(f" - Base URL: {llm_proxy_config.base_url}")
            logger.info(f" - Temperature: {llm_proxy_config.temperature}")

        except Exception as e:
            logger.error(f"Error initializing LLM proxy: {e}")
            self.llm_proxy = None
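
Every field read from self.config.get('llm_proxy', {}) is optional, so a missing or empty section simply falls back to the defaults above. A minimal sketch of the llm_proxy section this code consumes, written as the Python dict the .get() calls expect; the on-disk config format is not visible in this diff:

# Hypothetical `llm_proxy` config section; only keys read by _initialize_llm_proxy are shown.
llm_proxy = {
    'base_url': 'http://localhost:1234',  # default matches LM Studio's local server port
    'model': 'openai/gpt-oss-20b',
    'temperature': 0.7,
    'max_tokens': -1,                     # -1: no explicit cap (assumed server convention)
    'timeout': 30,                        # request timeout in seconds
    'api_key': None,                      # usually unnecessary for local servers
}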

    def start_text_export(self) -> bool:
        """Start text data export"""
        try:

@@ -7087,6 +7118,91 @@ class TradingOrchestrator:
            logger.error(f"Error getting text export status: {e}")
            return {'enabled': False, 'initialized': False, 'error': str(e)}

    def start_llm_proxy(self) -> bool:
        """Start LLM proxy for trading signals"""
        try:
            if not hasattr(self, 'llm_proxy') or not self.llm_proxy:
                logger.warning("LLM proxy not initialized")
                return False

            self.llm_proxy.start()
            logger.info("LLM proxy started")
            return True
        except Exception as e:
            logger.error(f"Error starting LLM proxy: {e}")
            return False

    def stop_llm_proxy(self) -> bool:
        """Stop LLM proxy"""
        try:
            if not hasattr(self, 'llm_proxy') or not self.llm_proxy:
                return True

            self.llm_proxy.stop()
            logger.info("LLM proxy stopped")
            return True
        except Exception as e:
            logger.error(f"Error stopping LLM proxy: {e}")
            return False

    def get_llm_proxy_status(self) -> Dict[str, Any]:
        """Get LLM proxy status"""
        try:
            if not hasattr(self, 'llm_proxy') or not self.llm_proxy:
                return {'enabled': False, 'initialized': False, 'error': 'Not initialized'}

            return self.llm_proxy.get_status()
        except Exception as e:
            logger.error(f"Error getting LLM proxy status: {e}")
            return {'enabled': False, 'initialized': False, 'error': str(e)}
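
Together, start_llm_proxy, stop_llm_proxy, and get_llm_proxy_status give callers a guarded lifecycle: each method degrades gracefully when the proxy never initialized. A minimal caller sketch, assuming orchestrator is an already-constructed TradingOrchestrator; any status keys beyond the error fallback depend on LLMProxy.get_status(), which this diff does not show:

# Hypothetical caller; orchestrator is an existing TradingOrchestrator instance.
if orchestrator.start_llm_proxy():
    status = orchestrator.get_llm_proxy_status()
    logger.info(f"LLM proxy status: {status}")
    # ... run the trading loop ...
    orchestrator.stop_llm_proxy()
else:
    logger.warning("LLM proxy unavailable; continuing without LLM signals")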

    def get_latest_llm_signal(self, symbol: str = 'ETH'):
        """Get latest LLM trading signal"""
        try:
            if not hasattr(self, 'llm_proxy') or not self.llm_proxy:
                return None

            return self.llm_proxy.get_latest_signal(symbol)
        except Exception as e:
            logger.error(f"Error getting LLM signal: {e}")
            return None
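
get_latest_llm_signal returns None both when the proxy is missing and on error, so callers need only one check. A consumption sketch; the returned signal's shape is not visible in this diff, so the action and confidence attributes below are illustrative assumptions:

# Hypothetical consumer; attribute names are assumed, not confirmed by this commit.
signal = orchestrator.get_latest_llm_signal('ETH')
if signal is not None:
    action = getattr(signal, 'action', None)         # e.g. 'BUY' / 'SELL' / 'HOLD'
    confidence = getattr(signal, 'confidence', 0.0)
    if action in ('BUY', 'SELL') and confidence >= 0.6:
        logger.info(f"Acting on LLM signal: {action} ({confidence:.2f})")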

    def update_llm_config(self, new_config: Dict[str, Any]) -> bool:
        """Update LLM proxy configuration"""
        try:
            if not hasattr(self, 'llm_proxy') or not self.llm_proxy:
                logger.warning("LLM proxy not initialized")
                return False

            # Create new config
            llm_proxy_config = LLMConfig(
                base_url=new_config.get('base_url', 'http://localhost:1234'),
                model=new_config.get('model', 'openai/gpt-oss-20b'),
                temperature=new_config.get('temperature', 0.7),
                max_tokens=new_config.get('max_tokens', -1),
                timeout=new_config.get('timeout', 30),
                api_key=new_config.get('api_key')
            )

            # Stop current proxy
            was_running = self.llm_proxy.is_running
            if was_running:
                self.llm_proxy.stop()

            # Update config
            self.llm_proxy.update_config(llm_proxy_config)

            # Restart if it was running
            if was_running:
                self.llm_proxy.start()

            logger.info("LLM proxy configuration updated")
            return True

        except Exception as e:
            logger.error(f"Error updating LLM config: {e}")
            return False
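
Note that update_llm_config rebuilds a complete LLMConfig from new_config, so any field omitted there reverts to its hard-coded default rather than keeping its current value; callers should pass the full desired configuration, not just the changed key. Example call with hypothetical values:

# Pass every field you care about: omitted keys reset to defaults, not current values.
orchestrator.update_llm_config({
    'base_url': 'http://localhost:1234',
    'model': 'openai/gpt-oss-20b',
    'temperature': 0.2,   # lower temperature for more deterministic output
})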

    def get_enhanced_training_stats(self) -> Dict[str, Any]:
        """Get enhanced training system statistics with orchestrator integration"""
        try: