docker container, inference chaining
Dockerfile (new file, 23 lines)
@@ -0,0 +1,23 @@
# For more information, please refer to https://aka.ms/vscode-docker-python
FROM python:3-slim

# Keeps Python from generating .pyc files in the container
ENV PYTHONDONTWRITEBYTECODE=1

# Turns off buffering for easier container logging
ENV PYTHONUNBUFFERED=1

# Install pip requirements
COPY requirements.txt .
RUN python -m pip install -r requirements.txt

WORKDIR /app
COPY . /app

# Creates a non-root user with an explicit UID and adds permission to access the /app folder
# For more info, please refer to https://aka.ms/vscode-docker-python-configure-containers
RUN adduser -u 5678 --disabled-password --gecos "" appuser && chown -R appuser /app
USER appuser

# During debugging, this entry point will be overridden. For more information, please refer to https://aka.ms/vscode-docker-python-debug
CMD ["python", "run_clean_dashboard.py"]
compose.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
services:
  gogo2:
    image: gogo2
    build:
      context: .
      dockerfile: ./Dockerfile
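With this file in place, the whole stack should build and start in one step with docker compose up --build (standard Compose usage; the command itself is not part of the commit).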
@@ -2651,4 +2651,160 @@ class TradingOrchestrator:
            return result
        except Exception as e:
            logger.error(f"Error getting OHLCV data: {e}")
            return []
    def chain_inference(self, symbol: str, n_steps: int = 10) -> List[Dict]:
        """
        Chain n inference steps using real models instead of mock predictions.
        Each step uses the previous prediction as input for the next prediction.

        Args:
            symbol: Trading symbol (e.g., 'ETH/USDT')
            n_steps: Number of chained predictions to generate

        Returns:
            List of prediction dictionaries with timestamps
        """
        try:
            logger.info(f"🔗 Starting chained inference for {symbol} with {n_steps} steps")

            predictions = []
            current_data = None

            for step in range(n_steps):
                try:
                    # Get current market data for the first step
                    if step == 0:
                        current_data = self._get_current_market_data(symbol)
                        if not current_data:
                            logger.warning(f"No market data available for {symbol}")
                            break

                    # Run inference with available models
                    step_predictions = []

                    # CNN model inference
                    if hasattr(self, 'cnn_model') and self.cnn_model:
                        try:
                            cnn_pred = self.cnn_model.predict(current_data)
                            if cnn_pred:
                                step_predictions.append({
                                    'model': 'CNN',
                                    'prediction': cnn_pred,
                                    'confidence': cnn_pred.get('confidence', 0.5)
                                })
                        except Exception as e:
                            logger.debug(f"CNN inference error: {e}")

                    # DQN model inference
                    if hasattr(self, 'dqn_model') and self.dqn_model:
                        try:
                            dqn_pred = self.dqn_model.predict(current_data)
                            if dqn_pred:
                                step_predictions.append({
                                    'model': 'DQN',
                                    'prediction': dqn_pred,
                                    'confidence': dqn_pred.get('confidence', 0.5)
                                })
                        except Exception as e:
                            logger.debug(f"DQN inference error: {e}")

                    # COB RL model inference
                    if hasattr(self, 'cob_rl_agent') and self.cob_rl_agent:
                        try:
                            cob_pred = self.cob_rl_agent.predict(current_data)
                            if cob_pred:
                                step_predictions.append({
                                    'model': 'COB_RL',
                                    'prediction': cob_pred,
                                    'confidence': cob_pred.get('confidence', 0.5)
                                })
                        except Exception as e:
                            logger.debug(f"COB RL inference error: {e}")

                    if not step_predictions:
                        logger.warning(f"No model predictions available for step {step}")
                        break

                    # Combine predictions (simple average for now)
                    combined_prediction = self._combine_predictions(step_predictions)

                    # Add timestamp for the future prediction
                    prediction_time = datetime.now() + timedelta(minutes=step + 1)
                    combined_prediction['timestamp'] = prediction_time
                    combined_prediction['step'] = step

                    predictions.append(combined_prediction)

                    # Update current_data for the next iteration using the prediction
                    current_data = self._update_data_with_prediction(current_data, combined_prediction)

                    logger.debug(f"Step {step}: Generated prediction for {prediction_time}")

                except Exception as e:
                    logger.error(f"Error in chained inference step {step}: {e}")
                    break

            logger.info(f"✅ Chained inference completed: {len(predictions)} predictions generated")
            return predictions

        except Exception as e:
            logger.error(f"Error in chained inference: {e}")
            return []
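A minimal usage sketch for the new entry point (hypothetical, not part of the commit; it assumes an already-constructed orchestrator with its models loaded):

# Hypothetical usage sketch:
orchestrator = TradingOrchestrator()
forecast = orchestrator.chain_inference('ETH/USDT', n_steps=10)
for p in forecast:
    print(p['step'], p['timestamp'], p['model'], p['confidence'])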

    def _get_current_market_data(self, symbol: str) -> Optional[Dict]:
        """Get current market data for inference"""
        try:
            # This would get real market data - placeholder for now
            return {
                'symbol': symbol,
                'timestamp': datetime.now(),
                'price': 4300.0,  # Placeholder
                'volume': 1000.0,
                'features': [4300.0, 4305.0, 4295.0, 4302.0, 1000.0]  # OHLCV placeholder
            }
        except Exception as e:
            logger.error(f"Error getting market data: {e}")
            return None
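The placeholder comment above suggests a real implementation would pull live candles instead. A sketch of that shape, assuming a hypothetical self.data_provider exposing get_latest_ohlcv(symbol) that returns an OHLCV dict (both names are assumptions, not in the commit):

    def _get_current_market_data(self, symbol: str) -> Optional[Dict]:
        # Hypothetical: data_provider and get_latest_ohlcv are assumed helpers
        candle = self.data_provider.get_latest_ohlcv(symbol)
        if not candle:
            return None
        return {
            'symbol': symbol,
            'timestamp': candle['timestamp'],
            'price': candle['close'],
            'volume': candle['volume'],
            'features': [candle['open'], candle['high'], candle['low'],
                         candle['close'], candle['volume']],
        }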

    def _combine_predictions(self, predictions: List[Dict]) -> Dict:
        """Combine multiple model predictions into a single prediction"""
        try:
            if not predictions:
                return {}

            # Simple averaging for now
            avg_confidence = sum(p['confidence'] for p in predictions) / len(predictions)

            # Use the prediction with the highest confidence
            best_pred = max(predictions, key=lambda x: x['confidence'])

            return {
                'prediction': best_pred['prediction'],
                'confidence': avg_confidence,
                'models_used': len(predictions),
                'model': best_pred['model']
            }

        except Exception as e:
            logger.error(f"Error combining predictions: {e}")
            return {}
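The "simple average for now" comment marks this as a placeholder; one natural refinement (a sketch using the same dict shapes, not part of the commit) is a confidence-weighted blend of the predicted prices rather than picking the single most confident model:

    def _combine_predictions_weighted(self, predictions: List[Dict]) -> Dict:
        # Sketch: weight each model's predicted price by its confidence
        total = sum(p['confidence'] for p in predictions)
        if not predictions or total <= 0:
            return {}
        blended_price = sum(
            p['prediction'].get('price', 0.0) * p['confidence'] for p in predictions
        ) / total
        return {
            'prediction': {'price': blended_price},
            'confidence': total / len(predictions),
            'models_used': len(predictions),
        }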

    def _update_data_with_prediction(self, current_data: Dict, prediction: Dict) -> Dict:
        """Update current data with the prediction for the next iteration"""
        try:
            # Simple update - use the predicted price as the new current price
            updated_data = current_data.copy()
            pred_data = prediction.get('prediction', {})

            if 'price' in pred_data:
                updated_data['price'] = pred_data['price']

            # Update timestamp
            updated_data['timestamp'] = prediction.get('timestamp', datetime.now())

            return updated_data

        except Exception as e:
            logger.error(f"Error updating data with prediction: {e}")
            return current_data
@@ -24,4 +24,7 @@ dash-bootstrap-components>=2.0.0
# NVIDIA GPU (CUDA):
# Visit https://pytorch.org/get-started/locally/ for the correct command for your CUDA version.
# Example (CUDA 12.1):
# pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121
#
# AMD Strix Halo NPU Acceleration:
# pip install onnxruntime-directml onnx transformers optimum